mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-06-13 02:33:34 +00:00
6
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/BUILD
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/BUILD
generated
vendored
@ -26,7 +26,10 @@ go_library(
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["defaults_test.go"],
|
||||
srcs = [
|
||||
"conversion_test.go",
|
||||
"defaults_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
@ -37,6 +40,7 @@ go_test(
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/k8s.io/utils/pointer:go_default_library",
|
||||
],
|
||||
)
|
||||
|
12
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/conversion.go
generated
vendored
12
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/conversion.go
generated
vendored
@ -185,8 +185,10 @@ func Convert_v2beta1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *au
|
||||
}
|
||||
|
||||
func Convert_autoscaling_PodsMetricSource_To_v2beta1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *autoscalingv2beta1.PodsMetricSource, s conversion.Scope) error {
|
||||
targetAverageValue := *in.Target.AverageValue
|
||||
out.TargetAverageValue = targetAverageValue
|
||||
if in.Target.AverageValue != nil {
|
||||
targetAverageValue := *in.Target.AverageValue
|
||||
out.TargetAverageValue = targetAverageValue
|
||||
}
|
||||
|
||||
out.MetricName = in.Metric.Name
|
||||
out.Selector = in.Metric.Selector
|
||||
@ -247,8 +249,10 @@ func Convert_autoscaling_ObjectMetricStatus_To_v2beta1_ObjectMetricStatus(in *au
|
||||
}
|
||||
out.MetricName = in.Metric.Name
|
||||
out.Selector = in.Metric.Selector
|
||||
currentAverageValue := *in.Current.AverageValue
|
||||
out.AverageValue = ¤tAverageValue
|
||||
if in.Current.AverageValue != nil {
|
||||
currentAverageValue := *in.Current.AverageValue
|
||||
out.AverageValue = ¤tAverageValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
84
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/conversion_test.go
generated
vendored
Normal file
84
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/conversion_test.go
generated
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v2beta1
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"k8s.io/api/autoscaling/v2beta1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/apis/autoscaling"
|
||||
)
|
||||
|
||||
// Testing nil pointer panic uncovered by #70806
|
||||
// TODO(yue9944882): Test nil/empty conversion across all resource types
|
||||
func TestNilOrEmptyConversion(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
assert.NoError(t, addConversionFuncs(scheme))
|
||||
|
||||
testCases := []struct {
|
||||
obj1 interface{}
|
||||
obj2 interface{}
|
||||
}{
|
||||
{
|
||||
obj1: &autoscaling.ExternalMetricSource{},
|
||||
obj2: &v2beta1.ExternalMetricSource{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.ExternalMetricStatus{},
|
||||
obj2: &v2beta1.ExternalMetricStatus{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.PodsMetricSource{},
|
||||
obj2: &v2beta1.PodsMetricSource{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.PodsMetricStatus{},
|
||||
obj2: &v2beta1.PodsMetricStatus{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.ObjectMetricSource{},
|
||||
obj2: &v2beta1.ObjectMetricSource{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.ObjectMetricStatus{},
|
||||
obj2: &v2beta1.ObjectMetricStatus{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.ResourceMetricSource{},
|
||||
obj2: &v2beta1.ResourceMetricSource{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.ResourceMetricStatus{},
|
||||
obj2: &v2beta1.ResourceMetricStatus{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.HorizontalPodAutoscaler{},
|
||||
obj2: &v2beta1.HorizontalPodAutoscaler{},
|
||||
},
|
||||
{
|
||||
obj1: &autoscaling.MetricTarget{},
|
||||
obj2: &v2beta1.CrossVersionObjectReference{},
|
||||
},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
assert.NoError(t, scheme.Convert(testCase.obj1, testCase.obj2, nil))
|
||||
assert.NoError(t, scheme.Convert(testCase.obj2, testCase.obj1, nil))
|
||||
}
|
||||
}
|
@ -68,6 +68,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
|
||||
newVM := compute.VirtualMachine{
|
||||
Location: vm.Location,
|
||||
VirtualMachineProperties: &compute.VirtualMachineProperties{
|
||||
HardwareProfile: vm.HardwareProfile,
|
||||
StorageProfile: &compute.StorageProfile{
|
||||
DataDisks: &disks,
|
||||
},
|
||||
@ -132,6 +133,7 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
|
||||
newVM := compute.VirtualMachine{
|
||||
Location: vm.Location,
|
||||
VirtualMachineProperties: &compute.VirtualMachineProperties{
|
||||
HardwareProfile: vm.HardwareProfile,
|
||||
StorageProfile: &compute.StorageProfile{
|
||||
DataDisks: &disks,
|
||||
},
|
||||
|
29
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go
generated
vendored
29
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go
generated
vendored
@ -67,7 +67,17 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
|
||||
CreateOption: "attach",
|
||||
})
|
||||
}
|
||||
vm.StorageProfile.DataDisks = &disks
|
||||
newVM := compute.VirtualMachineScaleSetVM{
|
||||
Sku: vm.Sku,
|
||||
Location: vm.Location,
|
||||
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
|
||||
HardwareProfile: vm.HardwareProfile,
|
||||
StorageProfile: &compute.StorageProfile{
|
||||
OsDisk: vm.StorageProfile.OsDisk,
|
||||
DataDisks: &disks,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
@ -77,7 +87,7 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
|
||||
defer ss.vmssVMCache.Delete(key)
|
||||
|
||||
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, nodeName, diskName)
|
||||
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm)
|
||||
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
|
||||
if err != nil {
|
||||
detail := err.Error()
|
||||
if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
|
||||
@ -126,7 +136,18 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
|
||||
return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI)
|
||||
}
|
||||
|
||||
vm.StorageProfile.DataDisks = &disks
|
||||
newVM := compute.VirtualMachineScaleSetVM{
|
||||
Sku: vm.Sku,
|
||||
Location: vm.Location,
|
||||
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
|
||||
HardwareProfile: vm.HardwareProfile,
|
||||
StorageProfile: &compute.StorageProfile{
|
||||
OsDisk: vm.StorageProfile.OsDisk,
|
||||
DataDisks: &disks,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
@ -135,7 +156,7 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
|
||||
defer ss.vmssVMCache.Delete(key)
|
||||
|
||||
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, nodeName, diskName)
|
||||
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm)
|
||||
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
|
||||
if err != nil {
|
||||
klog.Errorf("azureDisk - detach disk(%s) from %s failed, err: %v", diskName, nodeName, err)
|
||||
} else {
|
||||
|
13
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go
generated
vendored
13
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go
generated
vendored
@ -96,7 +96,8 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
|
||||
addresses := []v1.NodeAddress{
|
||||
{Type: v1.NodeHostName, Address: string(name)},
|
||||
}
|
||||
for _, address := range ipAddress.IPV4.IPAddress {
|
||||
if len(ipAddress.IPV4.IPAddress) > 0 && len(ipAddress.IPV4.IPAddress[0].PrivateIP) > 0 {
|
||||
address := ipAddress.IPV4.IPAddress[0]
|
||||
addresses = append(addresses, v1.NodeAddress{
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: address.PrivateIP,
|
||||
@ -108,7 +109,8 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
|
||||
})
|
||||
}
|
||||
}
|
||||
for _, address := range ipAddress.IPV6.IPAddress {
|
||||
if len(ipAddress.IPV6.IPAddress) > 0 && len(ipAddress.IPV6.IPAddress[0].PrivateIP) > 0 {
|
||||
address := ipAddress.IPV6.IPAddress[0]
|
||||
addresses = append(addresses, v1.NodeAddress{
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: address.PrivateIP,
|
||||
@ -120,6 +122,13 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(addresses) == 1 {
|
||||
// No IP addresses is got from instance metadata service, clean up cache and report errors.
|
||||
az.metadata.imsCache.Delete(metadataCacheKey)
|
||||
return nil, fmt.Errorf("get empty IP addresses from instance metadata service")
|
||||
}
|
||||
|
||||
return addresses, nil
|
||||
}
|
||||
|
||||
|
121
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances_test.go
generated
vendored
121
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances_test.go
generated
vendored
@ -21,10 +21,12 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
@ -216,3 +218,122 @@ func TestInstanceShutdownByProviderID(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeAddresses(t *testing.T) {
|
||||
cloud := getTestCloud()
|
||||
cloud.Config.UseInstanceMetadata = true
|
||||
metadataTemplate := `{"compute":{"name":"%s"},"network":{"interface":[{"ipv4":{"ipAddress":[{"privateIpAddress":"%s","publicIpAddress":"%s"}]},"ipv6":{"ipAddress":[{"privateIpAddress":"%s","publicIpAddress":"%s"}]}}]}}`
|
||||
|
||||
testcases := []struct {
|
||||
name string
|
||||
nodeName string
|
||||
ipV4 string
|
||||
ipV6 string
|
||||
ipV4Public string
|
||||
ipV6Public string
|
||||
expected []v1.NodeAddress
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "NodeAddresses should get both ipV4 and ipV6 private addresses",
|
||||
nodeName: "vm1",
|
||||
ipV4: "10.240.0.1",
|
||||
ipV6: "1111:11111:00:00:1111:1111:000:111",
|
||||
expected: []v1.NodeAddress{
|
||||
{
|
||||
Type: v1.NodeHostName,
|
||||
Address: "vm1",
|
||||
},
|
||||
{
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: "10.240.0.1",
|
||||
},
|
||||
{
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: "1111:11111:00:00:1111:1111:000:111",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "NodeAddresses should report error when IPs are empty",
|
||||
nodeName: "vm1",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "NodeAddresses should get ipV4 private and public addresses",
|
||||
nodeName: "vm1",
|
||||
ipV4: "10.240.0.1",
|
||||
ipV4Public: "9.9.9.9",
|
||||
expected: []v1.NodeAddress{
|
||||
{
|
||||
Type: v1.NodeHostName,
|
||||
Address: "vm1",
|
||||
},
|
||||
{
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: "10.240.0.1",
|
||||
},
|
||||
{
|
||||
Type: v1.NodeExternalIP,
|
||||
Address: "9.9.9.9",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "NodeAddresses should get ipV6 private and public addresses",
|
||||
nodeName: "vm1",
|
||||
ipV6: "1111:11111:00:00:1111:1111:000:111",
|
||||
ipV6Public: "2222:22221:00:00:2222:2222:000:111",
|
||||
expected: []v1.NodeAddress{
|
||||
{
|
||||
Type: v1.NodeHostName,
|
||||
Address: "vm1",
|
||||
},
|
||||
{
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: "1111:11111:00:00:1111:1111:000:111",
|
||||
},
|
||||
{
|
||||
Type: v1.NodeExternalIP,
|
||||
Address: "2222:22221:00:00:2222:2222:000:111",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testcases {
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
|
||||
}
|
||||
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintf(w, fmt.Sprintf(metadataTemplate, test.nodeName, test.ipV4, test.ipV4Public, test.ipV6, test.ipV6Public))
|
||||
}))
|
||||
go func() {
|
||||
http.Serve(listener, mux)
|
||||
}()
|
||||
defer listener.Close()
|
||||
|
||||
cloud.metadata, err = NewInstanceMetadataService("http://" + listener.Addr().String() + "/")
|
||||
if err != nil {
|
||||
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
|
||||
}
|
||||
|
||||
ipAddresses, err := cloud.NodeAddresses(context.Background(), types.NodeName(test.nodeName))
|
||||
if test.expectError {
|
||||
if err == nil {
|
||||
t.Errorf("Test [%s] unexpected nil err", test.name)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
|
||||
}
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(ipAddresses, test.expected) {
|
||||
t.Errorf("Test [%s] unexpected ipAddresses: %s, expected %q", test.name, ipAddresses, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen.go
generated
vendored
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen_test.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen_test.go
generated
vendored
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
3
vendor/k8s.io/kubernetes/pkg/controller/cronjob/injection.go
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/controller/cronjob/injection.go
generated
vendored
@ -118,7 +118,8 @@ func (r realJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1.
|
||||
}
|
||||
|
||||
func (r realJobControl) DeleteJob(namespace string, name string) error {
|
||||
return r.KubeClient.BatchV1().Jobs(namespace).Delete(name, nil)
|
||||
background := metav1.DeletePropagationBackground
|
||||
return r.KubeClient.BatchV1().Jobs(namespace).Delete(name, &metav1.DeleteOptions{PropagationPolicy: &background})
|
||||
}
|
||||
|
||||
type fakeJobControl struct {
|
||||
|
24
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go
generated
vendored
24
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go
generated
vendored
@ -207,14 +207,18 @@ func (a *HorizontalController) processNextWorkItem() bool {
|
||||
}
|
||||
defer a.queue.Done(key)
|
||||
|
||||
err := a.reconcileKey(key.(string))
|
||||
if err == nil {
|
||||
// don't "forget" here because we want to only process a given HPA once per resync interval
|
||||
return true
|
||||
deleted, err := a.reconcileKey(key.(string))
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
}
|
||||
// Add request processing HPA after resync interval just in case last resync didn't insert
|
||||
// request into the queue. Request is not inserted into queue by resync if previous one wasn't processed yet.
|
||||
// This happens quite often because requests from previous resync are removed from the queue at the same moment
|
||||
// as next resync inserts new requests.
|
||||
if !deleted {
|
||||
a.queue.AddRateLimited(key)
|
||||
}
|
||||
|
||||
a.queue.AddRateLimited(key)
|
||||
utilruntime.HandleError(err)
|
||||
return true
|
||||
}
|
||||
|
||||
@ -298,20 +302,20 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
|
||||
return replicas, metric, statuses, timestamp, nil
|
||||
}
|
||||
|
||||
func (a *HorizontalController) reconcileKey(key string) error {
|
||||
func (a *HorizontalController) reconcileKey(key string) (deleted bool, err error) {
|
||||
namespace, name, err := cache.SplitMetaNamespaceKey(key)
|
||||
if err != nil {
|
||||
return err
|
||||
return true, err
|
||||
}
|
||||
|
||||
hpa, err := a.hpaLister.HorizontalPodAutoscalers(namespace).Get(name)
|
||||
if errors.IsNotFound(err) {
|
||||
klog.Infof("Horizontal Pod Autoscaler %s has been deleted in %s", name, namespace)
|
||||
delete(a.recommendations, key)
|
||||
return nil
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return a.reconcileAutoscaler(hpa, key)
|
||||
return false, a.reconcileAutoscaler(hpa, key)
|
||||
}
|
||||
|
||||
// computeStatusForObjectMetric computes the desired number of replicas for the specified metric of type ObjectMetricSourceType.
|
||||
|
55
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go
generated
vendored
55
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go
generated
vendored
@ -2176,7 +2176,7 @@ func TestComputedToleranceAlgImplementation(t *testing.T) {
|
||||
finalPods := int32(math.Ceil(resourcesUsedRatio * float64(startPods)))
|
||||
|
||||
// To breach tolerance we will create a utilization ratio difference of tolerance to usageRatioToleranceValue)
|
||||
tc := testCase{
|
||||
tc1 := testCase{
|
||||
minReplicas: 0,
|
||||
maxReplicas: 1000,
|
||||
initialReplicas: startPods,
|
||||
@ -2209,22 +2209,49 @@ func TestComputedToleranceAlgImplementation(t *testing.T) {
|
||||
useMetricsAPI: true,
|
||||
recommendations: []timestampedRecommendation{},
|
||||
}
|
||||
tc1.runTest(t)
|
||||
|
||||
tc.runTest(t)
|
||||
|
||||
// Reuse the data structure above, now testing "unscaling".
|
||||
// Now, we test that no scaling happens if we are in a very close margin to the tolerance
|
||||
target = math.Abs(1/(requestedToUsed*(1-defaultTestingTolerance))) + .004
|
||||
finalCPUPercentTarget = int32(target * 100)
|
||||
tc.CPUTarget = finalCPUPercentTarget
|
||||
tc.initialReplicas = startPods
|
||||
tc.expectedDesiredReplicas = startPods
|
||||
tc.expectedConditions = statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
||||
Type: autoscalingv2.AbleToScale,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: "ReadyForNewScale",
|
||||
})
|
||||
tc.runTest(t)
|
||||
tc2 := testCase{
|
||||
minReplicas: 0,
|
||||
maxReplicas: 1000,
|
||||
initialReplicas: startPods,
|
||||
expectedDesiredReplicas: startPods,
|
||||
CPUTarget: finalCPUPercentTarget,
|
||||
reportedLevels: []uint64{
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
totalUsedCPUOfAllPods / 10,
|
||||
},
|
||||
reportedCPURequests: []resource.Quantity{
|
||||
resource.MustParse(fmt.Sprint(perPodRequested+100) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested-100) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested+10) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested-10) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested+2) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested-2) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested+1) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested-1) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
|
||||
},
|
||||
useMetricsAPI: true,
|
||||
recommendations: []timestampedRecommendation{},
|
||||
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
||||
Type: autoscalingv2.AbleToScale,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: "ReadyForNewScale",
|
||||
}),
|
||||
}
|
||||
tc2.runTest(t)
|
||||
}
|
||||
|
||||
func TestScaleUpRCImmediately(t *testing.T) {
|
||||
|
12
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/legacy_horizontal_test.go
generated
vendored
12
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/legacy_horizontal_test.go
generated
vendored
@ -100,6 +100,8 @@ type legacyTestCase struct {
|
||||
// Last scale time
|
||||
lastScaleTime *metav1.Time
|
||||
recommendations []timestampedRecommendation
|
||||
|
||||
finished bool
|
||||
}
|
||||
|
||||
// Needs to be called under a lock.
|
||||
@ -462,12 +464,14 @@ func (tc *legacyTestCase) verifyResults(t *testing.T) {
|
||||
func (tc *legacyTestCase) runTest(t *testing.T) {
|
||||
testClient, testScaleClient := tc.prepareTestClient(t)
|
||||
metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
|
||||
|
||||
eventClient := &fake.Clientset{}
|
||||
eventClient.AddReactor("*", "events", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
tc.Lock()
|
||||
defer tc.Unlock()
|
||||
|
||||
if tc.finished {
|
||||
return true, &v1.Event{}, nil
|
||||
}
|
||||
obj := action.(core.CreateAction).GetObject().(*v1.Event)
|
||||
if tc.verifyEvents {
|
||||
switch obj.Reason {
|
||||
@ -514,7 +518,10 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
|
||||
informerFactory.Start(stop)
|
||||
go hpaController.Run(stop)
|
||||
|
||||
// Wait for HPA to be processed.
|
||||
<-tc.processed
|
||||
tc.Lock()
|
||||
tc.finished = true
|
||||
if tc.verifyEvents {
|
||||
tc.Unlock()
|
||||
// We need to wait for events to be broadcasted (sleep for longer than record.sleepDuration).
|
||||
@ -522,9 +529,8 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
|
||||
} else {
|
||||
tc.Unlock()
|
||||
}
|
||||
// Wait for HPA to be processed.
|
||||
<-tc.processed
|
||||
tc.verifyResults(t)
|
||||
|
||||
}
|
||||
|
||||
func TestLegacyScaleUp(t *testing.T) {
|
||||
|
13
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/apply.go
generated
vendored
13
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply/apply.go
generated
vendored
@ -343,6 +343,13 @@ func (o *ApplyOptions) Run() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// If server-dry-run is requested but the type doesn't support it, fail right away.
|
||||
if o.ServerDryRun {
|
||||
if err := dryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if info.Namespaced() {
|
||||
visitedNamespaces.Insert(info.Namespace)
|
||||
}
|
||||
@ -366,12 +373,6 @@ func (o *ApplyOptions) Run() error {
|
||||
if !errors.IsNotFound(err) {
|
||||
return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err)
|
||||
}
|
||||
// If server-dry-run is requested but the type doesn't support it, fail right away.
|
||||
if o.ServerDryRun {
|
||||
if err := dryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Create the resource if it doesn't exist
|
||||
// First, update the annotation used by kubectl apply
|
||||
|
12
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/diff/diff.go
generated
vendored
12
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/diff/diff.go
generated
vendored
@ -289,7 +289,17 @@ func (obj InfoObject) Merged() (runtime.Object, error) {
|
||||
}
|
||||
|
||||
func (obj InfoObject) Name() string {
|
||||
return obj.Info.Name
|
||||
group := ""
|
||||
if obj.Info.Mapping.GroupVersionKind.Group != "" {
|
||||
group = fmt.Sprintf("%v.", obj.Info.Mapping.GroupVersionKind.Group)
|
||||
}
|
||||
return group + fmt.Sprintf(
|
||||
"%v.%v.%v.%v",
|
||||
obj.Info.Mapping.GroupVersionKind.Version,
|
||||
obj.Info.Mapping.GroupVersionKind.Kind,
|
||||
obj.Info.Namespace,
|
||||
obj.Info.Name,
|
||||
)
|
||||
}
|
||||
|
||||
// Differ creates two DiffVersion and diffs them.
|
||||
|
6
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/get.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/get.go
generated
vendored
@ -334,8 +334,10 @@ func (r *RuntimeSorter) Sort() error {
|
||||
case *metav1beta1.Table:
|
||||
includesTable = true
|
||||
|
||||
if err := NewTableSorter(t, r.field).Sort(); err != nil {
|
||||
continue
|
||||
if sorter, err := NewTableSorter(t, r.field); err != nil {
|
||||
return err
|
||||
} else if err := sorter.Sort(); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
includesRuntimeObjs = true
|
||||
|
25
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/sorter.go
generated
vendored
25
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/sorter.go
generated
vendored
@ -318,13 +318,18 @@ func (t *TableSorter) Len() int {
|
||||
|
||||
func (t *TableSorter) Swap(i, j int) {
|
||||
t.obj.Rows[i], t.obj.Rows[j] = t.obj.Rows[j], t.obj.Rows[i]
|
||||
t.parsedRows[i], t.parsedRows[j] = t.parsedRows[j], t.parsedRows[i]
|
||||
}
|
||||
|
||||
func (t *TableSorter) Less(i, j int) bool {
|
||||
iValues := t.parsedRows[i]
|
||||
jValues := t.parsedRows[j]
|
||||
if len(iValues) == 0 || len(iValues[0]) == 0 || len(jValues) == 0 || len(jValues[0]) == 0 {
|
||||
klog.Fatalf("couldn't find any field with path %q in the list of objects", t.field)
|
||||
|
||||
if len(iValues) == 0 || len(iValues[0]) == 0 {
|
||||
return true
|
||||
}
|
||||
if len(jValues) == 0 || len(jValues[0]) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
iField := iValues[0][0]
|
||||
@ -342,28 +347,36 @@ func (t *TableSorter) Sort() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewTableSorter(table *metav1beta1.Table, field string) *TableSorter {
|
||||
func NewTableSorter(table *metav1beta1.Table, field string) (*TableSorter, error) {
|
||||
var parsedRows [][][]reflect.Value
|
||||
|
||||
parser := jsonpath.New("sorting").AllowMissingKeys(true)
|
||||
err := parser.Parse(field)
|
||||
if err != nil {
|
||||
klog.Fatalf("sorting error: %v\n", err)
|
||||
return nil, fmt.Errorf("sorting error: %v", err)
|
||||
}
|
||||
|
||||
fieldFoundOnce := false
|
||||
for i := range table.Rows {
|
||||
parsedRow, err := findJSONPathResults(parser, table.Rows[i].Object.Object)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to get values for %#v using %s (%#v)", parsedRow, field, err)
|
||||
return nil, fmt.Errorf("Failed to get values for %#v using %s (%#v)", parsedRow, field, err)
|
||||
}
|
||||
parsedRows = append(parsedRows, parsedRow)
|
||||
if len(parsedRow) > 0 && len(parsedRow[0]) > 0 {
|
||||
fieldFoundOnce = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(table.Rows) > 0 && !fieldFoundOnce {
|
||||
return nil, fmt.Errorf("couldn't find any field with path %q in the list of objects", field)
|
||||
}
|
||||
|
||||
return &TableSorter{
|
||||
obj: table,
|
||||
field: field,
|
||||
parsedRows: parsedRows,
|
||||
}
|
||||
}, nil
|
||||
}
|
||||
func findJSONPathResults(parser *jsonpath.JSONPath, from runtime.Object) ([][]reflect.Value, error) {
|
||||
if unstructuredObj, ok := from.(*unstructured.Unstructured); ok {
|
||||
|
82
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/sorter_test.go
generated
vendored
82
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/get/sorter_test.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package get
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
@ -25,10 +26,20 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/diff"
|
||||
"k8s.io/kubernetes/pkg/kubectl/scheme"
|
||||
)
|
||||
|
||||
func toUnstructuredOrDie(data []byte) *unstructured.Unstructured {
|
||||
unstrBody := map[string]interface{}{}
|
||||
err := json.Unmarshal(data, &unstrBody)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &unstructured.Unstructured{Object: unstrBody}
|
||||
}
|
||||
func encodeOrDie(obj runtime.Object) []byte {
|
||||
data, err := runtime.Encode(scheme.Codecs.LegacyCodec(corev1.SchemeGroupVersion), obj)
|
||||
if err != nil {
|
||||
@ -65,6 +76,16 @@ func TestSortingPrinter(t *testing.T) {
|
||||
name string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
obj: &corev1.PodList{
|
||||
Items: []corev1.Pod{},
|
||||
},
|
||||
sort: &corev1.PodList{
|
||||
Items: []corev1.Pod{},
|
||||
},
|
||||
field: "{.metadata.name}",
|
||||
},
|
||||
{
|
||||
name: "in-order-already",
|
||||
obj: &corev1.PodList{
|
||||
@ -237,16 +258,16 @@ func TestSortingPrinter(t *testing.T) {
|
||||
name: "v1.List in order",
|
||||
obj: &corev1.List{
|
||||
Items: []runtime.RawExtension{
|
||||
{Raw: encodeOrDie(a)},
|
||||
{Raw: encodeOrDie(b)},
|
||||
{Raw: encodeOrDie(c)},
|
||||
{Object: a, Raw: encodeOrDie(a)},
|
||||
{Object: b, Raw: encodeOrDie(b)},
|
||||
{Object: c, Raw: encodeOrDie(c)},
|
||||
},
|
||||
},
|
||||
sort: &corev1.List{
|
||||
Items: []runtime.RawExtension{
|
||||
{Raw: encodeOrDie(a)},
|
||||
{Raw: encodeOrDie(b)},
|
||||
{Raw: encodeOrDie(c)},
|
||||
{Object: a, Raw: encodeOrDie(a)},
|
||||
{Object: b, Raw: encodeOrDie(b)},
|
||||
{Object: c, Raw: encodeOrDie(c)},
|
||||
},
|
||||
},
|
||||
field: "{.metadata.name}",
|
||||
@ -255,16 +276,16 @@ func TestSortingPrinter(t *testing.T) {
|
||||
name: "v1.List in reverse",
|
||||
obj: &corev1.List{
|
||||
Items: []runtime.RawExtension{
|
||||
{Raw: encodeOrDie(c)},
|
||||
{Raw: encodeOrDie(b)},
|
||||
{Raw: encodeOrDie(a)},
|
||||
{Object: c, Raw: encodeOrDie(c)},
|
||||
{Object: b, Raw: encodeOrDie(b)},
|
||||
{Object: a, Raw: encodeOrDie(a)},
|
||||
},
|
||||
},
|
||||
sort: &corev1.List{
|
||||
Items: []runtime.RawExtension{
|
||||
{Raw: encodeOrDie(a)},
|
||||
{Raw: encodeOrDie(b)},
|
||||
{Raw: encodeOrDie(c)},
|
||||
{Object: a, Raw: encodeOrDie(a)},
|
||||
{Object: b, Raw: encodeOrDie(b)},
|
||||
{Object: c, Raw: encodeOrDie(c)},
|
||||
},
|
||||
},
|
||||
field: "{.metadata.name}",
|
||||
@ -390,6 +411,43 @@ func TestSortingPrinter(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name+" table", func(t *testing.T) {
|
||||
table := &metav1beta1.Table{}
|
||||
meta.EachListItem(tt.obj, func(item runtime.Object) error {
|
||||
table.Rows = append(table.Rows, metav1beta1.TableRow{
|
||||
Object: runtime.RawExtension{Object: toUnstructuredOrDie(encodeOrDie(item))},
|
||||
})
|
||||
return nil
|
||||
})
|
||||
|
||||
expectedTable := &metav1beta1.Table{}
|
||||
meta.EachListItem(tt.sort, func(item runtime.Object) error {
|
||||
expectedTable.Rows = append(expectedTable.Rows, metav1beta1.TableRow{
|
||||
Object: runtime.RawExtension{Object: toUnstructuredOrDie(encodeOrDie(item))},
|
||||
})
|
||||
return nil
|
||||
})
|
||||
|
||||
sorter, err := NewTableSorter(table, tt.field)
|
||||
if err == nil {
|
||||
err = sorter.Sort()
|
||||
}
|
||||
if err != nil {
|
||||
if len(tt.expectedErr) > 0 {
|
||||
if strings.Contains(err.Error(), tt.expectedErr) {
|
||||
return
|
||||
}
|
||||
t.Fatalf("%s: expected error containing: %q, got: \"%v\"", tt.name, tt.expectedErr, err)
|
||||
}
|
||||
t.Fatalf("%s: unexpected error: %v", tt.name, err)
|
||||
}
|
||||
if len(tt.expectedErr) > 0 {
|
||||
t.Fatalf("%s: expected error containing: %q, got none", tt.name, tt.expectedErr)
|
||||
}
|
||||
if !reflect.DeepEqual(table, expectedTable) {
|
||||
t.Errorf("[%s]\nexpected/saw:\n%s", tt.name, diff.ObjectReflectDiff(expectedTable, table))
|
||||
}
|
||||
})
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
sort := &SortingPrinter{SortField: tt.field, Decoder: scheme.Codecs.UniversalDecoder()}
|
||||
err := sort.sortObj(tt.obj)
|
||||
|
14
vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/secret_for_docker_registry.go
generated
vendored
14
vendor/k8s.io/kubernetes/pkg/kubectl/generate/versioned/secret_for_docker_registry.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package versioned
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
@ -152,6 +153,7 @@ func handleDockerCfgJSONContent(username, password, email, server string) ([]byt
|
||||
Username: username,
|
||||
Password: password,
|
||||
Email: email,
|
||||
Auth: encodeDockerConfigFieldAuth(username, password),
|
||||
}
|
||||
|
||||
dockerCfgJSON := DockerConfigJSON{
|
||||
@ -161,6 +163,11 @@ func handleDockerCfgJSONContent(username, password, email, server string) ([]byt
|
||||
return json.Marshal(dockerCfgJSON)
|
||||
}
|
||||
|
||||
func encodeDockerConfigFieldAuth(username, password string) string {
|
||||
fieldValue := username + ":" + password
|
||||
return base64.StdEncoding.EncodeToString([]byte(fieldValue))
|
||||
}
|
||||
|
||||
// DockerConfigJSON represents a local docker auth config file
|
||||
// for pulling images.
|
||||
type DockerConfigJSON struct {
|
||||
@ -175,7 +182,8 @@ type DockerConfigJSON struct {
|
||||
type DockerConfig map[string]DockerConfigEntry
|
||||
|
||||
type DockerConfigEntry struct {
|
||||
Username string
|
||||
Password string
|
||||
Email string
|
||||
Username string `json:"username,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
Email string `json:"email,omitempty"`
|
||||
Auth string `json:"auth,omitempty"`
|
||||
}
|
||||
|
@ -73,7 +73,7 @@ func TestSecretForDockerRegistryGenerate(t *testing.T) {
|
||||
},
|
||||
expected: &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo-7566tc6mgc",
|
||||
Name: "foo-548cm7fgdh",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
v1.DockerConfigJsonKey: secretData,
|
||||
|
1
vendor/k8s.io/kubernetes/pkg/kubelet/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/kubelet/BUILD
generated
vendored
@ -165,6 +165,7 @@ go_test(
|
||||
"kubelet_pods_windows_test.go",
|
||||
"kubelet_resources_test.go",
|
||||
"kubelet_test.go",
|
||||
"kubelet_volumes_linux_test.go",
|
||||
"kubelet_volumes_test.go",
|
||||
"main_test.go",
|
||||
"oom_watcher_test.go",
|
||||
|
1
vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go
generated
vendored
@ -19,6 +19,7 @@ package config
|
||||
const (
|
||||
DefaultKubeletPodsDirName = "pods"
|
||||
DefaultKubeletVolumesDirName = "volumes"
|
||||
DefaultKubeletVolumeSubpathsDirName = "volume-subpaths"
|
||||
DefaultKubeletVolumeDevicesDirName = "volumeDevices"
|
||||
DefaultKubeletPluginsDirName = "plugins"
|
||||
DefaultKubeletPluginsRegistrationDirName = "plugins_registry"
|
||||
|
22
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go
generated
vendored
22
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go
generated
vendored
@ -25,7 +25,7 @@ import (
|
||||
cadvisorapiv1 "github.com/google/cadvisor/info/v1"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
"k8s.io/kubernetes/pkg/kubelet/config"
|
||||
@ -99,6 +99,13 @@ func (kl *Kubelet) getPodDir(podUID types.UID) string {
|
||||
return filepath.Join(kl.getPodsDir(), string(podUID))
|
||||
}
|
||||
|
||||
// getPodVolumesSubpathsDir returns the full path to the per-pod subpaths directory under
|
||||
// which subpath volumes are created for the specified pod. This directory may not
|
||||
// exist if the pod does not exist or subpaths are not specified.
|
||||
func (kl *Kubelet) getPodVolumeSubpathsDir(podUID types.UID) string {
|
||||
return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletVolumeSubpathsDirName)
|
||||
}
|
||||
|
||||
// getPodVolumesDir returns the full path to the per-pod data directory under
|
||||
// which volumes are created for the specified pod. This directory may not
|
||||
// exist if the pod does not exist.
|
||||
@ -315,6 +322,19 @@ func (kl *Kubelet) getMountedVolumePathListFromDisk(podUID types.UID) ([]string,
|
||||
return mountedVolumes, nil
|
||||
}
|
||||
|
||||
// podVolumesSubpathsDirExists returns true if the pod volume-subpaths directory for
|
||||
// a given pod exists
|
||||
func (kl *Kubelet) podVolumeSubpathsDirExists(podUID types.UID) (bool, error) {
|
||||
podVolDir := kl.getPodVolumeSubpathsDir(podUID)
|
||||
|
||||
if pathExists, pathErr := volumeutil.PathExists(podVolDir); pathErr != nil {
|
||||
return true, fmt.Errorf("Error checking if path %q exists: %v", podVolDir, pathErr)
|
||||
} else if !pathExists {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// GetVersionInfo returns information about the version of cAdvisor in use.
|
||||
func (kl *Kubelet) GetVersionInfo() (*cadvisorapiv1.VersionInfo, error) {
|
||||
return kl.cadvisor.VersionInfo()
|
||||
|
16
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go
generated
vendored
16
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go
generated
vendored
@ -19,7 +19,7 @@ package kubelet
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
@ -114,6 +114,8 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon
|
||||
}
|
||||
// If volumes have not been unmounted/detached, do not delete directory.
|
||||
// Doing so may result in corruption of data.
|
||||
// TODO: getMountedVolumePathListFromDisk() call may be redundant with
|
||||
// kl.getPodVolumePathListFromDisk(). Can this be cleaned up?
|
||||
if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist {
|
||||
klog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up", uid)
|
||||
continue
|
||||
@ -128,6 +130,18 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon
|
||||
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume paths are still present on disk", uid))
|
||||
continue
|
||||
}
|
||||
|
||||
// If there are any volume-subpaths, do not cleanup directories
|
||||
volumeSubpathExists, err := kl.podVolumeSubpathsDirExists(uid)
|
||||
if err != nil {
|
||||
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but error %v occurred during reading of volume-subpaths dir from disk", uid, err))
|
||||
continue
|
||||
}
|
||||
if volumeSubpathExists {
|
||||
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume subpaths are still present on disk", uid))
|
||||
continue
|
||||
}
|
||||
|
||||
klog.V(3).Infof("Orphaned pod %q found, removing", uid)
|
||||
if err := removeall.RemoveAllOneFilesystem(kl.mounter, kl.getPodDir(uid)); err != nil {
|
||||
klog.Errorf("Failed to remove orphaned pod %q dir; err: %v", uid, err)
|
||||
|
156
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes_linux_test.go
generated
vendored
Normal file
156
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes_linux_test.go
generated
vendored
Normal file
@ -0,0 +1,156 @@
|
||||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kubelet
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
_ "k8s.io/kubernetes/pkg/apis/core/install"
|
||||
)
|
||||
|
||||
func validateDirExists(dir string) error {
|
||||
_, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateDirNotExists(dir string) error {
|
||||
_, err := ioutil.ReadDir(dir)
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("dir %q still exists", dir)
|
||||
}
|
||||
|
||||
func TestCleanupOrphanedPodDirs(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
pods []*v1.Pod
|
||||
prepareFunc func(kubelet *Kubelet) error
|
||||
validateFunc func(kubelet *Kubelet) error
|
||||
expectErr bool
|
||||
}{
|
||||
"nothing-to-do": {},
|
||||
"pods-dir-not-found": {
|
||||
prepareFunc: func(kubelet *Kubelet) error {
|
||||
return os.Remove(kubelet.getPodsDir())
|
||||
},
|
||||
expectErr: true,
|
||||
},
|
||||
"pod-doesnot-exist-novolume": {
|
||||
prepareFunc: func(kubelet *Kubelet) error {
|
||||
podDir := kubelet.getPodDir("pod1uid")
|
||||
return os.MkdirAll(filepath.Join(podDir, "not/a/volume"), 0750)
|
||||
},
|
||||
validateFunc: func(kubelet *Kubelet) error {
|
||||
podDir := kubelet.getPodDir("pod1uid")
|
||||
return validateDirNotExists(filepath.Join(podDir, "not"))
|
||||
},
|
||||
},
|
||||
"pod-exists-with-volume": {
|
||||
pods: []*v1.Pod{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod1",
|
||||
UID: "pod1uid",
|
||||
},
|
||||
},
|
||||
},
|
||||
prepareFunc: func(kubelet *Kubelet) error {
|
||||
podDir := kubelet.getPodDir("pod1uid")
|
||||
return os.MkdirAll(filepath.Join(podDir, "volumes/plugin/name"), 0750)
|
||||
},
|
||||
validateFunc: func(kubelet *Kubelet) error {
|
||||
podDir := kubelet.getPodDir("pod1uid")
|
||||
return validateDirExists(filepath.Join(podDir, "volumes/plugin/name"))
|
||||
},
|
||||
},
|
||||
"pod-doesnot-exist-with-volume": {
|
||||
prepareFunc: func(kubelet *Kubelet) error {
|
||||
podDir := kubelet.getPodDir("pod1uid")
|
||||
return os.MkdirAll(filepath.Join(podDir, "volumes/plugin/name"), 0750)
|
||||
},
|
||||
validateFunc: func(kubelet *Kubelet) error {
|
||||
podDir := kubelet.getPodDir("pod1uid")
|
||||
return validateDirExists(filepath.Join(podDir, "volumes/plugin/name"))
|
||||
},
|
||||
},
|
||||
"pod-doesnot-exist-with-subpath": {
|
||||
prepareFunc: func(kubelet *Kubelet) error {
|
||||
podDir := kubelet.getPodDir("pod1uid")
|
||||
return os.MkdirAll(filepath.Join(podDir, "volume-subpaths/volume/container/index"), 0750)
|
||||
},
|
||||
validateFunc: func(kubelet *Kubelet) error {
|
||||
podDir := kubelet.getPodDir("pod1uid")
|
||||
return validateDirExists(filepath.Join(podDir, "volume-subpaths/volume/container/index"))
|
||||
},
|
||||
},
|
||||
"pod-doesnot-exist-with-subpath-top": {
|
||||
prepareFunc: func(kubelet *Kubelet) error {
|
||||
podDir := kubelet.getPodDir("pod1uid")
|
||||
return os.MkdirAll(filepath.Join(podDir, "volume-subpaths"), 0750)
|
||||
},
|
||||
validateFunc: func(kubelet *Kubelet) error {
|
||||
podDir := kubelet.getPodDir("pod1uid")
|
||||
return validateDirExists(filepath.Join(podDir, "volume-subpaths"))
|
||||
},
|
||||
},
|
||||
// TODO: test volume in volume-manager
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
||||
defer testKubelet.Cleanup()
|
||||
kubelet := testKubelet.kubelet
|
||||
|
||||
if tc.prepareFunc != nil {
|
||||
if err := tc.prepareFunc(kubelet); err != nil {
|
||||
t.Fatalf("%s failed preparation: %v", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
err := kubelet.cleanupOrphanedPodDirs(tc.pods, nil)
|
||||
if tc.expectErr && err == nil {
|
||||
t.Errorf("%s failed: expected error, got success", name)
|
||||
}
|
||||
if !tc.expectErr && err != nil {
|
||||
t.Errorf("%s failed: got error %v", name, err)
|
||||
}
|
||||
|
||||
if tc.validateFunc != nil {
|
||||
if err := tc.validateFunc(kubelet); err != nil {
|
||||
t.Errorf("%s failed validation: %v", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
3
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go
generated
vendored
@ -57,6 +57,9 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32
|
||||
message := fmt.Sprintf("CreatePodSandbox for pod %q failed: %v", format.Pod(pod), err)
|
||||
return "", message, err
|
||||
}
|
||||
if runtimeHandler != "" {
|
||||
klog.V(2).Infof("Running pod %s with RuntimeHandler %q", format.Pod(pod), runtimeHandler)
|
||||
}
|
||||
}
|
||||
|
||||
podSandBoxID, err := m.runtimeService.RunPodSandbox(podSandboxConfig, runtimeHandler)
|
||||
|
1
vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/BUILD
generated
vendored
@ -18,6 +18,7 @@ go_library(
|
||||
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
|
||||
"//pkg/kubelet/cm:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/util:go_default_library",
|
||||
"//pkg/kubelet/util/format:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
|
24
vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary.go
generated
vendored
24
vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary.go
generated
vendored
@ -19,7 +19,11 @@ package stats
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/klog"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util"
|
||||
)
|
||||
|
||||
type SummaryProvider interface {
|
||||
@ -32,6 +36,11 @@ type SummaryProvider interface {
|
||||
|
||||
// summaryProviderImpl implements the SummaryProvider interface.
|
||||
type summaryProviderImpl struct {
|
||||
// kubeletCreationTime is the time at which the summaryProvider was created.
|
||||
kubeletCreationTime metav1.Time
|
||||
// systemBootTime is the time at which the system was started
|
||||
systemBootTime metav1.Time
|
||||
|
||||
provider StatsProvider
|
||||
}
|
||||
|
||||
@ -40,7 +49,18 @@ var _ SummaryProvider = &summaryProviderImpl{}
|
||||
// NewSummaryProvider returns a SummaryProvider using the stats provided by the
|
||||
// specified statsProvider.
|
||||
func NewSummaryProvider(statsProvider StatsProvider) SummaryProvider {
|
||||
return &summaryProviderImpl{statsProvider}
|
||||
kubeletCreationTime := metav1.Now()
|
||||
bootTime, err := util.GetBootTime()
|
||||
if err != nil {
|
||||
// bootTime will be zero if we encounter an error getting the boot time.
|
||||
klog.Warningf("Error getting system boot time. Node metrics will have an incorrect start time: %v", err)
|
||||
}
|
||||
|
||||
return &summaryProviderImpl{
|
||||
kubeletCreationTime: kubeletCreationTime,
|
||||
systemBootTime: metav1.NewTime(bootTime),
|
||||
provider: statsProvider,
|
||||
}
|
||||
}
|
||||
|
||||
func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error) {
|
||||
@ -77,7 +97,7 @@ func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error)
|
||||
CPU: rootStats.CPU,
|
||||
Memory: rootStats.Memory,
|
||||
Network: networkStats,
|
||||
StartTime: rootStats.StartTime,
|
||||
StartTime: sp.systemBootTime,
|
||||
Fs: rootFsStats,
|
||||
Runtime: &statsapi.RuntimeStats{ImageFs: imageFsStats},
|
||||
Rlimit: rlimit,
|
||||
|
29
vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_sys_containers.go
generated
vendored
29
vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_sys_containers.go
generated
vendored
@ -21,6 +21,7 @@ package stats
|
||||
import (
|
||||
"k8s.io/klog"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
)
|
||||
@ -29,11 +30,12 @@ func (sp *summaryProviderImpl) GetSystemContainersStats(nodeConfig cm.NodeConfig
|
||||
systemContainers := map[string]struct {
|
||||
name string
|
||||
forceStatsUpdate bool
|
||||
startTime metav1.Time
|
||||
}{
|
||||
statsapi.SystemContainerKubelet: {nodeConfig.KubeletCgroupsName, false},
|
||||
statsapi.SystemContainerRuntime: {nodeConfig.RuntimeCgroupsName, false},
|
||||
statsapi.SystemContainerMisc: {nodeConfig.SystemCgroupsName, false},
|
||||
statsapi.SystemContainerPods: {sp.provider.GetPodCgroupRoot(), updateStats},
|
||||
statsapi.SystemContainerKubelet: {name: nodeConfig.KubeletCgroupsName, forceStatsUpdate: false, startTime: sp.kubeletCreationTime},
|
||||
statsapi.SystemContainerRuntime: {name: nodeConfig.RuntimeCgroupsName, forceStatsUpdate: false},
|
||||
statsapi.SystemContainerMisc: {name: nodeConfig.SystemCgroupsName, forceStatsUpdate: false},
|
||||
statsapi.SystemContainerPods: {name: sp.provider.GetPodCgroupRoot(), forceStatsUpdate: updateStats},
|
||||
}
|
||||
for sys, cont := range systemContainers {
|
||||
// skip if cgroup name is undefined (not all system containers are required)
|
||||
@ -48,6 +50,11 @@ func (sp *summaryProviderImpl) GetSystemContainersStats(nodeConfig cm.NodeConfig
|
||||
// System containers don't have a filesystem associated with them.
|
||||
s.Logs, s.Rootfs = nil, nil
|
||||
s.Name = sys
|
||||
|
||||
// if we know the start time of a system container, use that instead of the start time provided by cAdvisor
|
||||
if !cont.startTime.IsZero() {
|
||||
s.StartTime = cont.startTime
|
||||
}
|
||||
stats = append(stats, *s)
|
||||
}
|
||||
|
||||
@ -58,11 +65,12 @@ func (sp *summaryProviderImpl) GetSystemContainersCPUAndMemoryStats(nodeConfig c
|
||||
systemContainers := map[string]struct {
|
||||
name string
|
||||
forceStatsUpdate bool
|
||||
startTime metav1.Time
|
||||
}{
|
||||
statsapi.SystemContainerKubelet: {nodeConfig.KubeletCgroupsName, false},
|
||||
statsapi.SystemContainerRuntime: {nodeConfig.RuntimeCgroupsName, false},
|
||||
statsapi.SystemContainerMisc: {nodeConfig.SystemCgroupsName, false},
|
||||
statsapi.SystemContainerPods: {sp.provider.GetPodCgroupRoot(), updateStats},
|
||||
statsapi.SystemContainerKubelet: {name: nodeConfig.KubeletCgroupsName, forceStatsUpdate: false, startTime: sp.kubeletCreationTime},
|
||||
statsapi.SystemContainerRuntime: {name: nodeConfig.RuntimeCgroupsName, forceStatsUpdate: false},
|
||||
statsapi.SystemContainerMisc: {name: nodeConfig.SystemCgroupsName, forceStatsUpdate: false},
|
||||
statsapi.SystemContainerPods: {name: sp.provider.GetPodCgroupRoot(), forceStatsUpdate: updateStats},
|
||||
}
|
||||
for sys, cont := range systemContainers {
|
||||
// skip if cgroup name is undefined (not all system containers are required)
|
||||
@ -75,6 +83,11 @@ func (sp *summaryProviderImpl) GetSystemContainersCPUAndMemoryStats(nodeConfig c
|
||||
continue
|
||||
}
|
||||
s.Name = sys
|
||||
|
||||
// if we know the start time of a system container, use that instead of the start time provided by cAdvisor
|
||||
if !cont.startTime.IsZero() {
|
||||
s.StartTime = cont.startTime
|
||||
}
|
||||
stats = append(stats, *s)
|
||||
}
|
||||
|
||||
|
8
vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_test.go
generated
vendored
8
vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_test.go
generated
vendored
@ -83,12 +83,14 @@ func TestSummaryProviderGetStats(t *testing.T) {
|
||||
On("GetCgroupStats", "/kubelet", false).Return(cgroupStatsMap["/kubelet"].cs, cgroupStatsMap["/kubelet"].ns, nil).
|
||||
On("GetCgroupStats", "/kubepods", true).Return(cgroupStatsMap["/pods"].cs, cgroupStatsMap["/pods"].ns, nil)
|
||||
|
||||
provider := NewSummaryProvider(mockStatsProvider)
|
||||
kubeletCreationTime := metav1.Now()
|
||||
systemBootTime := metav1.Now()
|
||||
provider := summaryProviderImpl{kubeletCreationTime: kubeletCreationTime, systemBootTime: systemBootTime, provider: mockStatsProvider}
|
||||
summary, err := provider.Get(true)
|
||||
assert.NoError(err)
|
||||
|
||||
assert.Equal(summary.Node.NodeName, "test-node")
|
||||
assert.Equal(summary.Node.StartTime, cgroupStatsMap["/"].cs.StartTime)
|
||||
assert.Equal(summary.Node.StartTime, systemBootTime)
|
||||
assert.Equal(summary.Node.CPU, cgroupStatsMap["/"].cs.CPU)
|
||||
assert.Equal(summary.Node.Memory, cgroupStatsMap["/"].cs.Memory)
|
||||
assert.Equal(summary.Node.Network, cgroupStatsMap["/"].ns)
|
||||
@ -98,7 +100,7 @@ func TestSummaryProviderGetStats(t *testing.T) {
|
||||
assert.Equal(len(summary.Node.SystemContainers), 4)
|
||||
assert.Contains(summary.Node.SystemContainers, statsapi.ContainerStats{
|
||||
Name: "kubelet",
|
||||
StartTime: cgroupStatsMap["/kubelet"].cs.StartTime,
|
||||
StartTime: kubeletCreationTime,
|
||||
CPU: cgroupStatsMap["/kubelet"].cs.CPU,
|
||||
Memory: cgroupStatsMap["/kubelet"].cs.Memory,
|
||||
Accelerators: cgroupStatsMap["/kubelet"].cs.Accelerators,
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD
generated
vendored
@ -34,6 +34,8 @@ go_test(
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"boottime_util_darwin.go",
|
||||
"boottime_util_linux.go",
|
||||
"doc.go",
|
||||
"util.go",
|
||||
"util_unix.go",
|
||||
|
44
vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_darwin.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
// +build darwin

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
"fmt"
"syscall"
"time"
"unsafe"

"golang.org/x/sys/unix"
)

// GetBootTime returns the time at which the machine was started, truncated to the nearest second
func GetBootTime() (time.Time, error) {
output, err := unix.SysctlRaw("kern.boottime")
if err != nil {
return time.Time{}, err
}
var timeval syscall.Timeval
if len(output) != int(unsafe.Sizeof(timeval)) {
return time.Time{}, fmt.Errorf("unexpected output when calling syscall kern.bootime. Expected len(output) to be %v, but got %v",
int(unsafe.Sizeof(timeval)), len(output))
}
timeval = *(*syscall.Timeval)(unsafe.Pointer(&output[0]))
sec, nsec := timeval.Unix()
return time.Unix(sec, nsec).Truncate(time.Second), nil
}
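The new darwin implementation above reads the raw kern.boottime sysctl and reinterprets the bytes as a syscall.Timeval. As a hedged sketch of how a caller might consume the resulting helper (the import path comes from this diff; the program itself is illustrative only):

package main

import (
	"fmt"

	kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
)

func main() {
	// GetBootTime is platform-specific; the darwin, linux/freebsd and windows
	// files added in this change each supply their own implementation.
	bootTime, err := kubeletutil.GetBootTime()
	if err != nil {
		fmt.Println("could not determine boot time:", err)
		return
	}
	fmt.Println("machine booted at:", bootTime)
}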
36
vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_linux.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
// +build freebsd linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
"fmt"
"time"

"golang.org/x/sys/unix"
)

// GetBootTime returns the time at which the machine was started, truncated to the nearest second
func GetBootTime() (time.Time, error) {
currentTime := time.Now()
var info unix.Sysinfo_t
if err := unix.Sysinfo(&info); err != nil {
return time.Time{}, fmt.Errorf("error getting system uptime: %s", err)
}
return currentTime.Add(-time.Duration(info.Uptime) * time.Second).Truncate(time.Second), nil
}
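The linux/freebsd variant above derives the boot time indirectly: it subtracts the uptime reported by sysinfo(2) from the current time and truncates to whole seconds so repeated calls stay stable. A self-contained sketch of that arithmetic (the helper name and sample values are invented for illustration):

package main

import (
	"fmt"
	"time"
)

// bootTimeFromUptime mirrors the derivation used in boottime_util_linux.go:
// boot time is "now" minus the reported uptime, truncated to whole seconds.
func bootTimeFromUptime(now time.Time, uptime time.Duration) time.Time {
	return now.Add(-uptime).Truncate(time.Second)
}

func main() {
	now := time.Date(2018, 11, 1, 12, 0, 0, 500000000, time.UTC)
	fmt.Println(bootTimeFromUptime(now, 90*time.Minute)) // 2018-11-01 10:30:00 +0000 UTC
}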
5
vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go
generated
vendored
@ -45,3 +45,8 @@ func UnlockPath(fileHandles []uintptr) {
func LocalEndpoint(path, file string) string {
return ""
}

// GetBootTime empty implementation
func GetBootTime() (time.Time, error) {
return time.Time{}, fmt.Errorf("GetBootTime is unsupported in this build")
}
13
vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go
generated
vendored
@ -23,6 +23,7 @@ import (
"net"
"net/url"
"strings"
"syscall"
"time"

"github.com/Microsoft/go-winio"
@ -112,3 +113,15 @@ func LocalEndpoint(path, file string) string {
}
return u.String() + "//./pipe/" + file
}

var tickCount = syscall.NewLazyDLL("kernel32.dll").NewProc("GetTickCount64")

// GetBootTime returns the time at which the machine was started, truncated to the nearest second
func GetBootTime() (time.Time, error) {
currentTime := time.Now()
output, _, err := tickCount.Call()
if errno, ok := err.(syscall.Errno); !ok || errno != 0 {
return time.Time{}, err
}
return currentTime.Add(-time.Duration(output) * time.Millisecond).Truncate(time.Second), nil
}
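All three GetBootTime implementations above share one contract: a non-zero time in the past, or an error. A hedged sketch of a cross-platform smoke test exercising only that contract (hypothetical test, not part of this change):

package util

import (
	"testing"
	"time"
)

// TestGetBootTimeSanity checks only the contract shared by the darwin,
// linux/freebsd and windows implementations of GetBootTime.
func TestGetBootTimeSanity(t *testing.T) {
	bootTime, err := GetBootTime()
	if err != nil {
		t.Fatalf("GetBootTime failed: %v", err)
	}
	if bootTime.IsZero() {
		t.Fatal("expected a non-zero boot time")
	}
	if bootTime.After(time.Now()) {
		t.Fatalf("boot time %v is in the future", bootTime)
	}
}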
13
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world.go
generated
vendored
@ -203,11 +203,12 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(

var volumeName v1.UniqueVolumeName

// The unique volume name used depends on whether the volume is attachable
// The unique volume name used depends on whether the volume is attachable/device-mountable
// or not.
attachable := dsw.isAttachableVolume(volumeSpec)
if attachable {
// For attachable volumes, use the unique volume name as reported by
deviceMountable := dsw.isDeviceMountableVolume(volumeSpec)
if attachable || deviceMountable {
// For attachable/device-mountable volumes, use the unique volume name as reported by
// the plugin.
volumeName, err =
util.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
@ -219,13 +220,11 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(
err)
}
} else {
// For non-attachable volumes, generate a unique name based on the pod
// For non-attachable and non-device-mountable volumes, generate a unique name based on the pod
// namespace and name and the name of the volume within the pod.
volumeName = util.GetUniqueVolumeNameForNonAttachableVolume(podName, volumePlugin, volumeSpec)
volumeName = util.GetUniqueVolumeNameFromSpecWithPod(podName, volumePlugin, volumeSpec)
}

deviceMountable := dsw.isDeviceMountableVolume(volumeSpec)

if _, volumeExists := dsw.volumesToMount[volumeName]; !volumeExists {
dsw.volumesToMount[volumeName] = volumeToMount{
volumeName: volumeName,
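The change above widens the plugin-scoped naming branch from attachable volumes to attachable or device-mountable volumes; everything else keeps a pod-scoped name. A standalone sketch of that rule (the name formats are invented for illustration and do not match the real plugin output):

package main

import "fmt"

// uniqueVolumeName illustrates the branching in AddPodToVolume: attachable or
// device-mountable volumes share one name per volume, while other volumes get
// a name scoped to the pod that uses them.
func uniqueVolumeName(podName, pluginName, volumeName string, attachableOrDeviceMountable bool) string {
	if attachableOrDeviceMountable {
		return fmt.Sprintf("%s/%s", pluginName, volumeName)
	}
	return fmt.Sprintf("%s/%s-%s", pluginName, podName, volumeName)
}

func main() {
	fmt.Println(uniqueVolumeName("pod1", "attachable", "vol", true)) // attachable/vol
	fmt.Println(uniqueVolumeName("pod2", "attachable", "vol", true)) // attachable/vol (same name)
	fmt.Println(uniqueVolumeName("pod1", "basic", "vol", false))     // basic/pod1-vol
	fmt.Println(uniqueVolumeName("pod2", "basic", "vol", false))     // basic/pod2-vol (different name)
}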
162
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go
generated
vendored
162
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go
generated
vendored
@ -117,6 +117,168 @@ func Test_AddPodToVolume_Positive_ExistingPodExistingVolume(t *testing.T) {
|
||||
verifyVolumeExistsWithSpecNameInVolumeDsw(t, podName, volumeSpec.Name(), dsw)
|
||||
}
|
||||
|
||||
// Call AddPodToVolume() on different pods for different kinds of volumes
// Verifies generated names are the same for different pods if the volume is device-mountable or attachable
// Verifies generated names are different for different pods if the volume is neither device-mountable nor attachable
func Test_AddPodToVolume_Positive_NamesForDifferentPodsAndDifferentVolumes(t *testing.T) {
|
||||
// Arrange
|
||||
fakeVolumeHost := volumetesting.NewFakeVolumeHost(
|
||||
"", /* rootDir */
|
||||
nil, /* kubeClient */
|
||||
nil, /* plugins */
|
||||
)
|
||||
plugins := []volume.VolumePlugin{
|
||||
&volumetesting.FakeBasicVolumePlugin{
|
||||
Plugin: volumetesting.FakeVolumePlugin{
|
||||
PluginName: "basic",
|
||||
},
|
||||
},
|
||||
&volumetesting.FakeDeviceMountableVolumePlugin{
|
||||
FakeBasicVolumePlugin: volumetesting.FakeBasicVolumePlugin{
|
||||
Plugin: volumetesting.FakeVolumePlugin{
|
||||
PluginName: "device-mountable",
|
||||
},
|
||||
},
|
||||
},
|
||||
&volumetesting.FakeAttachableVolumePlugin{
|
||||
FakeDeviceMountableVolumePlugin: volumetesting.FakeDeviceMountableVolumePlugin{
|
||||
FakeBasicVolumePlugin: volumetesting.FakeBasicVolumePlugin{
|
||||
Plugin: volumetesting.FakeVolumePlugin{
|
||||
PluginName: "attachable",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
volumePluginMgr := volume.VolumePluginMgr{}
|
||||
volumePluginMgr.InitPlugins(plugins, nil /* prober */, fakeVolumeHost)
|
||||
dsw := NewDesiredStateOfWorld(&volumePluginMgr)
|
||||
|
||||
testcases := map[string]struct {
|
||||
pod1 *v1.Pod
|
||||
pod2 *v1.Pod
|
||||
same bool
|
||||
}{
|
||||
"basic": {
|
||||
&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod1",
|
||||
UID: "pod1uid",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "basic",
|
||||
VolumeSource: v1.VolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod2",
|
||||
UID: "pod2uid",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "basic",
|
||||
VolumeSource: v1.VolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
false,
|
||||
},
|
||||
"device-mountable": {
|
||||
&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod1",
|
||||
UID: "pod1uid",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "device-mountable",
|
||||
VolumeSource: v1.VolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod2",
|
||||
UID: "pod2uid",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "device-mountable",
|
||||
VolumeSource: v1.VolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
true,
|
||||
},
|
||||
"attachable": {
|
||||
&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod1",
|
||||
UID: "pod1uid",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "attachable",
|
||||
VolumeSource: v1.VolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod2",
|
||||
UID: "pod2uid",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "attachable",
|
||||
VolumeSource: v1.VolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
true,
|
||||
},
|
||||
}
|
||||
|
||||
// Act & Assert
|
||||
for name, v := range testcases {
|
||||
volumeSpec1 := &volume.Spec{Volume: &v.pod1.Spec.Volumes[0]}
|
||||
volumeSpec2 := &volume.Spec{Volume: &v.pod2.Spec.Volumes[0]}
|
||||
generatedVolumeName1, err1 := dsw.AddPodToVolume(util.GetUniquePodName(v.pod1), v.pod1, volumeSpec1, volumeSpec1.Name(), "")
|
||||
generatedVolumeName2, err2 := dsw.AddPodToVolume(util.GetUniquePodName(v.pod2), v.pod2, volumeSpec2, volumeSpec2.Name(), "")
|
||||
if err1 != nil {
|
||||
t.Fatalf("test %q: AddPodToVolume failed. Expected: <no error> Actual: <%v>", name, err1)
|
||||
}
|
||||
if err2 != nil {
|
||||
t.Fatalf("test %q: AddPodToVolume failed. Expected: <no error> Actual: <%v>", name, err2)
|
||||
}
|
||||
if v.same {
|
||||
if generatedVolumeName1 != generatedVolumeName2 {
|
||||
t.Fatalf("test %q: AddPodToVolume should generate same names, but got %q != %q", name, generatedVolumeName1, generatedVolumeName2)
|
||||
}
|
||||
} else {
|
||||
if generatedVolumeName1 == generatedVolumeName2 {
|
||||
t.Fatalf("test %q: AddPodToVolume should generate different names, but got %q == %q", name, generatedVolumeName1, generatedVolumeName2)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Populates data struct with a new volume/pod
// Calls DeletePodFromVolume() to remove the pod
// Verifies newly added pod/volume are deleted
|
||||
|
8
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go
generated
vendored
8
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go
generated
vendored
@ -455,6 +455,10 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
deviceMountablePlugin, err := rc.volumePluginMgr.FindDeviceMountablePluginByName(volume.pluginName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create pod object
|
||||
pod := &v1.Pod{
|
||||
@ -480,13 +484,13 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
|
||||
}
|
||||
|
||||
var uniqueVolumeName v1.UniqueVolumeName
|
||||
if attachablePlugin != nil {
|
||||
if attachablePlugin != nil || deviceMountablePlugin != nil {
|
||||
uniqueVolumeName, err = util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
uniqueVolumeName = util.GetUniqueVolumeNameForNonAttachableVolume(volume.podName, plugin, volumeSpec)
|
||||
uniqueVolumeName = util.GetUniqueVolumeNameFromSpecWithPod(volume.podName, plugin, volumeSpec)
|
||||
}
|
||||
// Check existence of mount point for filesystem volume or symbolic link for block volume
|
||||
isExist, checkErr := rc.operationExecutor.CheckVolumeExistenceOperation(volumeSpec, volume.mountPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin)
|
||||
|
12
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/graceful_termination.go
generated
vendored
@ -75,10 +75,10 @@ func (q *graceTerminateRSList) remove(rs *listItem) bool {

uniqueRS := rs.String()
if _, ok := q.list[uniqueRS]; ok {
return false
delete(q.list, uniqueRS)
return true
}
delete(q.list, uniqueRS)
return true
return false
}

func (q *graceTerminateRSList) flushList(handler func(rsToDelete *listItem) (bool, error)) bool {
@ -164,7 +164,11 @@ func (m *GracefulTerminationManager) deleteRsFunc(rsToDelete *listItem) (bool, e
}
for _, rs := range rss {
if rsToDelete.RealServer.Equal(rs) {
if rs.ActiveConn != 0 {
// Delete RS with no connections
// For UDP, ActiveConn is always 0
// For TCP, InactiveConn are connections not in ESTABLISHED state
if rs.ActiveConn+rs.InactiveConn != 0 {
klog.Infof("Not deleting, RS %v: %v ActiveConn, %v InactiveConn", rsToDelete.String(), rs.ActiveConn, rs.InactiveConn)
return false, nil
}
klog.Infof("Deleting rs: %s", rsToDelete.String())
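The remove() fix above inverts the earlier logic, which returned false precisely when the entry was present. A minimal sketch of the corrected semantics, using a plain string-keyed map in place of the real graceTerminateRSList:

package main

import "fmt"

type gracefulList struct {
	list map[string]bool
}

// remove deletes the entry only when it exists and reports whether anything
// was actually removed, matching the corrected behavior above.
func (q *gracefulList) remove(key string) bool {
	if _, ok := q.list[key]; ok {
		delete(q.list, key)
		return true
	}
	return false
}

func main() {
	q := &gracefulList{list: map[string]bool{"10.0.0.1:80/TCP/10.0.0.2:8080": true}}
	fmt.Println(q.remove("10.0.0.1:80/TCP/10.0.0.2:8080")) // true, entry deleted
	fmt.Println(q.remove("10.0.0.1:80/TCP/10.0.0.2:8080")) // false, already gone
}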
84
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go
generated
vendored
84
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go
generated
vendored
@ -162,6 +162,8 @@ const sysctlRouteLocalnet = "net/ipv4/conf/all/route_localnet"
|
||||
const sysctlBridgeCallIPTables = "net/bridge/bridge-nf-call-iptables"
|
||||
const sysctlVSConnTrack = "net/ipv4/vs/conntrack"
|
||||
const sysctlConnReuse = "net/ipv4/vs/conn_reuse_mode"
|
||||
const sysctlExpireNoDestConn = "net/ipv4/vs/expire_nodest_conn"
|
||||
const sysctlExpireQuiescentTemplate = "net/ipv4/vs/expire_quiescent_template"
|
||||
const sysctlForward = "net/ipv4/ip_forward"
|
||||
const sysctlArpIgnore = "net/ipv4/conf/all/arp_ignore"
|
||||
const sysctlArpAnnounce = "net/ipv4/conf/all/arp_announce"
|
||||
@ -321,6 +323,20 @@ func NewProxier(ipt utiliptables.Interface,
|
||||
}
|
||||
}
|
||||
|
||||
// Set the expire_nodest_conn sysctl we need for
|
||||
if val, _ := sysctl.GetSysctl(sysctlExpireNoDestConn); val != 1 {
|
||||
if err := sysctl.SetSysctl(sysctlExpireNoDestConn, 1); err != nil {
|
||||
return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlExpireNoDestConn, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the expire_quiescent_template sysctl we need for
|
||||
if val, _ := sysctl.GetSysctl(sysctlExpireQuiescentTemplate); val != 1 {
|
||||
if err := sysctl.SetSysctl(sysctlExpireQuiescentTemplate, 1); err != nil {
|
||||
return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlExpireQuiescentTemplate, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the ip_forward sysctl we need for
|
||||
if val, _ := sysctl.GetSysctl(sysctlForward); val != 1 {
|
||||
if err := sysctl.SetSysctl(sysctlForward, 1); err != nil {
|
||||
@ -1190,7 +1206,15 @@ func (proxier *Proxier) syncProxyRules() {
|
||||
}
|
||||
proxier.portsMap = replacementPortsMap
|
||||
|
||||
// Clean up legacy IPVS services
|
||||
// Get legacy bind address
|
||||
// currentBindAddrs represents ip addresses bind to DefaultDummyDevice from the system
|
||||
currentBindAddrs, err := proxier.netlinkHandle.ListBindAddress(DefaultDummyDevice)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to get bind address, err: %v", err)
|
||||
}
|
||||
legacyBindAddrs := proxier.getLegacyBindAddr(activeBindAddrs, currentBindAddrs)
|
||||
|
||||
// Clean up legacy IPVS services and unbind addresses
|
||||
appliedSvcs, err := proxier.ipvs.GetVirtualServers()
|
||||
if err == nil {
|
||||
for _, appliedSvc := range appliedSvcs {
|
||||
@ -1199,15 +1223,7 @@ func (proxier *Proxier) syncProxyRules() {
|
||||
} else {
|
||||
klog.Errorf("Failed to get ipvs service, err: %v", err)
|
||||
}
|
||||
proxier.cleanLegacyService(activeIPVSServices, currentIPVSServices)
|
||||
|
||||
// Clean up legacy bind address
|
||||
// currentBindAddrs represents ip addresses bind to DefaultDummyDevice from the system
|
||||
currentBindAddrs, err := proxier.netlinkHandle.ListBindAddress(DefaultDummyDevice)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to get bind address, err: %v", err)
|
||||
}
|
||||
proxier.cleanLegacyBindAddr(activeBindAddrs, currentBindAddrs)
|
||||
proxier.cleanLegacyService(activeIPVSServices, currentIPVSServices, legacyBindAddrs)
|
||||
|
||||
// Update healthz timestamp
|
||||
if proxier.healthzServer != nil {
|
||||
@ -1602,32 +1618,41 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
|
||||
Port: uint16(portNum),
|
||||
}
|
||||
|
||||
klog.V(5).Infof("Using graceful delete to delete: %v", delDest)
|
||||
klog.V(5).Infof("Using graceful delete to delete: %v", uniqueRS)
|
||||
err = proxier.gracefuldeleteManager.GracefulDeleteRS(appliedVirtualServer, delDest)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to delete destination: %v, error: %v", delDest, err)
|
||||
klog.Errorf("Failed to delete destination: %v, error: %v", uniqueRS, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, currentServices map[string]*utilipvs.VirtualServer) {
|
||||
func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, currentServices map[string]*utilipvs.VirtualServer, legacyBindAddrs map[string]bool) {
|
||||
for cs := range currentServices {
|
||||
svc := currentServices[cs]
|
||||
if _, ok := activeServices[cs]; !ok {
|
||||
// This service was not processed in the latest sync loop so before deleting it,
|
||||
// make sure it does not fall within an excluded CIDR range.
|
||||
okayToDelete := true
|
||||
rsList, _ := proxier.ipvs.GetRealServers(svc)
|
||||
|
||||
// If we still have real servers graceful termination is not done
|
||||
if len(rsList) > 0 {
|
||||
okayToDelete = false
|
||||
}
|
||||
// Applying graceful termination to all real servers
|
||||
for _, rs := range rsList {
|
||||
uniqueRS := GetUniqueRSName(svc, rs)
|
||||
// if there are in terminating real server in this service, then handle it later
|
||||
// If RS is already in the graceful termination list, no need to add it again
|
||||
if proxier.gracefuldeleteManager.InTerminationList(uniqueRS) {
|
||||
okayToDelete = false
|
||||
break
|
||||
continue
|
||||
}
|
||||
klog.V(5).Infof("Using graceful delete to delete: %v", uniqueRS)
|
||||
if err := proxier.gracefuldeleteManager.GracefulDeleteRS(svc, rs); err != nil {
|
||||
klog.Errorf("Failed to delete destination: %v, error: %v", uniqueRS, err)
|
||||
}
|
||||
}
|
||||
// make sure it does not fall within an excluded CIDR range.
|
||||
for _, excludedCIDR := range proxier.excludeCIDRs {
|
||||
// Any validation of this CIDR already should have occurred.
|
||||
_, n, _ := net.ParseCIDR(excludedCIDR)
|
||||
@ -1637,26 +1662,33 @@ func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, curre
|
||||
}
|
||||
}
|
||||
if okayToDelete {
|
||||
klog.V(4).Infof("Delete service %s", svc.String())
|
||||
if err := proxier.ipvs.DeleteVirtualServer(svc); err != nil {
|
||||
klog.Errorf("Failed to delete service, error: %v", err)
|
||||
klog.Errorf("Failed to delete service %s, error: %v", svc.String(), err)
|
||||
}
|
||||
addr := svc.Address.String()
|
||||
if _, ok := legacyBindAddrs[addr]; ok {
|
||||
klog.V(4).Infof("Unbinding address %s", addr)
|
||||
if err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice); err != nil {
|
||||
klog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err)
|
||||
} else {
|
||||
// In case we delete a multi-port service, avoid trying to unbind multiple times
|
||||
delete(legacyBindAddrs, addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (proxier *Proxier) cleanLegacyBindAddr(activeBindAddrs map[string]bool, currentBindAddrs []string) {
|
||||
func (proxier *Proxier) getLegacyBindAddr(activeBindAddrs map[string]bool, currentBindAddrs []string) map[string]bool {
|
||||
legacyAddrs := make(map[string]bool)
|
||||
for _, addr := range currentBindAddrs {
|
||||
if _, ok := activeBindAddrs[addr]; !ok {
|
||||
// This address was not processed in the latest sync loop
|
||||
klog.V(4).Infof("Unbind addr %s", addr)
|
||||
err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice)
|
||||
// Ignore no such address error when try to unbind address
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err)
|
||||
}
|
||||
legacyAddrs[addr] = true
|
||||
}
|
||||
}
|
||||
return legacyAddrs
|
||||
}
|
||||
|
||||
// Join all words with spaces, terminate with newline and write to buff.
|
||||
|
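Part of the proxier change above computes the legacy bind addresses up front so that cleanLegacyService can unbind an address as soon as its last virtual server is deleted. A hedged sketch of the set difference involved (simplified; the real getLegacyBindAddr also goes through the netlink handle):

package main

import "fmt"

// legacyBindAddrs returns every address currently bound to the dummy device
// that was not part of the latest sync loop; those are cleanup candidates.
func legacyBindAddrs(active map[string]bool, current []string) map[string]bool {
	legacy := make(map[string]bool)
	for _, addr := range current {
		if !active[addr] {
			legacy[addr] = true
		}
	}
	return legacy
}

func main() {
	active := map[string]bool{"1.1.1.1": true, "2.2.2.2": true}
	current := []string{"1.1.1.1", "2.2.2.2", "5.5.5.5", "6.6.6.6"}
	fmt.Println(legacyBindAddrs(active, current)) // map[5.5.5.5:true 6.6.6.6:true]
}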
108
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier_test.go
generated
vendored
108
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier_test.go
generated
vendored
@ -23,7 +23,6 @@ import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@ -126,7 +125,7 @@ func (fakeSysctl *FakeSysctl) SetSysctl(sysctl string, newVal int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset utilipset.Interface, nodeIPs []net.IP) *Proxier {
|
||||
func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset utilipset.Interface, nodeIPs []net.IP, excludeCIDRs []string) *Proxier {
|
||||
fcmd := fakeexec.FakeCmd{
|
||||
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
|
||||
func() ([]byte, error) { return []byte("dummy device have been created"), nil },
|
||||
@ -151,7 +150,7 @@ func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset u
|
||||
serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, nil, nil),
|
||||
endpointsMap: make(proxy.EndpointsMap),
|
||||
endpointsChanges: proxy.NewEndpointChangeTracker(testHostname, nil, nil, nil),
|
||||
excludeCIDRs: make([]string, 0),
|
||||
excludeCIDRs: excludeCIDRs,
|
||||
iptables: ipt,
|
||||
ipvs: ipvs,
|
||||
ipset: ipset,
|
||||
@ -228,7 +227,7 @@ func TestCleanupLeftovers(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcNodePort := 3001
|
||||
@ -418,7 +417,7 @@ func TestNodePortUDP(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP})
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}, nil)
|
||||
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
@ -495,7 +494,7 @@ func TestNodePort(t *testing.T) {
|
||||
nodeIPv4 := net.ParseIP("100.101.102.103")
|
||||
nodeIPv6 := net.ParseIP("2001:db8::1:1")
|
||||
nodeIPs := sets.NewString(nodeIPv4.String(), nodeIPv6.String())
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIPv4, nodeIPv6})
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIPv4, nodeIPv6}, nil)
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcNodePort := 3001
|
||||
@ -573,7 +572,7 @@ func TestNodePortNoEndpoint(t *testing.T) {
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
nodeIP := net.ParseIP("100.101.102.103")
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP})
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}, nil)
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcNodePort := 3001
|
||||
@ -628,7 +627,7 @@ func TestClusterIPNoEndpoint(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcPortName := proxy.ServicePortName{
|
||||
@ -672,7 +671,7 @@ func TestClusterIP(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
|
||||
|
||||
svcIPv4 := "10.20.30.41"
|
||||
svcPortV4 := 80
|
||||
@ -779,7 +778,7 @@ func TestExternalIPsNoEndpoint(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcExternalIPs := "50.60.70.81"
|
||||
@ -834,7 +833,7 @@ func TestExternalIPs(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcExternalIPs := sets.NewString("50.60.70.81", "2012::51", "127.0.0.1")
|
||||
@ -1338,7 +1337,7 @@ func TestBuildServiceMapAddRemove(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
|
||||
|
||||
services := []*v1.Service{
|
||||
makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
|
||||
@ -1448,7 +1447,7 @@ func TestBuildServiceMapServiceHeadless(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
|
||||
|
||||
makeServiceMap(fp,
|
||||
makeTestService("somewhere-else", "headless", func(svc *v1.Service) {
|
||||
@ -1487,7 +1486,7 @@ func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
|
||||
|
||||
makeServiceMap(fp,
|
||||
makeTestService("somewhere-else", "external-name", func(svc *v1.Service) {
|
||||
@ -1515,7 +1514,7 @@ func TestBuildServiceMapServiceUpdate(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
|
||||
|
||||
servicev1 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
|
||||
svc.Spec.Type = v1.ServiceTypeClusterIP
|
||||
@ -1599,7 +1598,7 @@ func TestSessionAffinity(t *testing.T) {
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
nodeIP := net.ParseIP("100.101.102.103")
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP})
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}, nil)
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcNodePort := 3001
|
||||
@ -2462,7 +2461,7 @@ func Test_updateEndpointsMap(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
|
||||
fp.hostname = nodeName
|
||||
|
||||
// First check that after adding all previous versions of endpoints,
|
||||
@ -2706,7 +2705,7 @@ func Test_syncService(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
proxier := NewFakeProxier(ipt, ipvs, ipset, nil)
|
||||
proxier := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
|
||||
|
||||
proxier.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
|
||||
if testCases[i].oldVirtualServer != nil {
|
||||
@ -2736,7 +2735,7 @@ func buildFakeProxier() (*iptablestest.FakeIPTables, *Proxier) {
|
||||
ipt := iptablestest.NewFake()
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
return ipt, NewFakeProxier(ipt, ipvs, ipset, nil)
|
||||
return ipt, NewFakeProxier(ipt, ipvs, ipset, nil, nil)
|
||||
}
|
||||
|
||||
func hasJump(rules []iptablestest.Rule, destChain, ipSet string) bool {
|
||||
@ -2806,33 +2805,10 @@ func checkIPVS(t *testing.T, fp *Proxier, vs *netlinktest.ExpectedVirtualServer)
|
||||
}
|
||||
|
||||
func TestCleanLegacyService(t *testing.T) {
|
||||
execer := exec.New()
|
||||
ipt := iptablestest.NewFake()
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
excludeCIDRs := []string{"3.3.3.0/24", "4.4.4.0/24"}
|
||||
proxier, err := NewProxier(
|
||||
ipt,
|
||||
ipvs,
|
||||
ipset,
|
||||
NewFakeSysctl(),
|
||||
execer,
|
||||
250*time.Millisecond,
|
||||
100*time.Millisecond,
|
||||
excludeCIDRs,
|
||||
false,
|
||||
0,
|
||||
"10.0.0.0/24",
|
||||
testHostname,
|
||||
net.ParseIP("127.0.0.1"),
|
||||
nil,
|
||||
nil,
|
||||
DefaultScheduler,
|
||||
make([]string, 0),
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil, []string{"3.3.3.0/24", "4.4.4.0/24"})
|
||||
|
||||
// All ipvs services that were processed in the latest sync loop.
|
||||
activeServices := map[string]bool{"ipvs0": true, "ipvs1": true}
|
||||
@ -2888,15 +2864,22 @@ func TestCleanLegacyService(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for v := range currentServices {
|
||||
proxier.ipvs.AddVirtualServer(currentServices[v])
|
||||
fp.ipvs.AddVirtualServer(currentServices[v])
|
||||
}
|
||||
proxier.cleanLegacyService(activeServices, currentServices)
|
||||
|
||||
fp.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
|
||||
activeBindAddrs := map[string]bool{"1.1.1.1": true, "2.2.2.2": true, "3.3.3.3": true, "4.4.4.4": true}
|
||||
currentBindAddrs := []string{"1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4", "5.5.5.5", "6.6.6.6"}
|
||||
for i := range currentBindAddrs {
|
||||
fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], DefaultDummyDevice)
|
||||
}
|
||||
|
||||
fp.cleanLegacyService(activeServices, currentServices, map[string]bool{"5.5.5.5": true, "6.6.6.6": true})
|
||||
// ipvs4 and ipvs5 should have been cleaned.
|
||||
remainingVirtualServers, _ := proxier.ipvs.GetVirtualServers()
|
||||
remainingVirtualServers, _ := fp.ipvs.GetVirtualServers()
|
||||
if len(remainingVirtualServers) != 4 {
|
||||
t.Errorf("Expected number of remaining IPVS services after cleanup to be %v. Got %v", 4, len(remainingVirtualServers))
|
||||
}
|
||||
|
||||
for _, vs := range remainingVirtualServers {
|
||||
// Checking that ipvs4 and ipvs5 were removed.
|
||||
if vs.Port == 57 {
|
||||
@ -2906,33 +2889,13 @@ func TestCleanLegacyService(t *testing.T) {
|
||||
t.Errorf("Expected ipvs5 to be removed after cleanup. It still remains")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanLegacyBindAddr(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil)
|
||||
|
||||
// All ipvs service addresses that were bound to ipvs0 in the latest sync loop.
|
||||
activeBindAddrs := map[string]bool{"1.2.3.4": true, "1002:ab8::2:1": true}
|
||||
// All service addresses that were bound to ipvs0 in system
|
||||
currentBindAddrs := []string{"1.2.3.4", "1.2.3.5", "1.2.3.6", "1002:ab8::2:1", "1002:ab8::2:2"}
|
||||
|
||||
fp.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
|
||||
|
||||
for i := range currentBindAddrs {
|
||||
fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], DefaultDummyDevice)
|
||||
}
|
||||
fp.cleanLegacyBindAddr(activeBindAddrs, currentBindAddrs)
|
||||
|
||||
// Addresses 5.5.5.5 and 6.6.6.6 should not be bound any more
|
||||
remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
|
||||
// should only remain "1.2.3.4" and "1002:ab8::2:1"
|
||||
if len(remainingAddrs) != 2 {
|
||||
t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 2, len(remainingAddrs))
|
||||
if len(remainingAddrs) != 4 {
|
||||
t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 4, len(remainingAddrs))
|
||||
}
|
||||
|
||||
// check that address "1.2.3.4" and "1002:ab8::2:1" remain
|
||||
// check that address "1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4" are still bound
|
||||
remainingAddrsMap := make(map[string]bool)
|
||||
for i := range remainingAddrs {
|
||||
remainingAddrsMap[remainingAddrs[i]] = true
|
||||
@ -2940,13 +2903,14 @@ func TestCleanLegacyBindAddr(t *testing.T) {
|
||||
if !reflect.DeepEqual(activeBindAddrs, remainingAddrsMap) {
|
||||
t.Errorf("Expected remainingAddrsMap %v, got %v", activeBindAddrs, remainingAddrsMap)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestMultiPortServiceBindAddr(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
ipvs := ipvstest.NewFake()
|
||||
ipset := ipsettest.NewFake(testIPSetVersion)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil)
|
||||
fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil)
|
||||
|
||||
service1 := makeTestService("ns1", "svc1", func(svc *v1.Service) {
|
||||
svc.Spec.Type = v1.ServiceTypeClusterIP
|
||||
|
47
vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go
generated
vendored
47
vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go
generated
vendored
@ -17,6 +17,8 @@ limitations under the License.
|
||||
package util
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
@ -35,6 +37,11 @@ const (
|
||||
IPv6ZeroCIDR = "::/0"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrAddressNotAllowed = errors.New("address not allowed")
|
||||
ErrNoAddresses = errors.New("No addresses for hostname")
|
||||
)
|
||||
|
||||
func IsZeroCIDR(cidr string) bool {
|
||||
if cidr == IPv4ZeroCIDR || cidr == IPv6ZeroCIDR {
|
||||
return true
|
||||
@ -42,6 +49,46 @@ func IsZeroCIDR(cidr string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// IsProxyableIP checks if a given IP address is permitted to be proxied
|
||||
func IsProxyableIP(ip string) error {
|
||||
netIP := net.ParseIP(ip)
|
||||
if netIP == nil {
|
||||
return ErrAddressNotAllowed
|
||||
}
|
||||
return isProxyableIP(netIP)
|
||||
}
|
||||
|
||||
func isProxyableIP(ip net.IP) error {
|
||||
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() || ip.IsInterfaceLocalMulticast() {
|
||||
return ErrAddressNotAllowed
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resolver is an interface for net.Resolver
|
||||
type Resolver interface {
|
||||
LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error)
|
||||
}
|
||||
|
||||
// IsProxyableHostname checks if the IP addresses for a given hostname are permitted to be proxied
|
||||
func IsProxyableHostname(ctx context.Context, resolv Resolver, hostname string) error {
|
||||
resp, err := resolv.LookupIPAddr(ctx, hostname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(resp) == 0 {
|
||||
return ErrNoAddresses
|
||||
}
|
||||
|
||||
for _, host := range resp {
|
||||
if err := isProxyableIP(host.IP); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func IsLocalIP(ip string) (bool, error) {
|
||||
addrs, err := net.InterfaceAddrs()
|
||||
if err != nil {
|
||||
|
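The new IsProxyableIP/IsProxyableHostname helpers above reject loopback and link-local targets before the apiserver builds a proxy URL. A self-contained sketch of the same classification using only the standard library:

package main

import (
	"fmt"
	"net"
)

// proxyable mirrors the checks in isProxyableIP: loopback, link-local and
// interface-local multicast addresses are rejected, everything else passes.
func proxyable(ip string) bool {
	netIP := net.ParseIP(ip)
	if netIP == nil {
		return false
	}
	return !(netIP.IsLoopback() || netIP.IsLinkLocalUnicast() ||
		netIP.IsLinkLocalMulticast() || netIP.IsInterfaceLocalMulticast())
}

func main() {
	for _, ip := range []string{"127.0.0.1", "169.254.169.254", "10.0.0.1", "8.8.8.8"} {
		fmt.Printf("%-15s proxyable=%v\n", ip, proxyable(ip))
	}
}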
69
vendor/k8s.io/kubernetes/pkg/proxy/util/utils_test.go
generated
vendored
69
vendor/k8s.io/kubernetes/pkg/proxy/util/utils_test.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package util
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
@ -27,6 +28,74 @@ import (
|
||||
fake "k8s.io/kubernetes/pkg/proxy/util/testing"
|
||||
)
|
||||
|
||||
func TestIsProxyableIP(t *testing.T) {
|
||||
testCases := []struct {
|
||||
ip string
|
||||
want error
|
||||
}{
|
||||
{"127.0.0.1", ErrAddressNotAllowed},
|
||||
{"127.0.0.2", ErrAddressNotAllowed},
|
||||
{"169.254.169.254", ErrAddressNotAllowed},
|
||||
{"169.254.1.1", ErrAddressNotAllowed},
|
||||
{"224.0.0.0", ErrAddressNotAllowed},
|
||||
{"10.0.0.1", nil},
|
||||
{"192.168.0.1", nil},
|
||||
{"172.16.0.1", nil},
|
||||
{"8.8.8.8", nil},
|
||||
{"::1", ErrAddressNotAllowed},
|
||||
{"fe80::", ErrAddressNotAllowed},
|
||||
{"ff02::", ErrAddressNotAllowed},
|
||||
{"ff01::", ErrAddressNotAllowed},
|
||||
{"2600::", nil},
|
||||
{"1", ErrAddressNotAllowed},
|
||||
{"", ErrAddressNotAllowed},
|
||||
}
|
||||
|
||||
for i := range testCases {
|
||||
got := IsProxyableIP(testCases[i].ip)
|
||||
if testCases[i].want != got {
|
||||
t.Errorf("case %d: expected %v, got %v", i, testCases[i].want, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type dummyResolver struct {
|
||||
ips []string
|
||||
err error
|
||||
}
|
||||
|
||||
func (r *dummyResolver) LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) {
|
||||
if r.err != nil {
|
||||
return nil, r.err
|
||||
}
|
||||
resp := []net.IPAddr{}
|
||||
for _, ipString := range r.ips {
|
||||
resp = append(resp, net.IPAddr{IP: net.ParseIP(ipString)})
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func TestIsProxyableHostname(t *testing.T) {
|
||||
testCases := []struct {
|
||||
hostname string
|
||||
ips []string
|
||||
want error
|
||||
}{
|
||||
{"k8s.io", []string{}, ErrNoAddresses},
|
||||
{"k8s.io", []string{"8.8.8.8"}, nil},
|
||||
{"k8s.io", []string{"169.254.169.254"}, ErrAddressNotAllowed},
|
||||
{"k8s.io", []string{"127.0.0.1", "8.8.8.8"}, ErrAddressNotAllowed},
|
||||
}
|
||||
|
||||
for i := range testCases {
|
||||
resolv := dummyResolver{ips: testCases[i].ips}
|
||||
got := IsProxyableHostname(context.Background(), &resolv, testCases[i].hostname)
|
||||
if testCases[i].want != got {
|
||||
t.Errorf("case %d: expected %v, got %v", i, testCases[i].want, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestShouldSkipService(t *testing.T) {
|
||||
testCases := []struct {
|
||||
service *v1.Service
|
||||
|
1
vendor/k8s.io/kubernetes/pkg/registry/core/node/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/registry/core/node/BUILD
generated
vendored
@ -19,6 +19,7 @@ go_library(
|
||||
"//pkg/apis/core/validation:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubelet/client:go_default_library",
|
||||
"//pkg/proxy/util:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
|
5
vendor/k8s.io/kubernetes/pkg/registry/core/node/strategy.go
generated
vendored
5
vendor/k8s.io/kubernetes/pkg/registry/core/node/strategy.go
generated
vendored
@ -40,6 +40,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/apis/core/validation"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/client"
|
||||
proxyutil "k8s.io/kubernetes/pkg/proxy/util"
|
||||
)
|
||||
|
||||
// nodeStrategy implements behavior for nodes
|
||||
@ -217,6 +218,10 @@ func ResourceLocation(getter ResourceGetter, connection client.ConnectionInfoGet
|
||||
nil
|
||||
}
|
||||
|
||||
if err := proxyutil.IsProxyableHostname(ctx, &net.Resolver{}, info.Hostname); err != nil {
|
||||
return nil, nil, errors.NewBadRequest(err.Error())
|
||||
}
|
||||
|
||||
// Otherwise, return the requested scheme and port, and the proxy transport
|
||||
return &url.URL{Scheme: schemeReq, Host: net.JoinHostPort(info.Hostname, portReq)}, proxyTransport, nil
|
||||
}
|
||||
|
1
vendor/k8s.io/kubernetes/pkg/registry/core/pod/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/registry/core/pod/BUILD
generated
vendored
@ -20,6 +20,7 @@ go_library(
|
||||
"//pkg/apis/core/helper/qos:go_default_library",
|
||||
"//pkg/apis/core/validation:go_default_library",
|
||||
"//pkg/kubelet/client:go_default_library",
|
||||
"//pkg/proxy/util:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
|
5
vendor/k8s.io/kubernetes/pkg/registry/core/pod/strategy.go
generated
vendored
5
vendor/k8s.io/kubernetes/pkg/registry/core/pod/strategy.go
generated
vendored
@ -47,6 +47,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/apis/core/helper/qos"
|
||||
"k8s.io/kubernetes/pkg/apis/core/validation"
|
||||
"k8s.io/kubernetes/pkg/kubelet/client"
|
||||
proxyutil "k8s.io/kubernetes/pkg/proxy/util"
|
||||
)
|
||||
|
||||
// podStrategy implements behavior for Pods
|
||||
@ -290,6 +291,10 @@ func ResourceLocation(getter ResourceGetter, rt http.RoundTripper, ctx context.C
|
||||
}
|
||||
}
|
||||
|
||||
if err := proxyutil.IsProxyableIP(pod.Status.PodIP); err != nil {
|
||||
return nil, nil, errors.NewBadRequest(err.Error())
|
||||
}
|
||||
|
||||
loc := &url.URL{
|
||||
Scheme: scheme,
|
||||
}
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/registry/scheduling/rest/storage_scheduling.go
generated
vendored
@ -93,7 +93,7 @@ func AddSystemPriorityClasses() genericapiserver.PostStartHookFunc {
} else {
// Unable to get the priority class for reasons other than "not found".
klog.Warningf("unable to get PriorityClass %v: %v. Retrying...", pc.Name, err)
return false, err
return false, nil
}
}
}
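The one-line change above makes the PostStartHook keep polling after a transient error instead of aborting. A small sketch of the ConditionFunc convention it relies on: inside a wait poll, returning (false, nil) means retry, while a non-nil error stops the loop (the interval and attempt count below are arbitrary):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	err := wait.PollImmediate(10*time.Millisecond, time.Second, func() (bool, error) {
		attempts++
		if attempts < 3 {
			return false, nil // transient failure: poll again
		}
		return true, nil // done
	})
	fmt.Println(attempts, err) // 3 <nil>
}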
1
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/BUILD
generated
vendored
@ -17,7 +17,6 @@ go_library(
|
||||
deps = [
|
||||
"//pkg/scheduler/api:go_default_library",
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//pkg/scheduler/internal/cache:go_default_library",
|
||||
"//staging/src/k8s.io/api/apps/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
|
||||
|
6
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go
generated
vendored
@ -23,7 +23,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
|
||||
)
|
||||
|
||||
// NodeFieldSelectorKeys is a map that: the key are node field selector keys; the values are
|
||||
@ -92,6 +91,9 @@ type NodeLister interface {
|
||||
List() ([]*v1.Node, error)
|
||||
}
|
||||
|
||||
// PodFilter is a function to filter a pod. If pod passed return true else return false.
|
||||
type PodFilter func(*v1.Pod) bool
|
||||
|
||||
// PodLister interface represents anything that can list pods for a scheduler.
|
||||
type PodLister interface {
|
||||
// We explicitly return []*v1.Pod, instead of v1.PodList, to avoid
|
||||
@ -99,7 +101,7 @@ type PodLister interface {
|
||||
List(labels.Selector) ([]*v1.Pod, error)
|
||||
// This is similar to "List()", but the returned slice does not
|
||||
// contain pods that don't pass `podFilter`.
|
||||
FilteredList(podFilter schedulerinternalcache.PodFilter, selector labels.Selector) ([]*v1.Pod, error)
|
||||
FilteredList(podFilter PodFilter, selector labels.Selector) ([]*v1.Pod, error)
|
||||
}
|
||||
|
||||
// ServiceLister interface represents anything that can produce a list of services; the list is consumed by a scheduler.
|
||||
|
56
vendor/k8s.io/kubernetes/pkg/scheduler/core/generic_scheduler.go
generated
vendored
56
vendor/k8s.io/kubernetes/pkg/scheduler/core/generic_scheduler.go
generated
vendored
@ -351,7 +351,7 @@ func (g *genericScheduler) processPreemptionWithExtenders(
|
||||
// worth the complexity, especially because we generally expect to have a very
|
||||
// small number of nominated pods per node.
|
||||
func (g *genericScheduler) getLowerPriorityNominatedPods(pod *v1.Pod, nodeName string) []*v1.Pod {
|
||||
pods := g.schedulingQueue.WaitingPodsForNode(nodeName)
|
||||
pods := g.schedulingQueue.NominatedPodsForNode(nodeName)
|
||||
|
||||
if len(pods) == 0 {
|
||||
return nil
|
||||
@ -501,7 +501,7 @@ func addNominatedPods(pod *v1.Pod, meta algorithm.PredicateMetadata,
|
||||
// This may happen only in tests.
|
||||
return false, meta, nodeInfo
|
||||
}
|
||||
nominatedPods := queue.WaitingPodsForNode(nodeInfo.Node().Name)
|
||||
nominatedPods := queue.NominatedPodsForNode(nodeInfo.Node().Name)
|
||||
if nominatedPods == nil || len(nominatedPods) == 0 {
|
||||
return false, meta, nodeInfo
|
||||
}
|
||||
@ -655,24 +655,26 @@ func PrioritizeNodes(
|
||||
|
||||
// DEPRECATED: we can remove this when all priorityConfigs implement the
|
||||
// Map-Reduce pattern.
|
||||
workqueue.ParallelizeUntil(context.TODO(), 16, len(priorityConfigs), func(i int) {
|
||||
priorityConfig := priorityConfigs[i]
|
||||
if priorityConfig.Function == nil {
|
||||
for i := range priorityConfigs {
|
||||
if priorityConfigs[i].Function != nil {
|
||||
wg.Add(1)
|
||||
go func(index int) {
|
||||
defer wg.Done()
|
||||
var err error
|
||||
results[index], err = priorityConfigs[index].Function(pod, nodeNameToInfo, nodes)
|
||||
if err != nil {
|
||||
appendError(err)
|
||||
}
|
||||
}(i)
|
||||
} else {
|
||||
results[i] = make(schedulerapi.HostPriorityList, len(nodes))
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
results[i], err = priorityConfig.Function(pod, nodeNameToInfo, nodes)
|
||||
if err != nil {
|
||||
appendError(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), func(index int) {
|
||||
nodeInfo := nodeNameToInfo[nodes[index].Name]
|
||||
for i, priorityConfig := range priorityConfigs {
|
||||
if priorityConfig.Function != nil {
|
||||
for i := range priorityConfigs {
|
||||
if priorityConfigs[i].Function != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
@ -685,22 +687,22 @@ func PrioritizeNodes(
|
||||
}
|
||||
})
|
||||
|
||||
for i, priorityConfig := range priorityConfigs {
|
||||
if priorityConfig.Reduce == nil {
|
||||
for i := range priorityConfigs {
|
||||
if priorityConfigs[i].Reduce == nil {
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(index int, config algorithm.PriorityConfig) {
|
||||
go func(index int) {
|
||||
defer wg.Done()
|
||||
if err := config.Reduce(pod, meta, nodeNameToInfo, results[index]); err != nil {
|
||||
if err := priorityConfigs[index].Reduce(pod, meta, nodeNameToInfo, results[index]); err != nil {
|
||||
appendError(err)
|
||||
}
|
||||
if klog.V(10) {
|
||||
for _, hostPriority := range results[index] {
|
||||
klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, config.Name, hostPriority.Score)
|
||||
klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, priorityConfigs[index].Name, hostPriority.Score)
|
||||
}
|
||||
}
|
||||
}(i, priorityConfig)
|
||||
}(i)
|
||||
}
|
||||
// Wait for all computations to be finished.
|
||||
wg.Wait()
|
||||
@ -720,14 +722,14 @@ func PrioritizeNodes(
|
||||
|
||||
if len(extenders) != 0 && nodes != nil {
|
||||
combinedScores := make(map[string]int, len(nodeNameToInfo))
|
||||
for _, extender := range extenders {
|
||||
if !extender.IsInterested(pod) {
|
||||
for i := range extenders {
|
||||
if !extenders[i].IsInterested(pod) {
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(ext algorithm.SchedulerExtender) {
|
||||
go func(extIndex int) {
|
||||
defer wg.Done()
|
||||
prioritizedList, weight, err := ext.Prioritize(pod, nodes)
|
||||
prioritizedList, weight, err := extenders[extIndex].Prioritize(pod, nodes)
|
||||
if err != nil {
|
||||
// Prioritization errors from extender can be ignored, let k8s/other extenders determine the priorities
|
||||
return
|
||||
@ -736,12 +738,12 @@ func PrioritizeNodes(
|
||||
for i := range *prioritizedList {
|
||||
host, score := (*prioritizedList)[i].Host, (*prioritizedList)[i].Score
|
||||
if klog.V(10) {
|
||||
klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, ext.Name(), score)
|
||||
klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, extenders[extIndex].Name(), score)
|
||||
}
|
||||
combinedScores[host] += score * weight
|
||||
}
|
||||
mu.Unlock()
|
||||
}(extender)
|
||||
}(i)
|
||||
}
|
||||
// wait for all go routines to finish
|
||||
wg.Wait()
|
||||
|
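The PrioritizeNodes refactor above passes a slice index into each goroutine and indexes priorityConfigs/extenders inside the closure instead of copying the loop variable. As a standalone reminder, not taken from the diff, of why the index is handed over as an argument (in Go versions before 1.22, closing over the range variable directly would make every goroutine see its final value):

package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"a", "b", "c"}
	var wg sync.WaitGroup
	for i := range items {
		wg.Add(1)
		go func(index int) { // the index is passed explicitly, as in the refactor
			defer wg.Done()
			fmt.Println(items[index])
		}(i)
	}
	wg.Wait()
}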
1
vendor/k8s.io/kubernetes/pkg/scheduler/factory/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/scheduler/factory/BUILD
generated
vendored
@ -73,6 +73,7 @@ go_test(
|
||||
"//pkg/scheduler/testing:go_default_library",
|
||||
"//pkg/scheduler/util:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
|
56
vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory.go
generated
vendored
56
vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory.go
generated
vendored
@ -992,7 +992,14 @@ func (c *configFactory) updateNodeInCache(oldObj, newObj interface{}) {
|
||||
}
|
||||
|
||||
c.invalidateCachedPredicatesOnNodeUpdate(newNode, oldNode)
|
||||
c.podQueue.MoveAllToActiveQueue()
|
||||
// Only activate unschedulable pods if the node became more schedulable.
|
||||
// We skip the node property comparison when there is no unschedulable pods in the queue
|
||||
// to save processing cycles. We still trigger a move to active queue to cover the case
|
||||
// that a pod being processed by the scheduler is determined unschedulable. We want this
|
||||
// pod to be reevaluated when a change in the cluster happens.
|
||||
if c.podQueue.NumUnschedulablePods() == 0 || nodeSchedulingPropertiesChanged(newNode, oldNode) {
|
||||
c.podQueue.MoveAllToActiveQueue()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node, oldNode *v1.Node) {
|
||||
@ -1064,6 +1071,53 @@ func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node,
|
||||
}
|
||||
}
|
||||
|
||||
func nodeSchedulingPropertiesChanged(newNode *v1.Node, oldNode *v1.Node) bool {
|
||||
if nodeSpecUnschedulableChanged(newNode, oldNode) {
|
||||
return true
|
||||
}
|
||||
if nodeAllocatableChanged(newNode, oldNode) {
|
||||
return true
|
||||
}
|
||||
if nodeLabelsChanged(newNode, oldNode) {
|
||||
return true
|
||||
}
|
||||
if nodeTaintsChanged(newNode, oldNode) {
|
||||
return true
|
||||
}
|
||||
if nodeConditionsChanged(newNode, oldNode) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func nodeAllocatableChanged(newNode *v1.Node, oldNode *v1.Node) bool {
|
||||
return !reflect.DeepEqual(oldNode.Status.Allocatable, newNode.Status.Allocatable)
|
||||
}
|
||||
|
||||
func nodeLabelsChanged(newNode *v1.Node, oldNode *v1.Node) bool {
|
||||
return !reflect.DeepEqual(oldNode.GetLabels(), newNode.GetLabels())
|
||||
}
|
||||
|
||||
func nodeTaintsChanged(newNode *v1.Node, oldNode *v1.Node) bool {
|
||||
return !reflect.DeepEqual(newNode.Spec.Taints, oldNode.Spec.Taints)
|
||||
}
|
||||
|
||||
func nodeConditionsChanged(newNode *v1.Node, oldNode *v1.Node) bool {
|
||||
strip := func(conditions []v1.NodeCondition) map[v1.NodeConditionType]v1.ConditionStatus {
|
||||
conditionStatuses := make(map[v1.NodeConditionType]v1.ConditionStatus, len(conditions))
|
||||
for i := range conditions {
|
||||
conditionStatuses[conditions[i].Type] = conditions[i].Status
|
||||
}
|
||||
return conditionStatuses
|
||||
}
|
||||
return !reflect.DeepEqual(strip(oldNode.Status.Conditions), strip(newNode.Status.Conditions))
|
||||
}
|
||||
|
||||
func nodeSpecUnschedulableChanged(newNode *v1.Node, oldNode *v1.Node) bool {
|
||||
return newNode.Spec.Unschedulable != oldNode.Spec.Unschedulable && newNode.Spec.Unschedulable == false
|
||||
}
|
||||
|
||||
func (c *configFactory) deleteNodeFromCache(obj interface{}) {
|
||||
var node *v1.Node
|
||||
switch t := obj.(type) {
|
||||
|
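The new nodeSchedulingPropertiesChanged helpers above gate MoveAllToActiveQueue on changes that can actually affect scheduling; in particular, nodeConditionsChanged strips timestamps so heartbeat-only updates do not count. A simplified sketch of that comparison, using a throwaway struct instead of v1.NodeCondition:

package main

import (
	"fmt"
	"reflect"
)

type condition struct {
	Type, Status, LastHeartbeat string
}

// conditionsChanged compares only the (type, status) pairs, so a condition
// whose heartbeat timestamp moved is not treated as a scheduling change.
func conditionsChanged(oldC, newC []condition) bool {
	strip := func(conds []condition) map[string]string {
		m := make(map[string]string, len(conds))
		for _, c := range conds {
			m[c.Type] = c.Status
		}
		return m
	}
	return !reflect.DeepEqual(strip(oldC), strip(newC))
}

func main() {
	oldC := []condition{{"Ready", "True", "t1"}}
	newC := []condition{{"Ready", "True", "t2"}} // only the heartbeat moved
	fmt.Println(conditionsChanged(oldC, newC))   // false
}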
144
vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory_test.go
generated
vendored
144
vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory_test.go
generated
vendored
@ -24,6 +24,7 @@ import (
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
@ -657,3 +658,146 @@ func testGetBinderFunc(expectedBinderType, podName string, extenders []algorithm
|
||||
t.Errorf("Expected binder %q but got %q", expectedBinderType, binderType)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeAllocatableChanged(t *testing.T) {
|
||||
newQuantity := func(value int64) resource.Quantity {
|
||||
return *resource.NewQuantity(value, resource.BinarySI)
|
||||
}
|
||||
for _, c := range []struct {
|
||||
Name string
|
||||
Changed bool
|
||||
OldAllocatable v1.ResourceList
|
||||
NewAllocatable v1.ResourceList
|
||||
}{
|
||||
{
|
||||
Name: "no allocatable resources changed",
|
||||
Changed: false,
|
||||
OldAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024)},
|
||||
NewAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024)},
|
||||
},
|
||||
{
|
||||
Name: "new node has more allocatable resources",
|
||||
Changed: true,
|
||||
OldAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024)},
|
||||
NewAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024), v1.ResourceStorage: newQuantity(1024)},
|
||||
},
|
||||
} {
|
||||
oldNode := &v1.Node{Status: v1.NodeStatus{Allocatable: c.OldAllocatable}}
|
||||
newNode := &v1.Node{Status: v1.NodeStatus{Allocatable: c.NewAllocatable}}
|
||||
changed := nodeAllocatableChanged(newNode, oldNode)
|
||||
if changed != c.Changed {
|
||||
t.Errorf("nodeAllocatableChanged should be %t, got %t", c.Changed, changed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeLabelsChanged(t *testing.T) {
|
||||
for _, c := range []struct {
|
||||
Name string
|
||||
Changed bool
|
||||
OldLabels map[string]string
|
||||
NewLabels map[string]string
|
||||
}{
|
||||
{
|
||||
Name: "no labels changed",
|
||||
Changed: false,
|
||||
OldLabels: map[string]string{"foo": "bar"},
|
||||
NewLabels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
// Labels changed.
|
||||
{
|
||||
Name: "new node has more labels",
|
||||
Changed: true,
|
||||
OldLabels: map[string]string{"foo": "bar"},
|
||||
NewLabels: map[string]string{"foo": "bar", "test": "value"},
|
||||
},
|
||||
} {
|
||||
oldNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: c.OldLabels}}
|
||||
newNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: c.NewLabels}}
|
||||
changed := nodeLabelsChanged(newNode, oldNode)
|
||||
if changed != c.Changed {
|
||||
t.Errorf("Test case %q failed: should be %t, got %t", c.Name, c.Changed, changed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeTaintsChanged(t *testing.T) {
|
||||
for _, c := range []struct {
|
||||
Name string
|
||||
Changed bool
|
||||
OldTaints []v1.Taint
|
||||
NewTaints []v1.Taint
|
||||
}{
|
||||
{
|
||||
Name: "no taint changed",
|
||||
Changed: false,
|
||||
OldTaints: []v1.Taint{{Key: "key", Value: "value"}},
|
||||
NewTaints: []v1.Taint{{Key: "key", Value: "value"}},
|
||||
},
|
||||
{
|
||||
Name: "taint value changed",
|
||||
Changed: true,
|
||||
OldTaints: []v1.Taint{{Key: "key", Value: "value1"}},
|
||||
NewTaints: []v1.Taint{{Key: "key", Value: "value2"}},
|
||||
},
|
||||
} {
|
||||
oldNode := &v1.Node{Spec: v1.NodeSpec{Taints: c.OldTaints}}
|
||||
newNode := &v1.Node{Spec: v1.NodeSpec{Taints: c.NewTaints}}
|
||||
changed := nodeTaintsChanged(newNode, oldNode)
|
||||
if changed != c.Changed {
|
||||
t.Errorf("Test case %q failed: should be %t, not %t", c.Name, c.Changed, changed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeConditionsChanged(t *testing.T) {
|
||||
nodeConditionType := reflect.TypeOf(v1.NodeCondition{})
|
||||
if nodeConditionType.NumField() != 6 {
|
||||
t.Errorf("NodeCondition type has changed. The nodeConditionsChanged() function must be reevaluated.")
|
||||
}
|
||||
|
||||
for _, c := range []struct {
|
||||
Name string
|
||||
Changed bool
|
||||
OldConditions []v1.NodeCondition
|
||||
NewConditions []v1.NodeCondition
|
||||
}{
|
||||
{
|
||||
Name: "no condition changed",
|
||||
Changed: false,
|
||||
OldConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}},
|
||||
NewConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}},
|
||||
},
|
||||
{
|
||||
Name: "only LastHeartbeatTime changed",
|
||||
Changed: false,
|
||||
OldConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(1, 0)}},
|
||||
NewConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(2, 0)}},
|
||||
},
|
||||
{
|
||||
Name: "new node has more healthy conditions",
|
||||
Changed: true,
|
||||
OldConditions: []v1.NodeCondition{},
|
||||
NewConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
|
||||
},
|
||||
{
|
||||
Name: "new node has less unhealthy conditions",
|
||||
Changed: true,
|
||||
OldConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}},
|
||||
NewConditions: []v1.NodeCondition{},
|
||||
},
|
||||
{
|
||||
Name: "condition status changed",
|
||||
Changed: true,
|
||||
OldConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}},
|
||||
NewConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
|
||||
},
|
||||
} {
|
||||
oldNode := &v1.Node{Status: v1.NodeStatus{Conditions: c.OldConditions}}
|
||||
newNode := &v1.Node{Status: v1.NodeStatus{Conditions: c.NewConditions}}
|
||||
changed := nodeConditionsChanged(newNode, oldNode)
|
||||
if changed != c.Changed {
|
||||
t.Errorf("Test case %q failed: should be %t, got %t", c.Name, c.Changed, changed)
|
||||
}
|
||||
}
|
||||
}
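The four tests above exercise small change detectors on Node objects. A minimal sketch of the condition comparison they imply is shown below, assuming only the v1 and metav1 API types; it is not the upstream implementation, but it shows why a LastHeartbeatTime-only update must not count as a change.

package sketch

import (
	"reflect"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// conditionsChanged reports whether node conditions differ once the heartbeat
// timestamp is ignored, mirroring what the tests above expect.
func conditionsChanged(oldNode, newNode *v1.Node) bool {
	strip := func(conds []v1.NodeCondition) []v1.NodeCondition {
		out := make([]v1.NodeCondition, len(conds))
		for i, c := range conds {
			c.LastHeartbeatTime = metav1.Time{} // heartbeat-only updates are not a change
			out[i] = c
		}
		return out
	}
	return !reflect.DeepEqual(strip(oldNode.Status.Conditions), strip(newNode.Status.Conditions))
}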
1 vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/BUILD generated vendored
@ -11,6 +11,7 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//pkg/util/node:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
|
3 vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go generated vendored
@ -27,6 +27,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
|
||||
"k8s.io/klog"
|
||||
@ -149,7 +150,7 @@ func (cache *schedulerCache) List(selector labels.Selector) ([]*v1.Pod, error) {
|
||||
return cache.FilteredList(alwaysTrue, selector)
|
||||
}
|
||||
|
||||
func (cache *schedulerCache) FilteredList(podFilter PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
|
||||
func (cache *schedulerCache) FilteredList(podFilter algorithm.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
|
||||
cache.mu.RLock()
|
||||
defer cache.mu.RUnlock()
|
||||
// podFilter is expected to return true for most or all of the pods. We
|
||||
|
1 vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/fake/BUILD generated vendored
@ -6,6 +6,7 @@ go_library(
|
||||
importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake",
|
||||
visibility = ["//pkg/scheduler:__subpackages__"],
|
||||
deps = [
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/scheduler/cache:go_default_library",
|
||||
"//pkg/scheduler/internal/cache:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
|
3 vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/fake/fake_cache.go generated vendored
@ -19,6 +19,7 @@ package fake
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
|
||||
)
|
||||
@ -83,7 +84,7 @@ func (c *Cache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeI
|
||||
func (c *Cache) List(s labels.Selector) ([]*v1.Pod, error) { return nil, nil }
|
||||
|
||||
// FilteredList is a fake method for testing.
|
||||
func (c *Cache) FilteredList(filter schedulerinternalcache.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
|
||||
func (c *Cache) FilteredList(filter algorithm.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
6 vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/interface.go generated vendored
@ -19,12 +19,10 @@ package cache
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
)
|
||||
|
||||
// PodFilter is a function to filter a pod. If pod passed return true else return false.
|
||||
type PodFilter func(*v1.Pod) bool
|
||||
|
||||
// Cache collects pods' information and provides node-level aggregated information.
|
||||
// It's intended for generic scheduler to do efficient lookup.
|
||||
// Cache's operations are pod centric. It does incremental updates based on pod events.
|
||||
@ -106,7 +104,7 @@ type Cache interface {
|
||||
List(labels.Selector) ([]*v1.Pod, error)
|
||||
|
||||
// FilteredList returns all cached pods that pass the filter.
|
||||
FilteredList(filter PodFilter, selector labels.Selector) ([]*v1.Pod, error)
|
||||
FilteredList(filter algorithm.PodFilter, selector labels.Selector) ([]*v1.Pod, error)
|
||||
|
||||
// Snapshot takes a snapshot on current cache
|
||||
Snapshot() *Snapshot
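Illustrative only, not part of the diff: with the filter type now taken from pkg/scheduler/algorithm, a caller inside the scheduler tree can hand the cache any func(*v1.Pod) bool. The helper below is a sketch under that assumption.

package sketch

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
)

// assignedPods lists cached pods that are already bound to a node; the plain
// func literal satisfies algorithm.PodFilter, so no conversion is needed.
func assignedPods(c schedulerinternalcache.Cache) ([]*v1.Pod, error) {
	boundOnly := func(pod *v1.Pod) bool { return pod.Spec.NodeName != "" }
	return c.FilteredList(boundOnly, labels.Everything())
}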
3 vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/BUILD generated vendored
@ -12,6 +12,7 @@ go_library(
|
||||
"//pkg/scheduler/util:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
@ -22,9 +23,11 @@ go_test(
|
||||
srcs = ["scheduling_queue_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/scheduler/util:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
213 vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue.go generated vendored
@ -36,6 +36,7 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ktypes "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||
@ -62,13 +63,18 @@ type SchedulingQueue interface {
|
||||
MoveAllToActiveQueue()
|
||||
AssignedPodAdded(pod *v1.Pod)
|
||||
AssignedPodUpdated(pod *v1.Pod)
|
||||
WaitingPodsForNode(nodeName string) []*v1.Pod
|
||||
NominatedPodsForNode(nodeName string) []*v1.Pod
|
||||
WaitingPods() []*v1.Pod
|
||||
// Close closes the SchedulingQueue so that the goroutine which is
|
||||
// waiting to pop items can exit gracefully.
|
||||
Close()
|
||||
// UpdateNominatedPodForNode adds the given pod to the nominated pod map or
|
||||
// updates it if it already exists.
|
||||
UpdateNominatedPodForNode(pod *v1.Pod, nodeName string)
|
||||
// DeleteNominatedPodIfExists deletes nominatedPod from internal cache
|
||||
DeleteNominatedPodIfExists(pod *v1.Pod)
|
||||
// NumUnschedulablePods returns the number of unschedulable pods exist in the SchedulingQueue.
|
||||
NumUnschedulablePods() int
|
||||
}
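A hedged sketch of how a consumer might call the renamed and newly added interface methods; the queue value and node name are assumptions, not code from this change.

package sketch

import (
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/internal/queue"
)

// pendingOn gathers what preemption logic needs from the queue: the pods
// already nominated onto the node plus a rough backlog size for logging.
func pendingOn(q queue.SchedulingQueue, nodeName string) ([]*v1.Pod, int) {
	return q.NominatedPodsForNode(nodeName), q.NumUnschedulablePods()
}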
|
||||
|
||||
// NewSchedulingQueue initializes a new scheduling queue. If pod priority is
|
||||
@ -148,9 +154,9 @@ func (f *FIFO) AssignedPodUpdated(pod *v1.Pod) {}
|
||||
// MoveAllToActiveQueue does nothing in FIFO as all pods are always in the active queue.
|
||||
func (f *FIFO) MoveAllToActiveQueue() {}
|
||||
|
||||
// WaitingPodsForNode returns pods that are nominated to run on the given node,
|
||||
// NominatedPodsForNode returns pods that are nominated to run on the given node,
|
||||
// but FIFO does not support it.
|
||||
func (f *FIFO) WaitingPodsForNode(nodeName string) []*v1.Pod {
|
||||
func (f *FIFO) NominatedPodsForNode(nodeName string) []*v1.Pod {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -162,6 +168,14 @@ func (f *FIFO) Close() {
|
||||
// DeleteNominatedPodIfExists does nothing in FIFO.
|
||||
func (f *FIFO) DeleteNominatedPodIfExists(pod *v1.Pod) {}
|
||||
|
||||
// UpdateNominatedPodForNode does nothing in FIFO.
|
||||
func (f *FIFO) UpdateNominatedPodForNode(pod *v1.Pod, nodeName string) {}
|
||||
|
||||
// NumUnschedulablePods returns the number of unschedulable pods exist in the SchedulingQueue.
|
||||
func (f *FIFO) NumUnschedulablePods() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
// NewFIFO creates a FIFO object.
|
||||
func NewFIFO() *FIFO {
|
||||
return &FIFO{FIFO: cache.NewFIFO(cache.MetaNamespaceKeyFunc)}
|
||||
@ -187,10 +201,9 @@ type PriorityQueue struct {
|
||||
activeQ *Heap
|
||||
// unschedulableQ holds pods that have been tried and determined unschedulable.
|
||||
unschedulableQ *UnschedulablePodsMap
|
||||
// nominatedPods is a map keyed by a node name and the value is a list of
|
||||
// pods which are nominated to run on the node. These are pods which can be in
|
||||
// the activeQ or unschedulableQ.
|
||||
nominatedPods map[string][]*v1.Pod
|
||||
// nominatedPods is a structure that stores pods which are nominated to run
|
||||
// on nodes.
|
||||
nominatedPods *nominatedPodMap
|
||||
// receivedMoveRequest is set to true whenever we receive a request to move a
|
||||
// pod from the unschedulableQ to the activeQ, and is set to false, when we pop
|
||||
// a pod from the activeQ. It indicates if we received a move request when a
|
||||
@ -206,57 +219,38 @@ type PriorityQueue struct {
|
||||
// Making sure that PriorityQueue implements SchedulingQueue.
|
||||
var _ = SchedulingQueue(&PriorityQueue{})
|
||||
|
||||
// podTimeStamp returns pod's last schedule time or its creation time if the
|
||||
// scheduler has never tried scheduling it.
|
||||
func podTimestamp(pod *v1.Pod) *metav1.Time {
|
||||
_, condition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
|
||||
if condition == nil {
|
||||
return &pod.CreationTimestamp
|
||||
}
|
||||
return &condition.LastTransitionTime
|
||||
}
|
||||
|
||||
// activeQComp is the function used by the activeQ heap algorithm to sort pods.
|
||||
// It sorts pods based on their priority. When priorities are equal, it uses
|
||||
// podTimestamp.
|
||||
func activeQComp(pod1, pod2 interface{}) bool {
|
||||
p1 := pod1.(*v1.Pod)
|
||||
p2 := pod2.(*v1.Pod)
|
||||
prio1 := util.GetPodPriority(p1)
|
||||
prio2 := util.GetPodPriority(p2)
|
||||
return (prio1 > prio2) || (prio1 == prio2 && podTimestamp(p1).Before(podTimestamp(p2)))
|
||||
}
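Spelled out as a standalone helper (a sketch, not new upstream code) that relies on the util.GetPodPriority and podTimestamp helpers already in scope in this package: a higher-priority pod always pops first, and among equal priorities the pod whose last scheduling attempt (or creation) is older pops first.

package queue

import (
	"k8s.io/api/core/v1"

	"k8s.io/kubernetes/pkg/scheduler/util"
)

// popsBefore restates activeQComp for readability; podTimestamp is the helper
// defined just above in this file.
func popsBefore(a, b *v1.Pod) bool {
	pa, pb := util.GetPodPriority(a), util.GetPodPriority(b)
	if pa != pb {
		return pa > pb // higher priority pops first
	}
	return podTimestamp(a).Before(podTimestamp(b)) // older attempt pops first on ties
}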
|
||||
|
||||
// NewPriorityQueue creates a PriorityQueue object.
|
||||
func NewPriorityQueue() *PriorityQueue {
|
||||
pq := &PriorityQueue{
|
||||
activeQ: newHeap(cache.MetaNamespaceKeyFunc, util.HigherPriorityPod),
|
||||
activeQ: newHeap(cache.MetaNamespaceKeyFunc, activeQComp),
|
||||
unschedulableQ: newUnschedulablePodsMap(),
|
||||
nominatedPods: map[string][]*v1.Pod{},
|
||||
nominatedPods: newNominatedPodMap(),
|
||||
}
|
||||
pq.cond.L = &pq.lock
|
||||
return pq
|
||||
}
|
||||
|
||||
// addNominatedPodIfNeeded adds a pod to nominatedPods if it has a NominatedNodeName and it does not
|
||||
// already exist in the map. Adding an existing pod is not going to update the pod.
|
||||
func (p *PriorityQueue) addNominatedPodIfNeeded(pod *v1.Pod) {
|
||||
nnn := NominatedNodeName(pod)
|
||||
if len(nnn) > 0 {
|
||||
for _, np := range p.nominatedPods[nnn] {
|
||||
if np.UID == pod.UID {
|
||||
klog.V(4).Infof("Pod %v/%v already exists in the nominated map!", pod.Namespace, pod.Name)
|
||||
return
|
||||
}
|
||||
}
|
||||
p.nominatedPods[nnn] = append(p.nominatedPods[nnn], pod)
|
||||
}
|
||||
}
|
||||
|
||||
// deleteNominatedPodIfExists deletes a pod from the nominatedPods.
|
||||
// NOTE: this function assumes lock has been acquired in caller.
|
||||
func (p *PriorityQueue) deleteNominatedPodIfExists(pod *v1.Pod) {
|
||||
nnn := NominatedNodeName(pod)
|
||||
if len(nnn) > 0 {
|
||||
for i, np := range p.nominatedPods[nnn] {
|
||||
if np.UID == pod.UID {
|
||||
p.nominatedPods[nnn] = append(p.nominatedPods[nnn][:i], p.nominatedPods[nnn][i+1:]...)
|
||||
if len(p.nominatedPods[nnn]) == 0 {
|
||||
delete(p.nominatedPods, nnn)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateNominatedPod updates a pod in the nominatedPods.
|
||||
func (p *PriorityQueue) updateNominatedPod(oldPod, newPod *v1.Pod) {
|
||||
// Even if the nominated node name of the Pod is not changed, we must delete and add it again
|
||||
// to ensure that its pointer is updated.
|
||||
p.deleteNominatedPodIfExists(oldPod)
|
||||
p.addNominatedPodIfNeeded(newPod)
|
||||
}
|
||||
|
||||
// Add adds a pod to the active queue. It should be called only when a new pod
|
||||
// is added so there is no chance the pod is already in either queue.
|
||||
func (p *PriorityQueue) Add(pod *v1.Pod) error {
|
||||
@ -268,10 +262,9 @@ func (p *PriorityQueue) Add(pod *v1.Pod) error {
|
||||
} else {
|
||||
if p.unschedulableQ.get(pod) != nil {
|
||||
klog.Errorf("Error: pod %v/%v is already in the unschedulable queue.", pod.Namespace, pod.Name)
|
||||
p.deleteNominatedPodIfExists(pod)
|
||||
p.unschedulableQ.delete(pod)
|
||||
}
|
||||
p.addNominatedPodIfNeeded(pod)
|
||||
p.nominatedPods.add(pod, "")
|
||||
p.cond.Broadcast()
|
||||
}
|
||||
return err
|
||||
@ -292,7 +285,7 @@ func (p *PriorityQueue) AddIfNotPresent(pod *v1.Pod) error {
|
||||
if err != nil {
|
||||
klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err)
|
||||
} else {
|
||||
p.addNominatedPodIfNeeded(pod)
|
||||
p.nominatedPods.add(pod, "")
|
||||
p.cond.Broadcast()
|
||||
}
|
||||
return err
|
||||
@ -317,12 +310,12 @@ func (p *PriorityQueue) AddUnschedulableIfNotPresent(pod *v1.Pod) error {
|
||||
}
|
||||
if !p.receivedMoveRequest && isPodUnschedulable(pod) {
|
||||
p.unschedulableQ.addOrUpdate(pod)
|
||||
p.addNominatedPodIfNeeded(pod)
|
||||
p.nominatedPods.add(pod, "")
|
||||
return nil
|
||||
}
|
||||
err := p.activeQ.Add(pod)
|
||||
if err == nil {
|
||||
p.addNominatedPodIfNeeded(pod)
|
||||
p.nominatedPods.add(pod, "")
|
||||
p.cond.Broadcast()
|
||||
}
|
||||
return err
|
||||
@ -373,13 +366,13 @@ func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error {
|
||||
defer p.lock.Unlock()
|
||||
// If the pod is already in the active queue, just update it there.
|
||||
if _, exists, _ := p.activeQ.Get(newPod); exists {
|
||||
p.updateNominatedPod(oldPod, newPod)
|
||||
p.nominatedPods.update(oldPod, newPod)
|
||||
err := p.activeQ.Update(newPod)
|
||||
return err
|
||||
}
|
||||
// If the pod is in the unschedulable queue, updating it may make it schedulable.
|
||||
if usPod := p.unschedulableQ.get(newPod); usPod != nil {
|
||||
p.updateNominatedPod(oldPod, newPod)
|
||||
p.nominatedPods.update(oldPod, newPod)
|
||||
if isPodUpdated(oldPod, newPod) {
|
||||
p.unschedulableQ.delete(usPod)
|
||||
err := p.activeQ.Add(newPod)
|
||||
@ -394,7 +387,7 @@ func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error {
|
||||
// If pod is not in any of the two queue, we put it in the active queue.
|
||||
err := p.activeQ.Add(newPod)
|
||||
if err == nil {
|
||||
p.addNominatedPodIfNeeded(newPod)
|
||||
p.nominatedPods.add(newPod, "")
|
||||
p.cond.Broadcast()
|
||||
}
|
||||
return err
|
||||
@ -405,7 +398,7 @@ func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error {
|
||||
func (p *PriorityQueue) Delete(pod *v1.Pod) error {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
p.deleteNominatedPodIfExists(pod)
|
||||
p.nominatedPods.delete(pod)
|
||||
err := p.activeQ.Delete(pod)
|
||||
if err != nil { // The item was probably not found in the activeQ.
|
||||
p.unschedulableQ.delete(pod)
|
||||
@ -488,16 +481,13 @@ func (p *PriorityQueue) getUnschedulablePodsWithMatchingAffinityTerm(pod *v1.Pod
|
||||
return podsToMove
|
||||
}
|
||||
|
||||
// WaitingPodsForNode returns pods that are nominated to run on the given node,
|
||||
// NominatedPodsForNode returns pods that are nominated to run on the given node,
|
||||
// but they are waiting for other pods to be removed from the node before they
|
||||
// can be actually scheduled.
|
||||
func (p *PriorityQueue) WaitingPodsForNode(nodeName string) []*v1.Pod {
|
||||
func (p *PriorityQueue) NominatedPodsForNode(nodeName string) []*v1.Pod {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
if list, ok := p.nominatedPods[nodeName]; ok {
|
||||
return list
|
||||
}
|
||||
return nil
|
||||
return p.nominatedPods.podsForNode(nodeName)
|
||||
}
|
||||
|
||||
// WaitingPods returns all the waiting pods in the queue.
|
||||
@ -523,13 +513,30 @@ func (p *PriorityQueue) Close() {
|
||||
p.cond.Broadcast()
|
||||
}
|
||||
|
||||
// DeleteNominatedPodIfExists deletes pod from internal cache if it's a nominatedPod
|
||||
// DeleteNominatedPodIfExists deletes the pod from nominatedPods.
|
||||
func (p *PriorityQueue) DeleteNominatedPodIfExists(pod *v1.Pod) {
|
||||
p.lock.Lock()
|
||||
p.deleteNominatedPodIfExists(pod)
|
||||
p.nominatedPods.delete(pod)
|
||||
p.lock.Unlock()
|
||||
}
|
||||
|
||||
// UpdateNominatedPodForNode adds a pod to the nominated pods of the given node.
|
||||
// This is called during the preemption process after a node is nominated to run
|
||||
// the pod. We update the structure before sending a request to update the pod
|
||||
// object to avoid races with the following scheduling cycles.
|
||||
func (p *PriorityQueue) UpdateNominatedPodForNode(pod *v1.Pod, nodeName string) {
|
||||
p.lock.Lock()
|
||||
p.nominatedPods.add(pod, nodeName)
|
||||
p.lock.Unlock()
|
||||
}
|
||||
|
||||
// NumUnschedulablePods returns the number of unschedulable pods exist in the SchedulingQueue.
|
||||
func (p *PriorityQueue) NumUnschedulablePods() int {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
return len(p.unschedulableQ.pods)
|
||||
}
|
||||
|
||||
// UnschedulablePodsMap holds pods that cannot be scheduled. This data structure
|
||||
// is used to implement unschedulableQ.
|
||||
type UnschedulablePodsMap struct {
|
||||
@ -767,3 +774,77 @@ func newHeap(keyFn KeyFunc, lessFn LessFunc) *Heap {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// nominatedPodMap is a structure that stores pods nominated to run on nodes.
|
||||
// It exists because nominatedNodeName of pod objects stored in the structure
|
||||
// may be different than what scheduler has here. We should be able to find pods
|
||||
// by their UID and update/delete them.
|
||||
type nominatedPodMap struct {
|
||||
// nominatedPods is a map keyed by a node name and the value is a list of
|
||||
// pods which are nominated to run on the node. These are pods which can be in
|
||||
// the activeQ or unschedulableQ.
|
||||
nominatedPods map[string][]*v1.Pod
|
||||
// nominatedPodToNode is map keyed by a Pod UID to the node name where it is
|
||||
// nominated.
|
||||
nominatedPodToNode map[ktypes.UID]string
|
||||
}
|
||||
|
||||
func (npm *nominatedPodMap) add(p *v1.Pod, nodeName string) {
|
||||
// always delete the pod if it already exist, to ensure we never store more than
|
||||
// one instance of the pod.
|
||||
npm.delete(p)
|
||||
|
||||
nnn := nodeName
|
||||
if len(nnn) == 0 {
|
||||
nnn = NominatedNodeName(p)
|
||||
if len(nnn) == 0 {
|
||||
return
|
||||
}
|
||||
}
|
||||
npm.nominatedPodToNode[p.UID] = nnn
|
||||
for _, np := range npm.nominatedPods[nnn] {
|
||||
if np.UID == p.UID {
|
||||
klog.V(4).Infof("Pod %v/%v already exists in the nominated map!", p.Namespace, p.Name)
|
||||
return
|
||||
}
|
||||
}
|
||||
npm.nominatedPods[nnn] = append(npm.nominatedPods[nnn], p)
|
||||
}
|
||||
|
||||
func (npm *nominatedPodMap) delete(p *v1.Pod) {
|
||||
nnn, ok := npm.nominatedPodToNode[p.UID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
for i, np := range npm.nominatedPods[nnn] {
|
||||
if np.UID == p.UID {
|
||||
npm.nominatedPods[nnn] = append(npm.nominatedPods[nnn][:i], npm.nominatedPods[nnn][i+1:]...)
|
||||
if len(npm.nominatedPods[nnn]) == 0 {
|
||||
delete(npm.nominatedPods, nnn)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
delete(npm.nominatedPodToNode, p.UID)
|
||||
}
|
||||
|
||||
func (npm *nominatedPodMap) update(oldPod, newPod *v1.Pod) {
|
||||
// We update irrespective of the nominatedNodeName changed or not, to ensure
|
||||
// that pod pointer is updated.
|
||||
npm.delete(oldPod)
|
||||
npm.add(newPod, "")
|
||||
}
|
||||
|
||||
func (npm *nominatedPodMap) podsForNode(nodeName string) []*v1.Pod {
|
||||
if list, ok := npm.nominatedPods[nodeName]; ok {
|
||||
return list
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newNominatedPodMap() *nominatedPodMap {
|
||||
return &nominatedPodMap{
|
||||
nominatedPods: make(map[string][]*v1.Pod),
|
||||
nominatedPodToNode: make(map[ktypes.UID]string),
|
||||
}
|
||||
}
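A minimal, same-package sketch of the intended usage (the pod value is made up): add records the nomination in both indexes, an explicit node name overrides the one in pod.Status, and delete clears the UID from both maps.

package queue

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ktypes "k8s.io/apimachinery/pkg/types"
)

func exampleNominatedPodMap() []*v1.Pod {
	npm := newNominatedPodMap()
	p := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "p", Namespace: "ns", UID: ktypes.UID("uid-1")},
		Status:     v1.PodStatus{NominatedNodeName: "node-a"},
	}

	npm.add(p, "")       // empty nodeName falls back to pod.Status.NominatedNodeName
	npm.add(p, "node-b") // an explicit node overrides it; the old entry is removed first
	nominated := npm.podsForNode("node-b")
	npm.delete(p) // clears p from both nominatedPods and nominatedPodToNode
	return nominated
}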
189 vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue_test.go generated vendored
@ -24,6 +24,8 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/scheduler/util"
|
||||
)
|
||||
|
||||
@ -97,8 +99,14 @@ func TestPriorityQueue_Add(t *testing.T) {
|
||||
q.Add(&medPriorityPod)
|
||||
q.Add(&unschedulablePod)
|
||||
q.Add(&highPriorityPod)
|
||||
expectedNominatedPods := map[string][]*v1.Pod{
|
||||
"node1": {&medPriorityPod, &unschedulablePod},
|
||||
expectedNominatedPods := &nominatedPodMap{
|
||||
nominatedPodToNode: map[types.UID]string{
|
||||
medPriorityPod.UID: "node1",
|
||||
unschedulablePod.UID: "node1",
|
||||
},
|
||||
nominatedPods: map[string][]*v1.Pod{
|
||||
"node1": {&medPriorityPod, &unschedulablePod},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
|
||||
t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
|
||||
@ -112,8 +120,8 @@ func TestPriorityQueue_Add(t *testing.T) {
|
||||
if p, err := q.Pop(); err != nil || p != &unschedulablePod {
|
||||
t.Errorf("Expected: %v after Pop, but got: %v", unschedulablePod.Name, p.Name)
|
||||
}
|
||||
if len(q.nominatedPods["node1"]) != 2 {
|
||||
t.Errorf("Expected medPriorityPod and unschedulablePod to be still present in nomindatePods: %v", q.nominatedPods["node1"])
|
||||
if len(q.nominatedPods.nominatedPods["node1"]) != 2 {
|
||||
t.Errorf("Expected medPriorityPod and unschedulablePod to be still present in nomindatePods: %v", q.nominatedPods.nominatedPods["node1"])
|
||||
}
|
||||
}
|
||||
|
||||
@ -123,8 +131,14 @@ func TestPriorityQueue_AddIfNotPresent(t *testing.T) {
|
||||
q.AddIfNotPresent(&highPriNominatedPod) // Must not add anything.
|
||||
q.AddIfNotPresent(&medPriorityPod)
|
||||
q.AddIfNotPresent(&unschedulablePod)
|
||||
expectedNominatedPods := map[string][]*v1.Pod{
|
||||
"node1": {&medPriorityPod, &unschedulablePod},
|
||||
expectedNominatedPods := &nominatedPodMap{
|
||||
nominatedPodToNode: map[types.UID]string{
|
||||
medPriorityPod.UID: "node1",
|
||||
unschedulablePod.UID: "node1",
|
||||
},
|
||||
nominatedPods: map[string][]*v1.Pod{
|
||||
"node1": {&medPriorityPod, &unschedulablePod},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
|
||||
t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
|
||||
@ -135,8 +149,8 @@ func TestPriorityQueue_AddIfNotPresent(t *testing.T) {
|
||||
if p, err := q.Pop(); err != nil || p != &unschedulablePod {
|
||||
t.Errorf("Expected: %v after Pop, but got: %v", unschedulablePod.Name, p.Name)
|
||||
}
|
||||
if len(q.nominatedPods["node1"]) != 2 {
|
||||
t.Errorf("Expected medPriorityPod and unschedulablePod to be still present in nomindatePods: %v", q.nominatedPods["node1"])
|
||||
if len(q.nominatedPods.nominatedPods["node1"]) != 2 {
|
||||
t.Errorf("Expected medPriorityPod and unschedulablePod to be still present in nomindatePods: %v", q.nominatedPods.nominatedPods["node1"])
|
||||
}
|
||||
if q.unschedulableQ.get(&highPriNominatedPod) != &highPriNominatedPod {
|
||||
t.Errorf("Pod %v was not found in the unschedulableQ.", highPriNominatedPod.Name)
|
||||
@ -149,8 +163,15 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) {
|
||||
q.AddUnschedulableIfNotPresent(&highPriNominatedPod) // Must not add anything.
|
||||
q.AddUnschedulableIfNotPresent(&medPriorityPod) // This should go to activeQ.
|
||||
q.AddUnschedulableIfNotPresent(&unschedulablePod)
|
||||
expectedNominatedPods := map[string][]*v1.Pod{
|
||||
"node1": {&highPriNominatedPod, &medPriorityPod, &unschedulablePod},
|
||||
expectedNominatedPods := &nominatedPodMap{
|
||||
nominatedPodToNode: map[types.UID]string{
|
||||
medPriorityPod.UID: "node1",
|
||||
unschedulablePod.UID: "node1",
|
||||
highPriNominatedPod.UID: "node1",
|
||||
},
|
||||
nominatedPods: map[string][]*v1.Pod{
|
||||
"node1": {&highPriNominatedPod, &medPriorityPod, &unschedulablePod},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
|
||||
t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
|
||||
@ -161,7 +182,7 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) {
|
||||
if p, err := q.Pop(); err != nil || p != &medPriorityPod {
|
||||
t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Name)
|
||||
}
|
||||
if len(q.nominatedPods) != 1 {
|
||||
if len(q.nominatedPods.nominatedPods) != 1 {
|
||||
t.Errorf("Expected nomindatePods to have one element: %v", q.nominatedPods)
|
||||
}
|
||||
if q.unschedulableQ.get(&unschedulablePod) != &unschedulablePod {
|
||||
@ -178,8 +199,8 @@ func TestPriorityQueue_Pop(t *testing.T) {
|
||||
if p, err := q.Pop(); err != nil || p != &medPriorityPod {
|
||||
t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Name)
|
||||
}
|
||||
if len(q.nominatedPods["node1"]) != 1 {
|
||||
t.Errorf("Expected medPriorityPod to be present in nomindatePods: %v", q.nominatedPods["node1"])
|
||||
if len(q.nominatedPods.nominatedPods["node1"]) != 1 {
|
||||
t.Errorf("Expected medPriorityPod to be present in nomindatePods: %v", q.nominatedPods.nominatedPods["node1"])
|
||||
}
|
||||
}()
|
||||
q.Add(&medPriorityPod)
|
||||
@ -192,7 +213,7 @@ func TestPriorityQueue_Update(t *testing.T) {
|
||||
if _, exists, _ := q.activeQ.Get(&highPriorityPod); !exists {
|
||||
t.Errorf("Expected %v to be added to activeQ.", highPriorityPod.Name)
|
||||
}
|
||||
if len(q.nominatedPods) != 0 {
|
||||
if len(q.nominatedPods.nominatedPods) != 0 {
|
||||
t.Errorf("Expected nomindatePods to be empty: %v", q.nominatedPods)
|
||||
}
|
||||
// Update highPriorityPod and add a nominatedNodeName to it.
|
||||
@ -200,7 +221,7 @@ func TestPriorityQueue_Update(t *testing.T) {
|
||||
if q.activeQ.data.Len() != 1 {
|
||||
t.Error("Expected only one item in activeQ.")
|
||||
}
|
||||
if len(q.nominatedPods) != 1 {
|
||||
if len(q.nominatedPods.nominatedPods) != 1 {
|
||||
t.Errorf("Expected one item in nomindatePods map: %v", q.nominatedPods)
|
||||
}
|
||||
// Updating an unschedulable pod which is not in any of the two queues, should
|
||||
@ -233,11 +254,13 @@ func TestPriorityQueue_Delete(t *testing.T) {
|
||||
if _, exists, _ := q.activeQ.Get(&highPriNominatedPod); exists {
|
||||
t.Errorf("Didn't expect %v to be in activeQ.", highPriorityPod.Name)
|
||||
}
|
||||
if len(q.nominatedPods) != 1 {
|
||||
t.Errorf("Expected nomindatePods to have only 'unschedulablePod': %v", q.nominatedPods)
|
||||
if len(q.nominatedPods.nominatedPods) != 1 {
|
||||
t.Errorf("Expected nomindatePods to have only 'unschedulablePod': %v", q.nominatedPods.nominatedPods)
|
||||
}
|
||||
q.Delete(&unschedulablePod)
|
||||
if len(q.nominatedPods) != 0 {
|
||||
if err := q.Delete(&unschedulablePod); err != nil {
|
||||
t.Errorf("delete failed: %v", err)
|
||||
}
|
||||
if len(q.nominatedPods.nominatedPods) != 0 {
|
||||
t.Errorf("Expected nomindatePods to be empty: %v", q.nominatedPods)
|
||||
}
|
||||
}
|
||||
@ -309,7 +332,7 @@ func TestPriorityQueue_AssignedPodAdded(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestPriorityQueue_WaitingPodsForNode(t *testing.T) {
|
||||
func TestPriorityQueue_NominatedPodsForNode(t *testing.T) {
|
||||
q := NewPriorityQueue()
|
||||
q.Add(&medPriorityPod)
|
||||
q.Add(&unschedulablePod)
|
||||
@ -318,14 +341,83 @@ func TestPriorityQueue_WaitingPodsForNode(t *testing.T) {
|
||||
t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Name)
|
||||
}
|
||||
expectedList := []*v1.Pod{&medPriorityPod, &unschedulablePod}
|
||||
if !reflect.DeepEqual(expectedList, q.WaitingPodsForNode("node1")) {
|
||||
if !reflect.DeepEqual(expectedList, q.NominatedPodsForNode("node1")) {
|
||||
t.Error("Unexpected list of nominated Pods for node.")
|
||||
}
|
||||
if q.WaitingPodsForNode("node2") != nil {
|
||||
if q.NominatedPodsForNode("node2") != nil {
|
||||
t.Error("Expected list of nominated Pods for node2 to be empty.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPriorityQueue_UpdateNominatedPodForNode(t *testing.T) {
|
||||
q := NewPriorityQueue()
|
||||
if err := q.Add(&medPriorityPod); err != nil {
|
||||
t.Errorf("add failed: %v", err)
|
||||
}
|
||||
// Update unschedulablePod on a different node than specified in the pod.
|
||||
q.UpdateNominatedPodForNode(&unschedulablePod, "node5")
|
||||
|
||||
// Update nominated node name of a pod on a node that is not specified in the pod object.
|
||||
q.UpdateNominatedPodForNode(&highPriorityPod, "node2")
|
||||
expectedNominatedPods := &nominatedPodMap{
|
||||
nominatedPodToNode: map[types.UID]string{
|
||||
medPriorityPod.UID: "node1",
|
||||
highPriorityPod.UID: "node2",
|
||||
unschedulablePod.UID: "node5",
|
||||
},
|
||||
nominatedPods: map[string][]*v1.Pod{
|
||||
"node1": {&medPriorityPod},
|
||||
"node2": {&highPriorityPod},
|
||||
"node5": {&unschedulablePod},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
|
||||
t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
|
||||
}
|
||||
if p, err := q.Pop(); err != nil || p != &medPriorityPod {
|
||||
t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Name)
|
||||
}
|
||||
// List of nominated pods shouldn't change after popping them from the queue.
|
||||
if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
|
||||
t.Errorf("Unexpected nominated map after popping pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
|
||||
}
|
||||
// Update one of the nominated pods that doesn't have nominatedNodeName in the
|
||||
// pod object. It should be updated correctly.
|
||||
q.UpdateNominatedPodForNode(&highPriorityPod, "node4")
|
||||
expectedNominatedPods = &nominatedPodMap{
|
||||
nominatedPodToNode: map[types.UID]string{
|
||||
medPriorityPod.UID: "node1",
|
||||
highPriorityPod.UID: "node4",
|
||||
unschedulablePod.UID: "node5",
|
||||
},
|
||||
nominatedPods: map[string][]*v1.Pod{
|
||||
"node1": {&medPriorityPod},
|
||||
"node4": {&highPriorityPod},
|
||||
"node5": {&unschedulablePod},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
|
||||
t.Errorf("Unexpected nominated map after updating pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
|
||||
}
|
||||
|
||||
// Delete a nominated pod that doesn't have nominatedNodeName in the pod
|
||||
// object. It should be deleted.
|
||||
q.DeleteNominatedPodIfExists(&highPriorityPod)
|
||||
expectedNominatedPods = &nominatedPodMap{
|
||||
nominatedPodToNode: map[types.UID]string{
|
||||
medPriorityPod.UID: "node1",
|
||||
unschedulablePod.UID: "node5",
|
||||
},
|
||||
nominatedPods: map[string][]*v1.Pod{
|
||||
"node1": {&medPriorityPod},
|
||||
"node5": {&unschedulablePod},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
|
||||
t.Errorf("Unexpected nominated map after deleting pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnschedulablePodsMap(t *testing.T) {
|
||||
var pods = []*v1.Pod{
|
||||
{
|
||||
@ -512,3 +604,56 @@ func TestSchedulingQueue_Close(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestRecentlyTriedPodsGoBack tests that pods which are recently tried and are
|
||||
// unschedulable go behind other pods with the same priority. This behavior
|
||||
// ensures that an unschedulable pod does not block head of the queue when there
|
||||
// are frequent events that move pods to the active queue.
|
||||
func TestRecentlyTriedPodsGoBack(t *testing.T) {
|
||||
q := NewPriorityQueue()
|
||||
// Add a few pods to priority queue.
|
||||
for i := 0; i < 5; i++ {
|
||||
p := v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("test-pod-%v", i),
|
||||
Namespace: "ns1",
|
||||
UID: types.UID(fmt.Sprintf("tp00%v", i)),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Priority: &highPriority,
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
NominatedNodeName: "node1",
|
||||
},
|
||||
}
|
||||
q.Add(&p)
|
||||
}
|
||||
// Simulate a pod being popped by the scheduler, determined unschedulable, and
|
||||
// then moved back to the active queue.
|
||||
p1, err := q.Pop()
|
||||
if err != nil {
|
||||
t.Errorf("Error while popping the head of the queue: %v", err)
|
||||
}
|
||||
// Update pod condition to unschedulable.
|
||||
podutil.UpdatePodCondition(&p1.Status, &v1.PodCondition{
|
||||
Type: v1.PodScheduled,
|
||||
Status: v1.ConditionFalse,
|
||||
Reason: v1.PodReasonUnschedulable,
|
||||
Message: "fake scheduling failure",
|
||||
})
|
||||
// Put in the unschedulable queue.
|
||||
q.AddUnschedulableIfNotPresent(p1)
|
||||
// Move all unschedulable pods to the active queue.
|
||||
q.MoveAllToActiveQueue()
|
||||
// Simulation is over. Now let's pop all pods. The pod popped first should be
|
||||
// the last one we pop here.
|
||||
for i := 0; i < 5; i++ {
|
||||
p, err := q.Pop()
|
||||
if err != nil {
|
||||
t.Errorf("Error while popping pods from the queue: %v", err)
|
||||
}
|
||||
if (i == 4) != (p1 == p) {
|
||||
t.Errorf("A pod tried before is not the last pod popped: i: %v, pod name: %v", i, p.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
8 vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go generated vendored
@ -328,11 +328,19 @@ func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, e
|
||||
var nodeName = ""
|
||||
if node != nil {
|
||||
nodeName = node.Name
|
||||
// Update the scheduling queue with the nominated pod information. Without
|
||||
// this, there would be a race condition between the next scheduling cycle
|
||||
// and the time the scheduler receives a Pod Update for the nominated pod.
|
||||
sched.config.SchedulingQueue.UpdateNominatedPodForNode(preemptor, nodeName)
|
||||
|
||||
// Make a call to update nominated node name of the pod on the API server.
|
||||
err = sched.config.PodPreemptor.SetNominatedNodeName(preemptor, nodeName)
|
||||
if err != nil {
|
||||
klog.Errorf("Error in preemption process. Cannot update pod %v/%v annotations: %v", preemptor.Namespace, preemptor.Name, err)
|
||||
sched.config.SchedulingQueue.DeleteNominatedPodIfExists(preemptor)
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, victim := range victims {
|
||||
if err := sched.config.PodPreemptor.DeletePod(victim); err != nil {
|
||||
klog.Errorf("Error preempting pod %v/%v: %v", victim.Namespace, victim.Name, err)
1 vendor/k8s.io/kubernetes/pkg/scheduler/testing/BUILD generated vendored
@ -18,7 +18,6 @@ go_library(
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/core/install:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/scheduler/internal/cache:go_default_library",
|
||||
"//staging/src/k8s.io/api/apps/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
|
||||
|
3 vendor/k8s.io/kubernetes/pkg/scheduler/testing/fake_lister.go generated vendored
@ -26,7 +26,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
|
||||
)
|
||||
|
||||
var _ algorithm.NodeLister = &FakeNodeLister{}
|
||||
@ -55,7 +54,7 @@ func (f FakePodLister) List(s labels.Selector) (selected []*v1.Pod, err error) {
|
||||
}
|
||||
|
||||
// FilteredList returns pods matching a pod filter and a label selector.
|
||||
func (f FakePodLister) FilteredList(podFilter schedulerinternalcache.PodFilter, s labels.Selector) (selected []*v1.Pod, err error) {
|
||||
func (f FakePodLister) FilteredList(podFilter algorithm.PodFilter, s labels.Selector) (selected []*v1.Pod, err error) {
|
||||
for _, pod := range f {
|
||||
if podFilter(pod) && s.Matches(labels.Set(pod.Labels)) {
|
||||
selected = append(selected, pod)
|
||||
|
23 vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go generated vendored
@ -23,6 +23,7 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
libipvs "github.com/docker/libnetwork/ipvs"
|
||||
@ -34,6 +35,7 @@ import (
|
||||
type runner struct {
|
||||
exec utilexec.Interface
|
||||
ipvsHandle *libipvs.Handle
|
||||
mu sync.Mutex // Protect Netlink calls
|
||||
}
|
||||
|
||||
// Protocol is the IPVS service protocol type
|
||||
@ -58,6 +60,8 @@ func (runner *runner) AddVirtualServer(vs *VirtualServer) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runner.mu.Lock()
|
||||
defer runner.mu.Unlock()
|
||||
return runner.ipvsHandle.NewService(svc)
|
||||
}
|
||||
|
||||
@ -67,6 +71,8 @@ func (runner *runner) UpdateVirtualServer(vs *VirtualServer) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runner.mu.Lock()
|
||||
defer runner.mu.Unlock()
|
||||
return runner.ipvsHandle.UpdateService(svc)
|
||||
}
|
||||
|
||||
@ -76,6 +82,8 @@ func (runner *runner) DeleteVirtualServer(vs *VirtualServer) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runner.mu.Lock()
|
||||
defer runner.mu.Unlock()
|
||||
return runner.ipvsHandle.DelService(svc)
|
||||
}
|
||||
|
||||
@ -85,7 +93,10 @@ func (runner *runner) GetVirtualServer(vs *VirtualServer) (*VirtualServer, error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
runner.mu.Lock()
|
||||
ipvsSvc, err := runner.ipvsHandle.GetService(svc)
|
||||
runner.mu.Unlock()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -98,7 +109,9 @@ func (runner *runner) GetVirtualServer(vs *VirtualServer) (*VirtualServer, error
|
||||
|
||||
// GetVirtualServers is part of ipvs.Interface.
|
||||
func (runner *runner) GetVirtualServers() ([]*VirtualServer, error) {
|
||||
runner.mu.Lock()
|
||||
ipvsSvcs, err := runner.ipvsHandle.GetServices()
|
||||
runner.mu.Unlock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -115,6 +128,8 @@ func (runner *runner) GetVirtualServers() ([]*VirtualServer, error) {
|
||||
|
||||
// Flush is part of ipvs.Interface. Currently we delete IPVS services one by one
|
||||
func (runner *runner) Flush() error {
|
||||
runner.mu.Lock()
|
||||
defer runner.mu.Unlock()
|
||||
return runner.ipvsHandle.Flush()
|
||||
}
|
||||
|
||||
@ -128,6 +143,8 @@ func (runner *runner) AddRealServer(vs *VirtualServer, rs *RealServer) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runner.mu.Lock()
|
||||
defer runner.mu.Unlock()
|
||||
return runner.ipvsHandle.NewDestination(svc, dst)
|
||||
}
|
||||
|
||||
@ -141,6 +158,8 @@ func (runner *runner) DeleteRealServer(vs *VirtualServer, rs *RealServer) error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runner.mu.Lock()
|
||||
defer runner.mu.Unlock()
|
||||
return runner.ipvsHandle.DelDestination(svc, dst)
|
||||
}
|
||||
|
||||
@ -153,6 +172,8 @@ func (runner *runner) UpdateRealServer(vs *VirtualServer, rs *RealServer) error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runner.mu.Lock()
|
||||
defer runner.mu.Unlock()
|
||||
return runner.ipvsHandle.UpdateDestination(svc, dst)
|
||||
}
|
||||
|
||||
@ -162,7 +183,9 @@ func (runner *runner) GetRealServers(vs *VirtualServer) ([]*RealServer, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
runner.mu.Lock()
|
||||
dsts, err := runner.ipvsHandle.GetDestinations(svc)
|
||||
runner.mu.Unlock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
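The same guard is applied to every libipvs call in this file. As a standalone sketch (the handle interface here is an assumption used only to keep the example self-contained), the pattern is: take the mutex, issue exactly one netlink call, release it.

package sketch

import "sync"

// ipvsHandle is a stand-in for *libipvs.Handle, which is not safe for
// concurrent netlink use.
type ipvsHandle interface {
	Flush() error
}

type runner struct {
	mu         sync.Mutex // serializes all netlink calls
	ipvsHandle ipvsHandle
}

func (r *runner) Flush() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.ipvsHandle.Flush()
}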
2 vendor/k8s.io/kubernetes/pkg/util/mount/BUILD generated vendored
@ -9,6 +9,7 @@ go_library(
|
||||
"exec_mount_unsupported.go",
|
||||
"fake.go",
|
||||
"mount.go",
|
||||
"mount_helper.go",
|
||||
"mount_linux.go",
|
||||
"mount_unsupported.go",
|
||||
"mount_windows.go",
|
||||
@ -67,6 +68,7 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"exec_mount_test.go",
|
||||
"mount_helper_test.go",
|
||||
"mount_linux_test.go",
|
||||
"mount_test.go",
|
||||
"mount_windows_test.go",
|
||||
|
10 vendor/k8s.io/kubernetes/pkg/util/mount/fake.go generated vendored
@ -30,6 +30,8 @@ type FakeMounter struct {
|
||||
MountPoints []MountPoint
|
||||
Log []FakeAction
|
||||
Filesystem map[string]FileType
|
||||
// Error to return for a path when calling IsLikelyNotMountPoint
|
||||
MountCheckErrors map[string]error
|
||||
// Some tests run things in parallel, make sure the mounter does not produce
|
||||
// any golang's DATA RACE warnings.
|
||||
mutex sync.Mutex
|
||||
@ -119,6 +121,7 @@ func (f *FakeMounter) Unmount(target string) error {
|
||||
}
|
||||
f.MountPoints = newMountpoints
|
||||
f.Log = append(f.Log, FakeAction{Action: FakeActionUnmount, Target: absTarget})
|
||||
delete(f.MountCheckErrors, target)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -141,7 +144,12 @@ func (f *FakeMounter) IsLikelyNotMountPoint(file string) (bool, error) {
|
||||
f.mutex.Lock()
|
||||
defer f.mutex.Unlock()
|
||||
|
||||
_, err := os.Stat(file)
|
||||
err := f.MountCheckErrors[file]
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
_, err = os.Stat(file)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
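A sketch of how a test can use the new MountCheckErrors field (the path is made up): the injected error is returned before any stat of the path, which lets tests simulate corrupted mounts such as ESTALE.

package mount

import (
	"os"
	"syscall"
	"testing"
)

func TestInjectedMountCheckError(t *testing.T) {
	fake := &FakeMounter{
		MountPoints:      []MountPoint{{Device: "/dev/sdb", Path: "/mnt/fake"}},
		MountCheckErrors: map[string]error{"/mnt/fake": os.NewSyscallError("stat", syscall.ESTALE)},
	}
	// IsLikelyNotMountPoint must surface the injected error instead of stat-ing the path.
	if _, err := fake.IsLikelyNotMountPoint("/mnt/fake"); err == nil {
		t.Fatal("expected the injected ESTALE error")
	}
}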
124 vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper.go generated vendored Normal file
@ -0,0 +1,124 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// CleanupMountPoint unmounts the given path and
|
||||
// deletes the remaining directory if successful.
|
||||
// if extensiveMountPointCheck is true
|
||||
// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
|
||||
// IsNotMountPoint is more expensive but properly handles bind mounts within the same fs.
|
||||
func CleanupMountPoint(mountPath string, mounter Interface, extensiveMountPointCheck bool) error {
|
||||
// mounter.ExistsPath cannot be used because for containerized kubelet, we need to check
|
||||
// the path in the kubelet container, not on the host.
|
||||
pathExists, pathErr := PathExists(mountPath)
|
||||
if !pathExists {
|
||||
klog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath)
|
||||
return nil
|
||||
}
|
||||
corruptedMnt := IsCorruptedMnt(pathErr)
|
||||
if pathErr != nil && !corruptedMnt {
|
||||
return fmt.Errorf("Error checking path: %v", pathErr)
|
||||
}
|
||||
return doCleanupMountPoint(mountPath, mounter, extensiveMountPointCheck, corruptedMnt)
|
||||
}
|
||||
|
||||
// doCleanupMountPoint unmounts the given path and
|
||||
// deletes the remaining directory if successful.
|
||||
// if extensiveMountPointCheck is true
|
||||
// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
|
||||
// IsNotMountPoint is more expensive but properly handles bind mounts within the same fs.
|
||||
// if corruptedMnt is true, it means that the mountPath is a corrupted mountpoint, and the mount point check
|
||||
// will be skipped
|
||||
func doCleanupMountPoint(mountPath string, mounter Interface, extensiveMountPointCheck bool, corruptedMnt bool) error {
|
||||
if !corruptedMnt {
|
||||
var notMnt bool
|
||||
var err error
|
||||
if extensiveMountPointCheck {
|
||||
notMnt, err = IsNotMountPoint(mounter, mountPath)
|
||||
} else {
|
||||
notMnt, err = mounter.IsLikelyNotMountPoint(mountPath)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if notMnt {
|
||||
klog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath)
|
||||
return os.Remove(mountPath)
|
||||
}
|
||||
}
|
||||
|
||||
// Unmount the mount path
|
||||
klog.V(4).Infof("%q is a mountpoint, unmounting", mountPath)
|
||||
if err := mounter.Unmount(mountPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath)
|
||||
if mntErr != nil {
|
||||
return mntErr
|
||||
}
|
||||
if notMnt {
|
||||
klog.V(4).Infof("%q is unmounted, deleting the directory", mountPath)
|
||||
return os.Remove(mountPath)
|
||||
}
|
||||
return fmt.Errorf("Failed to unmount path %v", mountPath)
|
||||
}
|
||||
|
||||
// TODO: clean this up to use pkg/util/file/FileExists
|
||||
// PathExists returns true if the specified path exists.
|
||||
func PathExists(path string) (bool, error) {
|
||||
_, err := os.Stat(path)
|
||||
if err == nil {
|
||||
return true, nil
|
||||
} else if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
} else if IsCorruptedMnt(err) {
|
||||
return true, err
|
||||
} else {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
// IsCorruptedMnt return true if err is about corrupted mount point
|
||||
func IsCorruptedMnt(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
var underlyingError error
|
||||
switch pe := err.(type) {
|
||||
case nil:
|
||||
return false
|
||||
case *os.PathError:
|
||||
underlyingError = pe.Err
|
||||
case *os.LinkError:
|
||||
underlyingError = pe.Err
|
||||
case *os.SyscallError:
|
||||
underlyingError = pe.Err
|
||||
}
|
||||
|
||||
return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO
|
||||
}
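An illustrative caller (the mounter and directory are assumptions): volume teardown code can delegate the unmount-then-remove sequence to the new helper; passing true selects the more thorough IsNotMountPoint check, which handles bind mounts inside the same filesystem.

package sketch

import "k8s.io/kubernetes/pkg/util/mount"

// tearDownVolume unmounts dir if it is still a mount point and removes the directory.
func tearDownVolume(m mount.Interface, dir string) error {
	return mount.CleanupMountPoint(dir, m, true /* extensiveMountPointCheck */)
}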
152 vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper_test.go generated vendored Normal file
@ -0,0 +1,152 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDoCleanupMountPoint(t *testing.T) {
|
||||
const testMount = "test-mount"
|
||||
const defaultPerm = 0750
|
||||
|
||||
tests := map[string]struct {
|
||||
corruptedMnt bool
|
||||
// Function that prepares the directory structure for the test under
|
||||
// the given base directory.
|
||||
// Returns a fake MountPoint, a fake error for the mount point,
|
||||
// and error if the prepare function encountered a fatal error.
|
||||
prepare func(base string) (MountPoint, error, error)
|
||||
expectErr bool
|
||||
}{
|
||||
"mount-ok": {
|
||||
prepare: func(base string) (MountPoint, error, error) {
|
||||
path := filepath.Join(base, testMount)
|
||||
if err := os.MkdirAll(path, defaultPerm); err != nil {
|
||||
return MountPoint{}, nil, err
|
||||
}
|
||||
return MountPoint{Device: "/dev/sdb", Path: path}, nil, nil
|
||||
},
|
||||
},
|
||||
"mount-corrupted": {
|
||||
prepare: func(base string) (MountPoint, error, error) {
|
||||
path := filepath.Join(base, testMount)
|
||||
if err := os.MkdirAll(path, defaultPerm); err != nil {
|
||||
return MountPoint{}, nil, err
|
||||
}
|
||||
return MountPoint{Device: "/dev/sdb", Path: path}, os.NewSyscallError("fake", syscall.ESTALE), nil
|
||||
},
|
||||
corruptedMnt: true,
|
||||
},
|
||||
"mount-err-not-corrupted": {
|
||||
prepare: func(base string) (MountPoint, error, error) {
|
||||
path := filepath.Join(base, testMount)
|
||||
if err := os.MkdirAll(path, defaultPerm); err != nil {
|
||||
return MountPoint{}, nil, err
|
||||
}
|
||||
return MountPoint{Device: "/dev/sdb", Path: path}, os.NewSyscallError("fake", syscall.ETIMEDOUT), nil
|
||||
},
|
||||
expectErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
|
||||
tmpDir, err := ioutil.TempDir("", "unmount-mount-point-test")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create tmpdir: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
if tt.prepare == nil {
|
||||
t.Fatalf("prepare function required")
|
||||
}
|
||||
|
||||
mountPoint, mountError, err := tt.prepare(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to prepare test: %v", err)
|
||||
}
|
||||
|
||||
fake := &FakeMounter{
|
||||
MountPoints: []MountPoint{mountPoint},
|
||||
MountCheckErrors: map[string]error{mountPoint.Path: mountError},
|
||||
}
|
||||
|
||||
err = doCleanupMountPoint(mountPoint.Path, fake, true, tt.corruptedMnt)
|
||||
if tt.expectErr {
|
||||
if err == nil {
|
||||
t.Errorf("test %s failed, expected error, got none", name)
|
||||
}
|
||||
if err := validateDirExists(mountPoint.Path); err != nil {
|
||||
t.Errorf("test %s failed, mount path doesn't exist: %v", name, err)
|
||||
}
|
||||
}
|
||||
if !tt.expectErr {
|
||||
if err != nil {
|
||||
t.Errorf("test %s failed: %v", name, err)
|
||||
}
|
||||
if err := validateDirNotExists(mountPoint.Path); err != nil {
|
||||
t.Errorf("test %s failed, mount path still exists: %v", name, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func validateDirEmpty(dir string) error {
|
||||
files, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(files) != 0 {
|
||||
return fmt.Errorf("Directory %q is not empty", dir)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateDirExists(dir string) error {
|
||||
_, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateDirNotExists(dir string) error {
|
||||
_, err := ioutil.ReadDir(dir)
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("dir %q still exists", dir)
|
||||
}
|
||||
|
||||
func validateFileExists(file string) error {
|
||||
if _, err := os.Stat(file); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
40 vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go generated vendored
@ -55,6 +55,7 @@ const (
fsckErrorsUncorrected = 4

// place for subpath mounts
// TODO: pass in directory using kubelet_getters instead
containerSubPathDirectoryName = "volume-subpaths"
// syscall.Openat flags used to traverse directories not following symlinks
nofollowFlags = unix.O_RDONLY | unix.O_NOFOLLOW
@ -890,15 +891,22 @@ func doCleanSubPaths(mounter Interface, podDir string, volumeName string) error

// scan /var/lib/kubelet/pods/<uid>/volume-subpaths/<volume>/<container name>/*
fullContainerDirPath := filepath.Join(subPathDir, containerDir.Name())
subPaths, err := ioutil.ReadDir(fullContainerDirPath)
if err != nil {
return fmt.Errorf("error reading %s: %s", fullContainerDirPath, err)
}
for _, subPath := range subPaths {
if err = doCleanSubPath(mounter, fullContainerDirPath, subPath.Name()); err != nil {
err = filepath.Walk(fullContainerDirPath, func(path string, info os.FileInfo, err error) error {
if path == fullContainerDirPath {
// Skip top level directory
return nil
}

// pass through errors and let doCleanSubPath handle them
if err = doCleanSubPath(mounter, fullContainerDirPath, filepath.Base(path)); err != nil {
return err
}
return nil
})
if err != nil {
return fmt.Errorf("error processing %s: %s", fullContainerDirPath, err)
}

// Whole container has been processed, remove its directory.
if err := os.Remove(fullContainerDirPath); err != nil {
return fmt.Errorf("error deleting %s: %s", fullContainerDirPath, err)
@ -925,22 +933,12 @@ func doCleanSubPath(mounter Interface, fullContainerDirPath, subPathIndex string
// process /var/lib/kubelet/pods/<uid>/volume-subpaths/<volume>/<container name>/<subPathName>
klog.V(4).Infof("Cleaning up subpath mounts for subpath %v", subPathIndex)
fullSubPath := filepath.Join(fullContainerDirPath, subPathIndex)
notMnt, err := IsNotMountPoint(mounter, fullSubPath)
if err != nil {
return fmt.Errorf("error checking %s for mount: %s", fullSubPath, err)

if err := CleanupMountPoint(fullSubPath, mounter, true); err != nil {
return fmt.Errorf("error cleaning subpath mount %s: %s", fullSubPath, err)
}
// Unmount it
if !notMnt {
if err = mounter.Unmount(fullSubPath); err != nil {
return fmt.Errorf("error unmounting %s: %s", fullSubPath, err)
}
klog.V(5).Infof("Unmounted %s", fullSubPath)
}
// Remove it *non*-recursively, just in case there were some hiccups.
if err = os.Remove(fullSubPath); err != nil {
return fmt.Errorf("error deleting %s: %s", fullSubPath, err)
}
klog.V(5).Infof("Removed %s", fullSubPath)

klog.V(4).Infof("Successfully cleaned subpath directory %s", fullSubPath)
return nil
}

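For readers following the doCleanSubPaths change above (a ReadDir loop replaced by a filepath.Walk that skips the top-level directory, hands every entry to the cleanup callback, and finally removes the now-empty container directory), here is a minimal standalone sketch of that traversal pattern. cleanEntry and the demo directory are hypothetical stand-ins, not the kubelet's actual helpers.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// cleanEntry stands in for doCleanSubPath: it handles (here: removes) one entry.
func cleanEntry(parent, name string) error {
	return os.Remove(filepath.Join(parent, name))
}

// cleanDir walks dir, skips the directory itself, passes every entry to
// cleanEntry, and removes the directory once it has been emptied.
func cleanDir(dir string) error {
	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if path == dir {
			// Skip top level directory, as in the hunk above.
			return nil
		}
		return cleanEntry(dir, filepath.Base(path))
	})
	if err != nil {
		return fmt.Errorf("error processing %s: %s", dir, err)
	}
	return os.Remove(dir)
}

func main() {
	dir, _ := ioutil.TempDir("", "walk-demo")
	ioutil.WriteFile(filepath.Join(dir, "entry"), []byte("x"), 0644)
	fmt.Println(cleanDir(dir)) // <nil>
}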
38
vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux_test.go
generated
vendored
38
vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux_test.go
generated
vendored
@ -666,44 +666,6 @@ func TestSafeMakeDir(t *testing.T) {
}
}

func validateDirEmpty(dir string) error {
files, err := ioutil.ReadDir(dir)
if err != nil {
return err
}

if len(files) != 0 {
return fmt.Errorf("Directory %q is not empty", dir)
}
return nil
}

func validateDirExists(dir string) error {
_, err := ioutil.ReadDir(dir)
if err != nil {
return err
}
return nil
}

func validateDirNotExists(dir string) error {
_, err := ioutil.ReadDir(dir)
if os.IsNotExist(err) {
return nil
}
if err != nil {
return err
}
return fmt.Errorf("dir %q still exists", dir)
}

func validateFileExists(file string) error {
if _, err := os.Stat(file); err != nil {
return err
}
return nil
}

func TestRemoveEmptyDirs(t *testing.T) {
defaultPerm := os.FileMode(0750)
tests := []struct {
2
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go
generated
vendored
@ -40,7 +40,7 @@ import (
const (
defaultStorageAccountType = compute.StandardLRS
defaultAzureDiskKind = v1.AzureManagedDisk
defaultAzureDataDiskCachingMode = v1.AzureDataDiskCachingNone
defaultAzureDataDiskCachingMode = v1.AzureDataDiskCachingReadOnly
)

type dataDisk struct {
2
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go
generated
vendored
@ -187,7 +187,7 @@ func getMaxDataDiskCount(instanceType string, sizeList *[]compute.VirtualMachine
continue
}
if strings.ToUpper(*size.Name) == vmsize {
klog.V(2).Infof("got a matching size in getMaxDataDiskCount, Name: %s, MaxDataDiskCount: %d", *size.Name, *size.MaxDataDiskCount)
klog.V(12).Infof("got a matching size in getMaxDataDiskCount, Name: %s, MaxDataDiskCount: %d", *size.Name, *size.MaxDataDiskCount)
return int64(*size.MaxDataDiskCount)
}
}
15
vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go
generated
vendored
15
vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go
generated
vendored
@ -20,6 +20,7 @@ import (
"fmt"
"os"

volumeclient "github.com/libopenstorage/openstorage/api/client/volume"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -43,7 +44,7 @@ func ProbeVolumePlugins() []volume.VolumePlugin {

type portworxVolumePlugin struct {
host volume.VolumeHost
util *PortworxVolumeUtil
util *portworxVolumeUtil
}

var _ volume.VolumePlugin = &portworxVolumePlugin{}
@ -61,8 +62,18 @@ func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
}

func (plugin *portworxVolumePlugin) Init(host volume.VolumeHost) error {
client, err := volumeclient.NewDriverClient(
fmt.Sprintf("http://%s:%d", host.GetHostName(), osdMgmtDefaultPort),
pxdDriverName, osdDriverVersion, pxDriverName)
if err != nil {
return err
}

plugin.host = host
plugin.util = &PortworxVolumeUtil{}
plugin.util = &portworxVolumeUtil{
portworxClient: client,
}

return nil
}

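The Init change above moves client construction to plugin initialization: the endpoint is built from the host name and the default management port, and the resulting client is cached on the util struct for later calls. A rough sketch of that construct-once-and-cache shape, with driverClient and newDriverClient as hypothetical stand-ins for the openstorage client:

package main

import "fmt"

// driverClient is a hypothetical stand-in for *osdclient.Client.
type driverClient struct{ endpoint string }

// newDriverClient stands in for volumeclient.NewDriverClient: it only builds
// the endpoint URL the way the Init hunk above does.
func newDriverClient(host string, port int) (*driverClient, error) {
	return &driverClient{endpoint: fmt.Sprintf("http://%s:%d", host, port)}, nil
}

const mgmtDefaultPort = 9001

type volumeUtil struct{ client *driverClient }

type plugin struct {
	host string
	util *volumeUtil
}

// Init builds the client once and caches it on the util, mirroring the diff.
func (p *plugin) Init(host string) error {
	c, err := newDriverClient(host, mgmtDefaultPort)
	if err != nil {
		return err
	}
	p.host = host
	p.util = &volumeUtil{client: c}
	return nil
}

func main() {
	p := &plugin{}
	if err := p.Init("node-1"); err != nil {
		panic(err)
	}
	fmt.Println(p.util.client.endpoint) // http://node-1:9001
}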
170
vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go
generated
vendored
170
vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go
generated
vendored
@ -34,22 +34,22 @@ import (
)

const (
osdMgmtPort = "9001"
osdDriverVersion = "v1"
pxdDriverName = "pxd"
pvcClaimLabel = "pvc"
pvcNamespaceLabel = "namespace"
pxServiceName = "portworx-service"
pxDriverName = "pxd-sched"
osdMgmtDefaultPort = 9001
osdDriverVersion = "v1"
pxdDriverName = "pxd"
pvcClaimLabel = "pvc"
pvcNamespaceLabel = "namespace"
pxServiceName = "portworx-service"
pxDriverName = "pxd-sched"
)

type PortworxVolumeUtil struct {
type portworxVolumeUtil struct {
portworxClient *osdclient.Client
}

// CreateVolume creates a Portworx volume.
func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (string, int64, map[string]string, error) {
driver, err := util.getPortworxDriver(p.plugin.host, false /*localOnly*/)
func (util *portworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (string, int64, map[string]string, error) {
driver, err := util.getPortworxDriver(p.plugin.host)
if err != nil || driver == nil {
klog.Errorf("Failed to get portworx driver. Err: %v", err)
return "", 0, nil, err
@ -112,8 +112,8 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri
}

// DeleteVolume deletes a Portworx volume
func (util *PortworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error {
driver, err := util.getPortworxDriver(d.plugin.host, false /*localOnly*/)
func (util *portworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error {
driver, err := util.getPortworxDriver(d.plugin.host)
if err != nil || driver == nil {
klog.Errorf("Failed to get portworx driver. Err: %v", err)
return err
@ -128,8 +128,8 @@ func (util *PortworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error {
}

// AttachVolume attaches a Portworx Volume
func (util *PortworxVolumeUtil) AttachVolume(m *portworxVolumeMounter, attachOptions map[string]string) (string, error) {
driver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/)
func (util *portworxVolumeUtil) AttachVolume(m *portworxVolumeMounter, attachOptions map[string]string) (string, error) {
driver, err := util.getLocalPortworxDriver(m.plugin.host)
if err != nil || driver == nil {
klog.Errorf("Failed to get portworx driver. Err: %v", err)
return "", err
@ -144,8 +144,8 @@ func (util *PortworxVolumeUtil) AttachVolume(m *portworxVolumeMounter, attachOpt
}

// DetachVolume detaches a Portworx Volume
func (util *PortworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error {
driver, err := util.getPortworxDriver(u.plugin.host, true /*localOnly*/)
func (util *portworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error {
driver, err := util.getLocalPortworxDriver(u.plugin.host)
if err != nil || driver == nil {
klog.Errorf("Failed to get portworx driver. Err: %v", err)
return err
@ -160,8 +160,8 @@ func (util *PortworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error {
}

// MountVolume mounts a Portworx Volume on the specified mountPath
func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath string) error {
driver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/)
func (util *portworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath string) error {
driver, err := util.getLocalPortworxDriver(m.plugin.host)
if err != nil || driver == nil {
klog.Errorf("Failed to get portworx driver. Err: %v", err)
return err
@ -176,8 +176,8 @@ func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath
}

// UnmountVolume unmounts a Portworx Volume
func (util *PortworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountPath string) error {
driver, err := util.getPortworxDriver(u.plugin.host, true /*localOnly*/)
func (util *portworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountPath string) error {
driver, err := util.getLocalPortworxDriver(u.plugin.host)
if err != nil || driver == nil {
klog.Errorf("Failed to get portworx driver. Err: %v", err)
return err
@ -191,8 +191,8 @@ func (util *PortworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountP
return nil
}

func (util *PortworxVolumeUtil) ResizeVolume(spec *volume.Spec, newSize resource.Quantity, volumeHost volume.VolumeHost) error {
driver, err := util.getPortworxDriver(volumeHost, false /*localOnly*/)
func (util *portworxVolumeUtil) ResizeVolume(spec *volume.Spec, newSize resource.Quantity, volumeHost volume.VolumeHost) error {
driver, err := util.getPortworxDriver(volumeHost)
if err != nil || driver == nil {
klog.Errorf("Failed to get portworx driver. Err: %v", err)
return err
@ -254,8 +254,8 @@ func isClientValid(client *osdclient.Client) (bool, error) {
return true, nil
}

func createDriverClient(hostname string) (*osdclient.Client, error) {
client, err := volumeclient.NewDriverClient("http://"+hostname+":"+osdMgmtPort,
func createDriverClient(hostname string, port int32) (*osdclient.Client, error) {
client, err := volumeclient.NewDriverClient(fmt.Sprintf("http://%s:%d", hostname, port),
pxdDriverName, osdDriverVersion, pxDriverName)
if err != nil {
return nil, err
@ -268,65 +268,105 @@ func createDriverClient(hostname string) (*osdclient.Client, error) {
}
}

// getPortworxDriver() returns a Portworx volume driver which can be used for volume operations
// localOnly: If true, the returned driver will be connected to Portworx API server on volume host.
// If false, driver will be connected to API server on volume host or Portworx k8s service cluster IP
// This flag is required to explicitly force certain operations (mount, unmount, detach, attach) to
// go to the volume host instead of the k8s service which might route it to any host. This pertains to how
// Portworx mounts and attaches a volume to the running container. The node getting these requests needs to
// see the pod container mounts (specifically /var/lib/kubelet/pods/<pod_id>)
// Operations like create and delete volume don't need to be restricted to local volume host since
// any node in the Portworx cluster can co-ordinate the create/delete request and forward the operations to
// the Portworx node that will own/owns the data.
func (util *PortworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost, localOnly bool) (volumeapi.VolumeDriver, error) {
var err error
if localOnly {
util.portworxClient, err = createDriverClient(volumeHost.GetHostName())
if err != nil {
return nil, err
} else {
klog.V(4).Infof("Using portworx local service at: %v as api endpoint", volumeHost.GetHostName())
return volumeclient.VolumeDriver(util.portworxClient), nil
}
}

// getPortworxDriver returns a Portworx volume driver which can be used for cluster wide operations.
// Operations like create and delete volume don't need to be restricted to local volume host since
// any node in the Portworx cluster can co-ordinate the create/delete request and forward the operations to
// the Portworx node that will own/owns the data.
func (util *portworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost) (volumeapi.VolumeDriver, error) {
// check if existing saved client is valid
if isValid, _ := isClientValid(util.portworxClient); isValid {
return volumeclient.VolumeDriver(util.portworxClient), nil
}

// create new client
util.portworxClient, err = createDriverClient(volumeHost.GetHostName()) // for backward compatibility
var err error
util.portworxClient, err = createDriverClient(volumeHost.GetHostName(), osdMgmtDefaultPort) // for backward compatibility
if err != nil || util.portworxClient == nil {
// Create client from portworx service
kubeClient := volumeHost.GetKubeClient()
if kubeClient == nil {
klog.Error("Failed to get kubeclient when creating portworx client")
return nil, nil
}

opts := metav1.GetOptions{}
svc, err := kubeClient.CoreV1().Services(api.NamespaceSystem).Get(pxServiceName, opts)
// Create client from portworx k8s service.
svc, err := getPortworxService(volumeHost)
if err != nil {
klog.Errorf("Failed to get service. Err: %v", err)
return nil, err
}

if svc == nil {
klog.Errorf("Service: %v not found. Consult Portworx docs to deploy it.", pxServiceName)
return nil, err
}

util.portworxClient, err = createDriverClient(svc.Spec.ClusterIP)
// The port here is always the default one since it's the service port
util.portworxClient, err = createDriverClient(svc.Spec.ClusterIP, osdMgmtDefaultPort)
if err != nil || util.portworxClient == nil {
klog.Errorf("Failed to connect to portworx service. Err: %v", err)
return nil, err
}

klog.Infof("Using portworx cluster service at: %v as api endpoint", svc.Spec.ClusterIP)
klog.Infof("Using portworx cluster service at: %v:%d as api endpoint",
svc.Spec.ClusterIP, osdMgmtDefaultPort)
} else {
klog.Infof("Using portworx service at: %v as api endpoint", volumeHost.GetHostName())
klog.Infof("Using portworx service at: %v:%d as api endpoint",
volumeHost.GetHostName(), osdMgmtDefaultPort)
}

return volumeclient.VolumeDriver(util.portworxClient), nil
}

// getLocalPortworxDriver returns driver connected to Portworx API server on volume host.
// This is required to force certain operations (mount, unmount, detach, attach) to
// go to the volume host instead of the k8s service which might route it to any host. This pertains to how
// Portworx mounts and attaches a volume to the running container. The node getting these requests needs to
// see the pod container mounts (specifically /var/lib/kubelet/pods/<pod_id>)
func (util *portworxVolumeUtil) getLocalPortworxDriver(volumeHost volume.VolumeHost) (volumeapi.VolumeDriver, error) {
if util.portworxClient != nil {
// check if existing saved client is valid
if isValid, _ := isClientValid(util.portworxClient); isValid {
return volumeclient.VolumeDriver(util.portworxClient), nil
}
}

// Lookup port
svc, err := getPortworxService(volumeHost)
if err != nil {
return nil, err
}

osgMgmtPort := lookupPXAPIPortFromService(svc)
util.portworxClient, err = createDriverClient(volumeHost.GetHostName(), osgMgmtPort)
if err != nil {
return nil, err
}

klog.Infof("Using portworx local service at: %v:%d as api endpoint",
volumeHost.GetHostName(), osgMgmtPort)
return volumeclient.VolumeDriver(util.portworxClient), nil
}

// lookupPXAPIPortFromService goes over all the ports in the given service and returns the target
// port for osdMgmtDefaultPort
func lookupPXAPIPortFromService(svc *v1.Service) int32 {
for _, p := range svc.Spec.Ports {
if p.Port == osdMgmtDefaultPort {
return p.TargetPort.IntVal
}
}
return osdMgmtDefaultPort // default
}

// getPortworxService returns the portworx cluster service from the API server
func getPortworxService(host volume.VolumeHost) (*v1.Service, error) {
kubeClient := host.GetKubeClient()
if kubeClient == nil {
err := fmt.Errorf("Failed to get kubeclient when creating portworx client")
klog.Errorf(err.Error())
return nil, err
}

opts := metav1.GetOptions{}
svc, err := kubeClient.CoreV1().Services(api.NamespaceSystem).Get(pxServiceName, opts)
if err != nil {
klog.Errorf("Failed to get service. Err: %v", err)
return nil, err
}

if svc == nil {
err = fmt.Errorf("Service: %v not found. Consult Portworx docs to deploy it.", pxServiceName)
klog.Errorf(err.Error())
return nil, err
}

return svc, nil
}
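The new lookupPXAPIPortFromService above resolves the management port from the portworx-service instead of hard-coding it. A small, self-contained illustration of that lookup pattern follows; it assumes the k8s.io/api and k8s.io/apimachinery modules are available (as they are in this vendor tree), and the Service literal is a made-up example, not real cluster data.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

const osdMgmtDefaultPort = 9001

// lookupPort mirrors the lookupPXAPIPortFromService pattern above: find the
// service port that exposes the management API and return its target port,
// falling back to the well-known default.
func lookupPort(svc *v1.Service) int32 {
	for _, p := range svc.Spec.Ports {
		if p.Port == osdMgmtDefaultPort {
			return p.TargetPort.IntVal
		}
	}
	return osdMgmtDefaultPort
}

func main() {
	svc := &v1.Service{Spec: v1.ServiceSpec{Ports: []v1.ServicePort{
		{Port: 9001, TargetPort: intstr.FromInt(17001)},
	}}}
	fmt.Println(lookupPort(svc)) // 17001
}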
88
vendor/k8s.io/kubernetes/pkg/volume/testing/testing.go
generated
vendored
88
vendor/k8s.io/kubernetes/pkg/volume/testing/testing.go
generated
vendored
@ -504,6 +504,94 @@ func (plugin *FakeVolumePlugin) VolumeLimitKey(spec *Spec) string {
return plugin.LimitKey
}

// FakeBasicVolumePlugin implements a basic volume plugin. It wrappers on
// FakeVolumePlugin but implements VolumePlugin interface only.
// It is useful to test logic involving plugin interfaces.
type FakeBasicVolumePlugin struct {
Plugin FakeVolumePlugin
}

func (f *FakeBasicVolumePlugin) GetPluginName() string {
return f.Plugin.GetPluginName()
}

func (f *FakeBasicVolumePlugin) GetVolumeName(spec *Spec) (string, error) {
return f.Plugin.GetVolumeName(spec)
}

// CanSupport tests whether the plugin supports a given volume specification by
// testing volume spec name begins with plugin name or not.
// This is useful to choose plugin by volume in testing.
func (f *FakeBasicVolumePlugin) CanSupport(spec *Spec) bool {
return strings.HasPrefix(spec.Name(), f.GetPluginName())
}

func (f *FakeBasicVolumePlugin) ConstructVolumeSpec(ame, mountPath string) (*Spec, error) {
return f.Plugin.ConstructVolumeSpec(ame, mountPath)
}

func (f *FakeBasicVolumePlugin) Init(ost VolumeHost) error {
return f.Plugin.Init(ost)
}

func (f *FakeBasicVolumePlugin) NewMounter(spec *Spec, pod *v1.Pod, opts VolumeOptions) (Mounter, error) {
return f.Plugin.NewMounter(spec, pod, opts)
}

func (f *FakeBasicVolumePlugin) NewUnmounter(volName string, podUID types.UID) (Unmounter, error) {
return f.Plugin.NewUnmounter(volName, podUID)
}

func (f *FakeBasicVolumePlugin) RequiresRemount() bool {
return f.Plugin.RequiresRemount()
}

func (f *FakeBasicVolumePlugin) SupportsBulkVolumeVerification() bool {
return f.Plugin.SupportsBulkVolumeVerification()
}

func (f *FakeBasicVolumePlugin) SupportsMountOption() bool {
return f.Plugin.SupportsMountOption()
}

var _ VolumePlugin = &FakeBasicVolumePlugin{}

// FakeDeviceMountableVolumePlugin implements an device mountable plugin based on FakeBasicVolumePlugin.
type FakeDeviceMountableVolumePlugin struct {
FakeBasicVolumePlugin
}

func (f *FakeDeviceMountableVolumePlugin) NewDeviceMounter() (DeviceMounter, error) {
return f.Plugin.NewDeviceMounter()
}

func (f *FakeDeviceMountableVolumePlugin) NewDeviceUnmounter() (DeviceUnmounter, error) {
return f.Plugin.NewDeviceUnmounter()
}

func (f *FakeDeviceMountableVolumePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
return f.Plugin.GetDeviceMountRefs(deviceMountPath)
}

var _ VolumePlugin = &FakeDeviceMountableVolumePlugin{}
var _ DeviceMountableVolumePlugin = &FakeDeviceMountableVolumePlugin{}

// FakeAttachableVolumePlugin implements an attachable plugin based on FakeDeviceMountableVolumePlugin.
type FakeAttachableVolumePlugin struct {
FakeDeviceMountableVolumePlugin
}

func (f *FakeAttachableVolumePlugin) NewAttacher() (Attacher, error) {
return f.Plugin.NewAttacher()
}

func (f *FakeAttachableVolumePlugin) NewDetacher() (Detacher, error) {
return f.Plugin.NewDetacher()
}

var _ VolumePlugin = &FakeAttachableVolumePlugin{}
var _ AttachableVolumePlugin = &FakeAttachableVolumePlugin{}

type FakeFileVolumePlugin struct {
}

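FakeBasicVolumePlugin above deliberately holds FakeVolumePlugin in a named field instead of embedding it, so only the plain VolumePlugin methods are exposed and type assertions to the optional interfaces fail; the device-mountable and attachable fakes then layer those interfaces back on. A toy illustration of that wrap-to-narrow pattern, using hypothetical interfaces rather than the real volume package:

package main

import "fmt"

// Basic is the narrow interface under test; Attachable is an optional extension.
type Basic interface{ Name() string }
type Attachable interface{ Attach() string }

// full implements both interfaces.
type full struct{}

func (full) Name() string   { return "full" }
func (full) Attach() string { return "attached" }

// basicOnly wraps full in a field (no embedding), so Attach is not promoted
// and assertions to Attachable fail -- the narrowing the fake plugins rely on.
type basicOnly struct{ inner full }

func (b basicOnly) Name() string { return b.inner.Name() }

func main() {
	var p Basic = basicOnly{}
	_, ok := p.(Attachable)
	fmt.Println(p.Name(), ok) // full false
}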
9
vendor/k8s.io/kubernetes/pkg/volume/util/BUILD
generated
vendored
9
vendor/k8s.io/kubernetes/pkg/volume/util/BUILD
generated
vendored
@ -63,7 +63,6 @@ go_test(
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/slice:go_default_library",
"//pkg/volume:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
@ -73,8 +72,12 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
"//conditions:default": [],
}),
)

filegroup(
2
vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go
generated
vendored
@ -725,7 +725,7 @@ func (oe *operationExecutor) MountVolume(
if fsVolume {
// Filesystem volume case
// Mount/remount a volume when a volume is attached
generatedOperations, err = oe.operationGenerator.GenerateMountVolumeFunc(
generatedOperations = oe.operationGenerator.GenerateMountVolumeFunc(
waitForAttachTimeout, volumeToMount, actualStateOfWorld, isRemount)

} else {
@ -389,14 +389,14 @@ func newFakeOperationGenerator(ch chan interface{}, quit chan interface{}) Opera
}
}

func (fopg *fakeOperationGenerator) GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) (volumetypes.GeneratedOperations, error) {
func (fopg *fakeOperationGenerator) GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) volumetypes.GeneratedOperations {
opFunc := func() (error, error) {
startOperationAndBlock(fopg.ch, fopg.quit)
return nil, nil
}
return volumetypes.GeneratedOperations{
OperationFunc: opFunc,
}, nil
}
}
func (fopg *fakeOperationGenerator) GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, podsDir string) (volumetypes.GeneratedOperations, error) {
opFunc := func() (error, error) {
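The fake generator above now returns a volumetypes.GeneratedOperations value directly instead of a (value, error) pair; any failure is reported later, when the operation function itself runs. A stripped-down sketch of that struct-of-closures shape, with hypothetical types standing in for the volumetypes ones:

package main

import "fmt"

// generatedOperations is a hypothetical stand-in for volumetypes.GeneratedOperations.
type generatedOperations struct {
	OperationFunc func() (simpleErr, detailedErr error)
	CompleteFunc  func(err error)
}

// generateOp builds the operation up front and returns it without an error;
// problems surface only when OperationFunc is executed.
func generateOp(name string) generatedOperations {
	return generatedOperations{
		OperationFunc: func() (error, error) { fmt.Println("running", name); return nil, nil },
		CompleteFunc:  func(err error) { fmt.Println("completed", name, "err:", err) },
	}
}

func main() {
	op := generateOp("mount_volume")
	_, err := op.OperationFunc()
	op.CompleteFunc(err)
}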
110
vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go
generated
vendored
110
vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go
generated
vendored
@ -40,6 +40,10 @@ import (
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)

const (
unknownVolumePlugin string = "UnknownVolumePlugin"
)

var _ OperationGenerator = &operationGenerator{}

type operationGenerator struct {
@ -82,7 +86,7 @@ func NewOperationGenerator(kubeClient clientset.Interface,
// OperationGenerator interface that extracts out the functions from operation_executor to make it dependency injectable
type OperationGenerator interface {
// Generates the MountVolume function needed to perform the mount of a volume plugin
GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) (volumetypes.GeneratedOperations, error)
GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) volumetypes.GeneratedOperations

// Generates the UnmountVolume function needed to perform the unmount of a volume plugin
GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, podsDir string) (volumetypes.GeneratedOperations, error)
@ -436,61 +440,61 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
waitForAttachTimeout time.Duration,
volumeToMount VolumeToMount,
actualStateOfWorld ActualStateOfWorldMounterUpdater,
isRemount bool) (volumetypes.GeneratedOperations, error) {
isRemount bool) volumetypes.GeneratedOperations {
// Get mounter plugin
volumePluginName := unknownVolumePlugin
volumePlugin, err :=
og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec)
if err != nil || volumePlugin == nil {
return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("MountVolume.FindPluginBySpec failed", err)
}

affinityErr := checkNodeAffinity(og, volumeToMount, volumePlugin)
if affinityErr != nil {
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.NodeAffinity check failed", affinityErr)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, eventErr.Error())
return volumetypes.GeneratedOperations{}, detailedErr
}

volumeMounter, newMounterErr := volumePlugin.NewMounter(
volumeToMount.VolumeSpec,
volumeToMount.Pod,
volume.VolumeOptions{})
if newMounterErr != nil {
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.NewMounter initialization failed", newMounterErr)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, eventErr.Error())
return volumetypes.GeneratedOperations{}, detailedErr
}

mountCheckError := checkMountOptionSupport(og, volumeToMount, volumePlugin)

if mountCheckError != nil {
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.MountOptionSupport check failed", mountCheckError)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.UnsupportedMountOption, eventErr.Error())
return volumetypes.GeneratedOperations{}, detailedErr
}

// Get attacher, if possible
attachableVolumePlugin, _ :=
og.volumePluginMgr.FindAttachablePluginBySpec(volumeToMount.VolumeSpec)
var volumeAttacher volume.Attacher
if attachableVolumePlugin != nil {
volumeAttacher, _ = attachableVolumePlugin.NewAttacher()
}

// get deviceMounter, if possible
deviceMountableVolumePlugin, _ := og.volumePluginMgr.FindDeviceMountablePluginBySpec(volumeToMount.VolumeSpec)
var volumeDeviceMounter volume.DeviceMounter
if deviceMountableVolumePlugin != nil {
volumeDeviceMounter, _ = deviceMountableVolumePlugin.NewDeviceMounter()
}

var fsGroup *int64
if volumeToMount.Pod.Spec.SecurityContext != nil &&
volumeToMount.Pod.Spec.SecurityContext.FSGroup != nil {
fsGroup = volumeToMount.Pod.Spec.SecurityContext.FSGroup
if err == nil && volumePlugin != nil {
volumePluginName = volumePlugin.GetPluginName()
}

mountVolumeFunc := func() (error, error) {
if err != nil || volumePlugin == nil {
return volumeToMount.GenerateError("MountVolume.FindPluginBySpec failed", err)
}

affinityErr := checkNodeAffinity(og, volumeToMount, volumePlugin)
if affinityErr != nil {
return volumeToMount.GenerateError("MountVolume.NodeAffinity check failed", affinityErr)
}

volumeMounter, newMounterErr := volumePlugin.NewMounter(
volumeToMount.VolumeSpec,
volumeToMount.Pod,
volume.VolumeOptions{})
if newMounterErr != nil {
return volumeToMount.GenerateError("MountVolume.NewMounter initialization failed", newMounterErr)

}

mountCheckError := checkMountOptionSupport(og, volumeToMount, volumePlugin)

if mountCheckError != nil {
return volumeToMount.GenerateError("MountVolume.MountOptionSupport check failed", mountCheckError)
}

// Get attacher, if possible
attachableVolumePlugin, _ :=
og.volumePluginMgr.FindAttachablePluginBySpec(volumeToMount.VolumeSpec)
var volumeAttacher volume.Attacher
if attachableVolumePlugin != nil {
volumeAttacher, _ = attachableVolumePlugin.NewAttacher()
}

// get deviceMounter, if possible
deviceMountableVolumePlugin, _ := og.volumePluginMgr.FindDeviceMountablePluginBySpec(volumeToMount.VolumeSpec)
var volumeDeviceMounter volume.DeviceMounter
if deviceMountableVolumePlugin != nil {
volumeDeviceMounter, _ = deviceMountableVolumePlugin.NewDeviceMounter()
}

var fsGroup *int64
if volumeToMount.Pod.Spec.SecurityContext != nil &&
volumeToMount.Pod.Spec.SecurityContext.FSGroup != nil {
fsGroup = volumeToMount.Pod.Spec.SecurityContext.FSGroup
}

devicePath := volumeToMount.DevicePath
if volumeAttacher != nil {
// Wait for attachable volumes to finish attaching
@ -536,7 +540,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc(

// resizeFileSystem will resize the file system if user has requested a resize of
// underlying persistent volume and is allowed to do so.
resizeSimpleError, resizeDetailedError := og.resizeFileSystem(volumeToMount, devicePath, deviceMountPath, volumePlugin.GetPluginName())
resizeSimpleError, resizeDetailedError := og.resizeFileSystem(volumeToMount, devicePath, deviceMountPath, volumePluginName)

if resizeSimpleError != nil || resizeDetailedError != nil {
return resizeSimpleError, resizeDetailedError
@ -593,8 +597,8 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
return volumetypes.GeneratedOperations{
OperationFunc: mountVolumeFunc,
EventRecorderFunc: eventRecorderFunc,
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeToMount.VolumeSpec), "volume_mount"),
}, nil
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePluginName, volumeToMount.VolumeSpec), "volume_mount"),
}
}

func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devicePath, deviceMountPath, pluginName string) (simpleErr, detailedErr error) {
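The GenerateMountVolumeFunc rewrite above no longer fails fast: the plugin lookup outside the closure only captures a plugin name for the completion hook (falling back to "UnknownVolumePlugin"), and every error, including a failed lookup, is returned from inside the operation function. A compact sketch of that error-deferral pattern, with hypothetical lookup and operation types:

package main

import (
	"errors"
	"fmt"
)

const unknownPlugin = "UnknownVolumePlugin"

type operation struct {
	Run      func() error
	Complete func() // e.g. emits a metric labelled with the plugin name
}

// findPlugin is a hypothetical lookup that may fail.
func findPlugin(spec string) (string, error) {
	if spec == "" {
		return "", errors.New("no plugin matches spec")
	}
	return "fake-plugin/" + spec, nil
}

// generateMount never returns an error itself; a failed lookup is reported
// when the returned operation runs, and the completion hook still gets a
// usable plugin name.
func generateMount(spec string) operation {
	name := unknownPlugin
	plugin, err := findPlugin(spec)
	if err == nil {
		name = plugin
	}
	return operation{
		Run: func() error {
			if err != nil {
				return fmt.Errorf("MountVolume.FindPluginBySpec failed: %v", err)
			}
			fmt.Println("mounting with", name)
			return nil
		},
		Complete: func() { fmt.Println("volume_mount complete, plugin =", name) },
	}
}

func main() {
	op := generateMount("")
	fmt.Println(op.Run())
	op.Complete()
}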
97
vendor/k8s.io/kubernetes/pkg/volume/util/util.go
generated
vendored
97
vendor/k8s.io/kubernetes/pkg/volume/util/util.go
generated
vendored
@ -23,9 +23,8 @@ import (
"path"
"path/filepath"
"strings"
"syscall"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -128,8 +127,9 @@ func SetReady(dir string) {

// UnmountPath is a common unmount routine that unmounts the given path and
// deletes the remaining directory if successful.
// TODO: Remove this function and change callers to call mount pkg directly
func UnmountPath(mountPath string, mounter mount.Interface) error {
return UnmountMountPoint(mountPath, mounter, false /* extensiveMountPointCheck */)
return mount.CleanupMountPoint(mountPath, mounter, false /* extensiveMountPointCheck */)
}

// UnmountMountPoint is a common unmount routine that unmounts the given path and
@ -137,93 +137,21 @@ func UnmountPath(mountPath string, mounter mount.Interface) error {
// if extensiveMountPointCheck is true
// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
// IsNotMountPoint is more expensive but properly handles bind mounts.
// TODO: Change callers to call mount pkg directly
func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool) error {
pathExists, pathErr := PathExists(mountPath)
if !pathExists {
klog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath)
return nil
}
corruptedMnt := IsCorruptedMnt(pathErr)
if pathErr != nil && !corruptedMnt {
return fmt.Errorf("Error checking path: %v", pathErr)
}
return doUnmountMountPoint(mountPath, mounter, extensiveMountPointCheck, corruptedMnt)
}

// doUnmountMountPoint is a common unmount routine that unmounts the given path and
// deletes the remaining directory if successful.
// if extensiveMountPointCheck is true
// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
// IsNotMountPoint is more expensive but properly handles bind mounts.
// if corruptedMnt is true, it means that the mountPath is a corrupted mountpoint, Take it as an argument for convenience of testing
func doUnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool, corruptedMnt bool) error {
if !corruptedMnt {
var notMnt bool
var err error
if extensiveMountPointCheck {
notMnt, err = mount.IsNotMountPoint(mounter, mountPath)
} else {
notMnt, err = mounter.IsLikelyNotMountPoint(mountPath)
}

if err != nil {
return err
}

if notMnt {
klog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath)
return os.Remove(mountPath)
}
}

// Unmount the mount path
klog.V(4).Infof("%q is a mountpoint, unmounting", mountPath)
if err := mounter.Unmount(mountPath); err != nil {
return err
}
notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath)
if mntErr != nil {
return mntErr
}
if notMnt {
klog.V(4).Infof("%q is unmounted, deleting the directory", mountPath)
return os.Remove(mountPath)
}
return fmt.Errorf("Failed to unmount path %v", mountPath)
return mount.CleanupMountPoint(mountPath, mounter, extensiveMountPointCheck)
}

// PathExists returns true if the specified path exists.
// TODO: Change callers to call mount pkg directly
func PathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
} else if os.IsNotExist(err) {
return false, nil
} else if IsCorruptedMnt(err) {
return true, err
} else {
return false, err
}
return mount.PathExists(path)
}

// IsCorruptedMnt return true if err is about corrupted mount point
// TODO: Change callers to call mount pkg directly
func IsCorruptedMnt(err error) bool {
if err == nil {
return false
}
var underlyingError error
switch pe := err.(type) {
case nil:
return false
case *os.PathError:
underlyingError = pe.Err
case *os.LinkError:
underlyingError = pe.Err
case *os.SyscallError:
underlyingError = pe.Err
}

return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO
return mount.IsCorruptedMnt(err)
}

// GetSecretForPod locates secret by name in the pod's namespace and returns secret map
@ -825,9 +753,10 @@ func GetUniqueVolumeName(pluginName, volumeName string) v1.UniqueVolumeName {
return v1.UniqueVolumeName(fmt.Sprintf("%s/%s", pluginName, volumeName))
}

// GetUniqueVolumeNameForNonAttachableVolume returns the unique volume name
// for a non-attachable volume.
func GetUniqueVolumeNameForNonAttachableVolume(
// GetUniqueVolumeNameFromSpecWithPod returns a unique volume name with pod
// name included. This is useful to generate different names for different pods
// on same volume.
func GetUniqueVolumeNameFromSpecWithPod(
podName types.UniquePodName, volumePlugin volume.VolumePlugin, volumeSpec *volume.Spec) v1.UniqueVolumeName {
return v1.UniqueVolumeName(
fmt.Sprintf("%s/%v-%s", volumePlugin.GetPluginName(), podName, volumeSpec.Name()))
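The util.go helpers above are reduced to thin wrappers around the mount package; the corrupted-mount check they now delegate to boils down to unwrapping the OS error and comparing errnos, as the removed code shows. A standalone sketch of that check (Linux errno values via the syscall package):

package main

import (
	"fmt"
	"os"
	"syscall"
)

// isCorruptedMnt mirrors the check in the old code above: unwrap the
// path/link/syscall error and compare the underlying errno against values
// that stale or disconnected mounts typically return.
func isCorruptedMnt(err error) bool {
	if err == nil {
		return false
	}
	var underlying error
	switch pe := err.(type) {
	case *os.PathError:
		underlying = pe.Err
	case *os.LinkError:
		underlying = pe.Err
	case *os.SyscallError:
		underlying = pe.Err
	}
	return underlying == syscall.ENOTCONN || underlying == syscall.ESTALE || underlying == syscall.EIO
}

func main() {
	_, err := os.Stat("/definitely/missing/path")
	fmt.Println(isCorruptedMnt(err)) // false: ENOENT is not a corruption errno
}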
43
vendor/k8s.io/kubernetes/pkg/volume/util/util_test.go
generated
vendored
43
vendor/k8s.io/kubernetes/pkg/volume/util/util_test.go
generated
vendored
@ -22,10 +22,9 @@ import (
"runtime"
"testing"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
utiltesting "k8s.io/client-go/util/testing"

// util.go uses api.Codecs.LegacyCodec so import this package to do some
// resource initialization.
@ -33,7 +32,6 @@ import (

_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount"

"reflect"
"strings"
@ -357,45 +355,6 @@ func TestZonesToSet(t *testing.T) {
}
}

func TestDoUnmountMountPoint(t *testing.T) {

tmpDir1, err1 := utiltesting.MkTmpdir("umount_test1")
if err1 != nil {
t.Fatalf("error creating temp dir: %v", err1)
}
defer os.RemoveAll(tmpDir1)

tmpDir2, err2 := utiltesting.MkTmpdir("umount_test2")
if err2 != nil {
t.Fatalf("error creating temp dir: %v", err2)
}
defer os.RemoveAll(tmpDir2)

// Second part: want no error
tests := []struct {
mountPath string
corruptedMnt bool
}{
{
mountPath: tmpDir1,
corruptedMnt: true,
},
{
mountPath: tmpDir2,
corruptedMnt: false,
},
}

fake := &mount.FakeMounter{}

for _, tt := range tests {
err := doUnmountMountPoint(tt.mountPath, fake, false, tt.corruptedMnt)
if err != nil {
t.Errorf("err Expected nil, but got: %v", err)
}
}
}

func TestCalculateTimeoutForVolume(t *testing.T) {
pv := &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{