rebase: update kubernetes to latest

Update the kubernetes dependency to the latest release in the main go.mod.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Author:        Madhu Rajanna
Date:          2024-08-19 10:01:33 +02:00
Committed by:  mergify[bot]
Parent:        63c4c05b35
Commit:        5a66991bb3
2173 changed files with 98906 additions and 61334 deletions

View File

@ -52,7 +52,7 @@ func (n *noopExpandableVolumePluginInstance) RequiresRemount(spec *Spec) bool {
return false
}
-func (n *noopExpandableVolumePluginInstance) NewMounter(spec *Spec, podRef *v1.Pod, opts VolumeOptions) (Mounter, error) {
+func (n *noopExpandableVolumePluginInstance) NewMounter(spec *Spec, podRef *v1.Pod) (Mounter, error) {
return nil, nil
}
@ -68,10 +68,6 @@ func (n *noopExpandableVolumePluginInstance) SupportsMountOption() bool {
return true
}
-func (n *noopExpandableVolumePluginInstance) SupportsBulkVolumeVerification() bool {
-return false
-}
func (n *noopExpandableVolumePluginInstance) RequiresFSResize() bool {
return true
}
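
Note: the dropped opts VolumeOptions parameter above is the same interface change that recurs in plugins.go below; upstream removed the argument as unused. A minimal sketch of a conforming stub under the new two-argument shape — Spec, Pod, and Mounter here are local placeholders standing in for the vendored k8s.io/kubernetes/pkg/volume and k8s.io/api/core/v1 types, not the real definitions:

package volstub

import "errors"

// Placeholder stand-ins for the vendored volume.Spec, v1.Pod and
// volume.Mounter types; for illustration only.
type Spec struct{ Name string }
type Pod struct{ Name string }
type Mounter interface{ SetUp() error }

type noopMounter struct{}

func (noopMounter) SetUp() error { return nil }

// NewMounter after the rebase: implementations and call sites both
// drop the trailing VolumeOptions argument.
func NewMounter(spec *Spec, podRef *Pod) (Mounter, error) {
    if spec == nil {
        return nil, errors.New("nil volume spec")
    }
    return noopMounter{}, nil
}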

View File

@ -39,7 +39,6 @@ import (
storagelistersv1 "k8s.io/client-go/listers/storage/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
-cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
"k8s.io/kubernetes/pkg/volume/util/subpath"
@ -83,10 +82,6 @@ type VolumeOptions struct {
// i.e. with required capacity, accessMode, labels matching PVC.Selector and
// so on.
PVC *v1.PersistentVolumeClaim
-// Unique name of Kubernetes cluster.
-ClusterName string
-// Tags to attach to the real volume in the cloud provider - e.g. AWS EBS
-CloudTags *map[string]string
// Volume provisioning parameters from StorageClass
Parameters map[string]string
}
@ -154,7 +149,7 @@ type VolumePlugin interface {
// Ownership of the spec pointer in *not* transferred.
// - spec: The v1.Volume spec
// - pod: The enclosing pod
-NewMounter(spec *Spec, podRef *v1.Pod, opts VolumeOptions) (Mounter, error)
+NewMounter(spec *Spec, podRef *v1.Pod) (Mounter, error)
// NewUnmounter creates a new volume.Unmounter from recoverable state.
// - name: The volume name, as per the v1.Volume spec.
@ -172,11 +167,6 @@ type VolumePlugin interface {
// user specified mount options will result in error creating persistent volumes
SupportsMountOption() bool
-// SupportsBulkVolumeVerification checks if volume plugin type is capable
-// of enabling bulk polling of all nodes. This can speed up verification of
-// attached volumes by quite a bit, but underlying pluging must support it.
-SupportsBulkVolumeVerification() bool
// SupportsSELinuxContextMount returns true if volume plugins supports
// mount -o context=XYZ for a given volume.
SupportsSELinuxContextMount(spec *Spec) (bool, error)
@ -263,32 +253,6 @@ type NodeExpandableVolumePlugin interface {
NodeExpand(resizeOptions NodeResizeOptions) (bool, error)
}
-// VolumePluginWithAttachLimits is an extended interface of VolumePlugin that restricts number of
-// volumes that can be attached to a node.
-type VolumePluginWithAttachLimits interface {
-VolumePlugin
-// Return maximum number of volumes that can be attached to a node for this plugin.
-// The key must be same as string returned by VolumeLimitKey function. The returned
-// map may look like:
-// - { "storage-limits-aws-ebs": 39 }
-// - { "storage-limits-gce-pd": 10 }
-// A volume plugin may return error from this function - if it can not be used on a given node or not
-// applicable in given environment (where environment could be cloudprovider or any other dependency)
-// For example - calling this function for EBS volume plugin on a GCE node should
-// result in error.
-// The returned values are stored in node allocatable property and will be used
-// by scheduler to determine how many pods with volumes can be scheduled on given node.
-GetVolumeLimits() (map[string]int64, error)
-// Return volume limit key string to be used in node capacity constraints
-// The key must start with prefix storage-limits-. For example:
-// - storage-limits-aws-ebs
-// - storage-limits-csi-cinder
-// The key should respect character limit of ResourceName type
-// This function may be called by kubelet or scheduler to identify node allocatable property
-// which stores volumes limits.
-VolumeLimitKey(spec *Spec) string
-}
// BlockVolumePlugin is an extend interface of VolumePlugin and is used for block volumes support.
type BlockVolumePlugin interface {
VolumePlugin
@ -296,7 +260,7 @@ type BlockVolumePlugin interface {
// Ownership of the spec pointer in *not* transferred.
// - spec: The v1.Volume spec
// - pod: The enclosing pod
-NewBlockVolumeMapper(spec *Spec, podRef *v1.Pod, opts VolumeOptions) (BlockVolumeMapper, error)
+NewBlockVolumeMapper(spec *Spec, podRef *v1.Pod) (BlockVolumeMapper, error)
// NewBlockVolumeUnmapper creates a new volume.BlockVolumeUnmapper from recoverable state.
// - name: The volume name, as per the v1.Volume spec.
// - podUID: The UID of the enclosing pod
@ -401,16 +365,13 @@ type VolumeHost interface {
// the provided spec. This is used to implement volume plugins which
// "wrap" other plugins. For example, the "secret" volume is
// implemented in terms of the "emptyDir" volume.
-NewWrapperMounter(volName string, spec Spec, pod *v1.Pod, opts VolumeOptions) (Mounter, error)
+NewWrapperMounter(volName string, spec Spec, pod *v1.Pod) (Mounter, error)
// NewWrapperUnmounter finds an appropriate plugin with which to handle
// the provided spec. See comments on NewWrapperMounter for more
// context.
NewWrapperUnmounter(volName string, spec Spec, podUID types.UID) (Unmounter, error)
-// Get cloud provider from kubelet.
-GetCloudProvider() cloudprovider.Interface
// Get mounter interface.
GetMounter(pluginName string) mount.Interface
@ -457,7 +418,7 @@ type VolumePluginMgr struct {
plugins map[string]VolumePlugin
prober DynamicPluginProber
probedPlugins map[string]VolumePlugin
-loggedDeprecationWarnings sets.String
+loggedDeprecationWarnings sets.Set[string]
Host VolumeHost
}
@ -599,7 +560,7 @@ func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, prober DynamicPlu
defer pm.mutex.Unlock()
pm.Host = host
-pm.loggedDeprecationWarnings = sets.NewString()
+pm.loggedDeprecationWarnings = sets.New[string]()
if prober == nil {
// Use a dummy prober to prevent nil deference.
@ -751,20 +712,6 @@ func (pm *VolumePluginMgr) refreshProbedPlugins() {
}
}
-// ListVolumePluginWithLimits returns plugins that have volume limits on nodes
-func (pm *VolumePluginMgr) ListVolumePluginWithLimits() []VolumePluginWithAttachLimits {
-pm.mutex.RLock()
-defer pm.mutex.RUnlock()
-matchedPlugins := []VolumePluginWithAttachLimits{}
-for _, v := range pm.plugins {
-if plugin, ok := v.(VolumePluginWithAttachLimits); ok {
-matchedPlugins = append(matchedPlugins, plugin)
-}
-}
-return matchedPlugins
-}
// FindPersistentPluginBySpec looks for a persistent volume plugin that can
// support a given volume specification. If no plugin is found, return an
// error
@ -779,20 +726,6 @@ func (pm *VolumePluginMgr) FindPersistentPluginBySpec(spec *Spec) (PersistentVol
return nil, fmt.Errorf("no persistent volume plugin matched")
}
-// FindVolumePluginWithLimitsBySpec returns volume plugin that has a limit on how many
-// of them can be attached to a node
-func (pm *VolumePluginMgr) FindVolumePluginWithLimitsBySpec(spec *Spec) (VolumePluginWithAttachLimits, error) {
-volumePlugin, err := pm.FindPluginBySpec(spec)
-if err != nil {
-return nil, fmt.Errorf("could not find volume plugin for spec : %#v", spec)
-}
-if limitedPlugin, ok := volumePlugin.(VolumePluginWithAttachLimits); ok {
-return limitedPlugin, nil
-}
-return nil, fmt.Errorf("no plugin with limits found")
-}
// FindPersistentPluginByName fetches a persistent volume plugin by name. If
// no plugin is found, returns error.
func (pm *VolumePluginMgr) FindPersistentPluginByName(name string) (PersistentVolumePlugin, error) {
@ -858,19 +791,6 @@ func (pm *VolumePluginMgr) FindDeletablePluginByName(name string) (DeletableVolu
return nil, fmt.Errorf("no deletable volume plugin matched")
}
-// FindCreatablePluginBySpec fetches a persistent volume plugin by name. If
-// no plugin is found, returns error.
-func (pm *VolumePluginMgr) FindCreatablePluginBySpec(spec *Spec) (ProvisionableVolumePlugin, error) {
-volumePlugin, err := pm.FindPluginBySpec(spec)
-if err != nil {
-return nil, err
-}
-if provisionableVolumePlugin, ok := volumePlugin.(ProvisionableVolumePlugin); ok {
-return provisionableVolumePlugin, nil
-}
-return nil, fmt.Errorf("no creatable volume plugin matched")
-}
// FindAttachablePluginBySpec fetches a persistent volume plugin by spec.
// Unlike the other "FindPlugin" methods, this does not return error if no
// plugin is found. All volumes require a mounter and unmounter, but not
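
Note: most of the mechanical churn in this file is the migration from the deprecated string-set type sets.String to the generic sets.Set[string] in k8s.io/apimachinery/pkg/util/sets. A small self-contained sketch of the old-to-new mapping (the set contents are made up for illustration):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

func main() {
    // Old API: warned := sets.NewString("plugin-a"); warned.List()
    // New generic API used throughout this rebase:
    warned := sets.New[string]("plugin-a")
    warned.Insert("plugin-b")

    probed := sets.New[string]("plugin-b")

    // The sorted-slice accessor moved from a method, s.List(), to the
    // package-level generic helper sets.List(s).
    fmt.Println(sets.List(warned.Difference(probed))) // [plugin-a]
}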

View File

@ -158,7 +158,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection, setPerms func(su
}
oldTsPath := filepath.Join(w.targetDir, oldTsDir)
-var pathsToRemove sets.String
+var pathsToRemove sets.Set[string]
shouldWrite := true
// if there was no old version, there's nothing to remove
if len(oldTsDir) != 0 {
@ -355,10 +355,10 @@ func shouldWriteFile(path string, content []byte) (bool, error) {
// pathsToRemove walks the current version of the data directory and
// determines which paths should be removed (if any) after the payload is
// written to the target directory.
-func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir string) (sets.String, error) {
-paths := sets.NewString()
+func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTSDir string) (sets.Set[string], error) {
+paths := sets.New[string]()
visitor := func(path string, info os.FileInfo, err error) error {
-relativePath := strings.TrimPrefix(path, oldTsDir)
+relativePath := strings.TrimPrefix(path, oldTSDir)
relativePath = strings.TrimPrefix(relativePath, string(os.PathSeparator))
if relativePath == "" {
return nil
@ -368,15 +368,15 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir
return nil
}
-err := filepath.Walk(oldTsDir, visitor)
+err := filepath.Walk(oldTSDir, visitor)
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, err
}
klog.V(5).Infof("%s: current paths: %+v", w.targetDir, paths.List())
klog.V(5).Infof("%s: current paths: %+v", w.targetDir, sets.List(paths))
newPaths := sets.NewString()
newPaths := sets.New[string]()
for file := range payload {
// add all subpaths for the payload to the set of new paths
// to avoid attempting to remove non-empty dirs
@ -386,7 +386,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir
subPath = strings.TrimSuffix(subPath, string(os.PathSeparator))
}
}
klog.V(5).Infof("%s: new paths: %+v", w.targetDir, newPaths.List())
klog.V(5).Infof("%s: new paths: %+v", w.targetDir, sets.List(newPaths))
result := paths.Difference(newPaths)
klog.V(5).Infof("%s: paths to remove: %+v", w.targetDir, result)
@ -444,7 +444,8 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir
if fileProjection.FsUser == nil {
continue
}
-if err := os.Chown(fullPath, int(*fileProjection.FsUser), -1); err != nil {
+if err := w.chown(fullPath, int(*fileProjection.FsUser), -1); err != nil {
klog.Errorf("%s: unable to change file %s with owner %v: %v", w.logContext, fullPath, int(*fileProjection.FsUser), err)
return err
}
@ -487,7 +488,7 @@ func (w *AtomicWriter) createUserVisibleFiles(payload map[string]FileProjection)
// removeUserVisiblePaths removes the set of paths from the user-visible
// portion of the writer's target directory.
-func (w *AtomicWriter) removeUserVisiblePaths(paths sets.String) error {
+func (w *AtomicWriter) removeUserVisiblePaths(paths sets.Set[string]) error {
ps := string(os.PathSeparator)
var lasterr error
for p := range paths {

View File

@ -0,0 +1,27 @@
//go:build linux
// +build linux
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import "os"
// chown changes the numeric uid and gid of the named file.
func (w *AtomicWriter) chown(name string, uid, gid int) error {
return os.Chown(name, uid, gid)
}

View File

@ -0,0 +1,33 @@
//go:build !linux
// +build !linux
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"runtime"
"k8s.io/klog/v2"
)
// chown changes the numeric uid and gid of the named file.
// This is a no-op on unsupported platforms.
func (w *AtomicWriter) chown(name string, uid, _ /* gid */ int) error {
klog.Warningf("%s: skipping change of Linux owner %v for file %s; unsupported on %s", w.logContext, uid, name, runtime.GOOS)
return nil
}

View File

@ -225,11 +225,11 @@ func GetQuotaOnDir(m mount.Interface, path string) (common.QuotaID, error) {
return getApplier(path).GetQuotaOnDir(path)
}
-func clearQuotaOnDir(m mount.Interface, path string) error {
+func clearQuotaOnDir(m mount.Interface, path string, userNamespacesEnabled bool) error {
// Since we may be called without path being in the map,
// we explicitly have to check in this case.
klog.V(4).Infof("clearQuotaOnDir %s", path)
-supportsQuotas, err := SupportsQuotas(m, path)
+supportsQuotas, err := SupportsQuotas(m, path, userNamespacesEnabled)
if err != nil {
// Log-and-continue instead of returning an error for now
// due to unspecified backwards compatibility concerns (a subject to revise)
@ -269,11 +269,17 @@ func clearQuotaOnDir(m mount.Interface, path string) error {
// don't cache the result because nothing will clean it up.
// However, do cache the device->applier map; the number of devices
// is bounded.
-func SupportsQuotas(m mount.Interface, path string) (bool, error) {
+// User namespaces prevent changes to project IDs on the filesystem,
+// ensuring xfs-quota metrics' reliability; hence, userNamespacesEnabled is checked.
+func SupportsQuotas(m mount.Interface, path string, userNamespacesEnabled bool) (bool, error) {
if !enabledQuotasForMonitoring() {
klog.V(3).Info("SupportsQuotas called, but quotas disabled")
return false, nil
}
+if !userNamespacesEnabled {
+klog.V(3).Info("SupportQuotas called and LocalStorageCapacityIsolationFSQuotaMonitoring enabled, but pod is not in a user namespace")
+return false, nil
+}
supportsQuotasLock.Lock()
defer supportsQuotasLock.Unlock()
if supportsQuotas, ok := supportsQuotasMap[path]; ok {
@ -307,12 +313,12 @@ func SupportsQuotas(m mount.Interface, path string) (bool, error) {
// AssignQuota chooses the quota ID based on the pod UID and path.
// If the pod UID is identical to another one known, it may (but presently
// doesn't) choose the same quota ID as other volumes in the pod.
-func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resource.Quantity) error { //nolint:staticcheck
+func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resource.Quantity, userNamespacesEnabled bool) error { //nolint:staticcheck
if bytes == nil {
return fmt.Errorf("attempting to assign null quota to %s", path)
}
ibytes := bytes.Value()
-if ok, err := SupportsQuotas(m, path); !ok {
+if ok, err := SupportsQuotas(m, path, userNamespacesEnabled); !ok {
return fmt.Errorf("quotas not supported on %s: %v", path, err)
}
quotaLock.Lock()
@ -410,7 +416,7 @@ func GetInodes(path string) (*resource.Quantity, error) {
}
// ClearQuota -- remove the quota assigned to a directory
-func ClearQuota(m mount.Interface, path string) error {
+func ClearQuota(m mount.Interface, path string, userNamespacesEnabled bool) error {
klog.V(3).Infof("ClearQuota %s", path)
if !enabledQuotasForMonitoring() {
return fmt.Errorf("clearQuota called, but quotas disabled")
@ -426,7 +432,7 @@ func ClearQuota(m mount.Interface, path string) error {
// be found, which needs to be cleaned up.
defer delete(supportsQuotasMap, path)
defer clearApplier(path)
-return clearQuotaOnDir(m, path)
+return clearQuotaOnDir(m, path, userNamespacesEnabled)
}
_, ok = podQuotaMap[poduid]
if !ok {
@ -443,7 +449,7 @@ func ClearQuota(m mount.Interface, path string) error {
}
count, ok := podDirCountMap[poduid]
if count <= 1 || !ok {
-err = clearQuotaOnDir(m, path)
+err = clearQuotaOnDir(m, path, userNamespacesEnabled)
// This error should be noted; we still need to clean up
// and otherwise handle in the same way.
if err != nil {
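
Note: the new userNamespacesEnabled parameter threads the pod's user-namespace state down into the quota code, which now requires the pod to be in a user namespace (project IDs cannot be tampered with from inside one) and declines quota monitoring otherwise, before even consulting its per-path cache. A simplified sketch of that guard-then-cache shape (the names and the probe are illustrative, not the real internals):

package quota

import "sync"

var (
    mu    sync.Mutex
    cache = map[string]bool{}
)

// supportsQuotas models the updated flow: the user-namespace guard
// short-circuits first, then a cached per-path probe answers the rest.
func supportsQuotas(path string, userNamespacesEnabled bool) (bool, error) {
    if !userNamespacesEnabled {
        return false, nil
    }
    mu.Lock()
    defer mu.Unlock()
    if ok, hit := cache[path]; hit {
        return ok, nil
    }
    ok := probeFilesystem(path) // hypothetical low-level quota probe
    cache[path] = ok
    return ok, nil
}

func probeFilesystem(string) bool { return false }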

View File

@ -39,12 +39,12 @@ func GetQuotaOnDir(_ mount.Interface, _ string) (common.QuotaID, error) {
}
// SupportsQuotas -- dummy implementation
-func SupportsQuotas(_ mount.Interface, _ string) (bool, error) {
+func SupportsQuotas(_ mount.Interface, _ string, _ bool) (bool, error) {
return false, errNotImplemented
}
// AssignQuota -- dummy implementation
-func AssignQuota(_ mount.Interface, _ string, _ types.UID, _ *resource.Quantity) error {
+func AssignQuota(_ mount.Interface, _ string, _ types.UID, _ *resource.Quantity, _ bool) error {
return errNotImplemented
}
@ -59,6 +59,6 @@ func GetInodes(_ string) (*resource.Quantity, error) {
}
// ClearQuota -- dummy implementation
-func ClearQuota(_ mount.Interface, _ string) error {
+func ClearQuota(_ mount.Interface, _ string, _ bool) error {
return errNotImplemented
}

View File

@ -40,6 +40,8 @@ var (
knownResizeConditions = map[v1.PersistentVolumeClaimConditionType]bool{
v1.PersistentVolumeClaimFileSystemResizePending: true,
v1.PersistentVolumeClaimResizing: true,
+v1.PersistentVolumeClaimControllerResizeError: true,
+v1.PersistentVolumeClaimNodeResizeError: true,
}
// AnnPreResizeCapacity annotation is added to a PV when expanding volume.
@ -140,7 +142,7 @@ func MarkResizeInProgressWithResizer(
}
conditions := []v1.PersistentVolumeClaimCondition{progressCondition}
newPVC := pvc.DeepCopy()
-newPVC = MergeResizeConditionOnPVC(newPVC, conditions)
+newPVC = MergeResizeConditionOnPVC(newPVC, conditions, false /* keepOldResizeConditions */)
newPVC = setResizer(newPVC, resizerName)
return PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
}
@ -154,7 +156,7 @@ func MarkControllerReisizeInProgress(pvc *v1.PersistentVolumeClaim, resizerName
}
conditions := []v1.PersistentVolumeClaimCondition{progressCondition}
newPVC := pvc.DeepCopy()
-newPVC = MergeResizeConditionOnPVC(newPVC, conditions)
+newPVC = MergeResizeConditionOnPVC(newPVC, conditions, false /* keepOldResizeConditions */)
newPVC = mergeStorageResourceStatus(newPVC, v1.PersistentVolumeClaimControllerResizeInProgress)
newPVC = mergeStorageAllocatedResources(newPVC, newSize)
newPVC = setResizer(newPVC, resizerName)
@ -196,7 +198,7 @@ func MarkForFSResize(
newPVC = mergeStorageResourceStatus(newPVC, v1.PersistentVolumeClaimNodeResizePending)
}
-newPVC = MergeResizeConditionOnPVC(newPVC, conditions)
+newPVC = MergeResizeConditionOnPVC(newPVC, conditions, true /* keepOldResizeConditions */)
updatedPVC, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
return updatedPVC, err
}
@ -229,16 +231,25 @@ func MarkFSResizeFinished(
}
}
-newPVC = MergeResizeConditionOnPVC(newPVC, []v1.PersistentVolumeClaimCondition{})
+newPVC = MergeResizeConditionOnPVC(newPVC, []v1.PersistentVolumeClaimCondition{}, false /* keepOldResizeConditions */)
updatedPVC, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
return updatedPVC, err
}
-// MarkNodeExpansionFailed marks a PVC for node expansion as failed. Kubelet should not retry expansion
+// MarkNodeExpansionInfeasible marks a PVC for node expansion as failed. Kubelet should not retry expansion
// of volumes which are in failed state.
-func MarkNodeExpansionFailed(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
+func MarkNodeExpansionInfeasible(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface, err error) (*v1.PersistentVolumeClaim, error) {
newPVC := pvc.DeepCopy()
-newPVC = mergeStorageResourceStatus(newPVC, v1.PersistentVolumeClaimNodeResizeFailed)
+newPVC = mergeStorageResourceStatus(newPVC, v1.PersistentVolumeClaimNodeResizeInfeasible)
+errorCondition := v1.PersistentVolumeClaimCondition{
+Type: v1.PersistentVolumeClaimNodeResizeError,
+Status: v1.ConditionTrue,
+LastTransitionTime: metav1.Now(),
+Message: fmt.Sprintf("failed to expand pvc with %v", err),
+}
+newPVC = MergeResizeConditionOnPVC(newPVC,
+[]v1.PersistentVolumeClaimCondition{errorCondition},
+true /* keepOldResizeConditions */)
patchBytes, err := createPVCPatch(pvc, newPVC, false /* addResourceVersionCheck */)
if err != nil {
@ -253,6 +264,30 @@ func MarkNodeExpansionFailed(pvc *v1.PersistentVolumeClaim, kubeClient clientset
return updatedClaim, nil
}
+func MarkNodeExpansionFailedCondition(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface, err error) (*v1.PersistentVolumeClaim, error) {
+newPVC := pvc.DeepCopy()
+errorCondition := v1.PersistentVolumeClaimCondition{
+Type: v1.PersistentVolumeClaimNodeResizeError,
+Status: v1.ConditionTrue,
+LastTransitionTime: metav1.Now(),
+Message: fmt.Sprintf("failed to expand pvc with %v", err),
+}
+newPVC = MergeResizeConditionOnPVC(newPVC,
+[]v1.PersistentVolumeClaimCondition{errorCondition},
+true /* keepOldResizeConditions */)
+patchBytes, err := createPVCPatch(pvc, newPVC, false /* addResourceVersionCheck */)
+if err != nil {
+return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %w", pvc.Name, err)
+}
+updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).
+Patch(context.TODO(), pvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
+if updateErr != nil {
+return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %w", pvc.Name, updateErr)
+}
+return updatedClaim, nil
+}
// MarkNodeExpansionInProgress marks pvc expansion in progress on node
func MarkNodeExpansionInProgress(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
newPVC := pvc.DeepCopy()
@ -333,7 +368,7 @@ func addResourceVersion(patchBytes []byte, resourceVersion string) ([]byte, erro
// leaving other conditions untouched.
func MergeResizeConditionOnPVC(
pvc *v1.PersistentVolumeClaim,
-resizeConditions []v1.PersistentVolumeClaimCondition) *v1.PersistentVolumeClaim {
+resizeConditions []v1.PersistentVolumeClaimCondition, keepOldResizeConditions bool) *v1.PersistentVolumeClaim {
resizeConditionMap := map[v1.PersistentVolumeClaimConditionType]*resizeProcessStatus{}
for _, condition := range resizeConditions {
@ -356,6 +391,10 @@ func MergeResizeConditionOnPVC(
newConditions = append(newConditions, condition)
}
newCondition.processed = true
+} else if keepOldResizeConditions {
+// if keepOldResizeConditions is true, we keep the old resize conditions that were present in the
+// existing pvc.Status.Conditions field.
+newConditions = append(newConditions, condition)
}
}
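
Note: the new keepOldResizeConditions flag changes the merge semantics of MergeResizeConditionOnPVC: previously any known resize condition missing from the update was dropped; now callers such as MarkForFSResize and the error paths above can preserve them. A simplified, self-contained model of the merge (the real function also tracks timestamps and always passes non-resize conditions through untouched):

package main

import "fmt"

type condition struct{ Type, Status string }

// mergeResizeConditions: updates replace same-typed old conditions;
// with keepOld, old conditions that were not updated now survive
// instead of being dropped.
func mergeResizeConditions(old, updates []condition, keepOld bool) []condition {
    pending := map[string]condition{}
    for _, c := range updates {
        pending[c.Type] = c
    }
    processed := map[string]bool{}
    var out []condition
    for _, c := range old {
        if u, ok := pending[c.Type]; ok {
            out = append(out, u) // replaced in place
            processed[c.Type] = true
        } else if keepOld {
            out = append(out, c) // new behavior: preserved
        }
    }
    for _, u := range updates {
        if !processed[u.Type] {
            out = append(out, u) // genuinely new condition
        }
    }
    return out
}

func main() {
    old := []condition{{"Resizing", "True"}}
    upd := []condition{{"NodeResizeError", "True"}}
    fmt.Println(mergeResizeConditions(old, upd, true))  // both kept
    fmt.Println(mergeResizeConditions(old, upd, false)) // only the update
}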

View File

@ -102,6 +102,27 @@ func IsFailedPreconditionError(err error) bool {
return errors.As(err, &failedPreconditionError)
}
+// InfeasibleError errors are a subset of OperationFinished or final error
+// codes. In terms of CSI - this usually means that, the operation is not possible
+// in current state with given arguments.
+type InfeasibleError struct {
+msg string
+}
+func (err *InfeasibleError) Error() string {
+return err.msg
+}
+// NewInfeasibleError returns a new instance of InfeasibleError
+func NewInfeasibleError(msg string) *InfeasibleError {
+return &InfeasibleError{msg: msg}
+}
+func IsInfeasibleError(err error) bool {
+var infeasibleError *InfeasibleError
+return errors.As(err, &infeasibleError)
+}
type OperationNotSupported struct {
msg string
}
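
Note: InfeasibleError gives resize callers a typed way to classify final, non-retryable failures, and errors.As keeps the check working through wrapped errors. A small usage sketch (the wrapping call site is hypothetical):

package main

import (
    "errors"
    "fmt"
)

// Mirrors the type added above.
type InfeasibleError struct{ msg string }

func (e *InfeasibleError) Error() string { return e.msg }

func IsInfeasibleError(err error) bool {
    var target *InfeasibleError
    return errors.As(err, &target)
}

func main() {
    // A caller somewhere wraps the infeasible failure with context.
    err := fmt.Errorf("expand volume: %w", &InfeasibleError{msg: "requested size below current size"})

    // errors.As unwraps the chain, so classification still works.
    fmt.Println(IsInfeasibleError(err)) // true
}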

View File

@ -22,12 +22,10 @@ import (
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"time"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiruntime "k8s.io/apimachinery/pkg/runtime"
@ -36,7 +34,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
-storagehelpers "k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@ -46,7 +43,6 @@ import (
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
"k8s.io/mount-utils"
-utilexec "k8s.io/utils/exec"
"k8s.io/utils/io"
utilstrings "k8s.io/utils/strings"
)
@ -59,10 +55,6 @@ const (
// managed by the attach/detach controller
ControllerManagedAttachAnnotation string = "volumes.kubernetes.io/controller-managed-attach-detach"
-// KeepTerminatedPodVolumesAnnotation is the key of the annotation on Node
-// that decides if pod volumes are unmounted when pod is terminated
-KeepTerminatedPodVolumesAnnotation string = "volumes.kubernetes.io/keep-terminated-pod-volumes"
// MountsInGlobalPDPath is name of the directory appended to a volume plugin
// name to create the place for volume mounts in the global PD path.
MountsInGlobalPDPath = "mounts"
@ -115,22 +107,6 @@ func SetReady(dir string) {
file.Close()
}
-// GetSecretForPod locates secret by name in the pod's namespace and returns secret map
-func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (map[string]string, error) {
-secret := make(map[string]string)
-if kubeClient == nil {
-return secret, fmt.Errorf("cannot get kube client")
-}
-secrets, err := kubeClient.CoreV1().Secrets(pod.Namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
-if err != nil {
-return secret, err
-}
-for name, data := range secrets.Data {
-secret[name] = string(data)
-}
-return secret, nil
-}
// GetSecretForPV locates secret by name and namespace, verifies the secret type, and returns secret map
func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeClient clientset.Interface) (map[string]string, error) {
secret := make(map[string]string)
@ -150,23 +126,6 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl
return secret, nil
}
-// GetClassForVolume locates storage class by persistent volume
-func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume) (*storage.StorageClass, error) {
-if kubeClient == nil {
-return nil, fmt.Errorf("cannot get kube client")
-}
-className := storagehelpers.GetPersistentVolumeClass(pv)
-if className == "" {
-return nil, fmt.Errorf("volume has no storage class")
-}
-class, err := kubeClient.StorageV1().StorageClasses().Get(context.TODO(), className, metav1.GetOptions{})
-if err != nil {
-return nil, err
-}
-return class, nil
-}
// LoadPodFromFile will read, decode, and return a Pod from a file.
func LoadPodFromFile(filePath string) (*v1.Pod, error) {
if filePath == "" {
@ -204,22 +163,6 @@ func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.Pers
return timeout
}
-// GenerateVolumeName returns a PV name with clusterName prefix. The function
-// should be used to generate a name of GCE PD or Cinder volume. It basically
-// adds "<clusterName>-dynamic-" before the PV name, making sure the resulting
-// string fits given length and cuts "dynamic" if not.
-func GenerateVolumeName(clusterName, pvName string, maxLength int) string {
-prefix := clusterName + "-dynamic"
-pvLen := len(pvName)
-// cut the "<clusterName>-dynamic" to fit full pvName into maxLength
-// +1 for the '-' dash
-if pvLen+1+len(prefix) > maxLength {
-prefix = prefix[:maxLength-pvLen-1]
-}
-return prefix + "-" + pvName
-}
// GetPath checks if the path from the mounter is empty.
func GetPath(mounter volume.Mounter) (string, error) {
path := mounter.GetPath()
@ -263,7 +206,7 @@ func MountOptionFromSpec(spec *volume.Spec, options ...string) []string {
// JoinMountOptions joins mount options eliminating duplicates
func JoinMountOptions(userOptions []string, systemOptions []string) []string {
-allMountOptions := sets.NewString()
+allMountOptions := sets.New[string]()
for _, mountOption := range userOptions {
if len(mountOption) > 0 {
@ -274,7 +217,7 @@ func JoinMountOptions(userOptions []string, systemOptions []string) []string {
for _, mountOption := range systemOptions {
allMountOptions.Insert(mountOption)
}
-return allMountOptions.List()
+return sets.List(allMountOptions)
}
// ContainsAccessMode returns whether the requested mode is contained by modes
@ -558,13 +501,6 @@ func UnmapBlockVolume(
return nil
}
-// GetPluginMountDir returns the global mount directory name appended
-// to the given plugin name's plugin directory
-func GetPluginMountDir(host volume.VolumeHost, name string) string {
-mntDir := filepath.Join(host.GetPluginDir(name), MountsInGlobalPDPath)
-return mntDir
-}
// IsLocalEphemeralVolume determines whether the argument is a local ephemeral
// volume vs. some other type
// Local means the volume is using storage from the local disk that is managed by kubelet.
@ -616,9 +552,9 @@ func GetLocalPersistentVolumeNodeNames(pv *v1.PersistentVolume) []string {
// GetPodVolumeNames returns names of volumes that are used in a pod,
// either as filesystem mount or raw block device, together with list
// of all SELinux contexts of all containers that use the volumes.
-func GetPodVolumeNames(pod *v1.Pod) (mounts sets.String, devices sets.String, seLinuxContainerContexts map[string][]*v1.SELinuxOptions) {
-mounts = sets.NewString()
-devices = sets.NewString()
+func GetPodVolumeNames(pod *v1.Pod) (mounts sets.Set[string], devices sets.Set[string], seLinuxContainerContexts map[string][]*v1.SELinuxOptions) {
+mounts = sets.New[string]()
+devices = sets.New[string]()
seLinuxContainerContexts = make(map[string][]*v1.SELinuxOptions)
podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(container *v1.Container, containerType podutil.ContainerType) bool {
@ -653,8 +589,7 @@ func GetPodVolumeNames(pod *v1.Pod) (mounts sets.String, devices sets.String, se
// attributes.
func FsUserFrom(pod *v1.Pod) *int64 {
var fsUser *int64
-// Exclude ephemeral containers because SecurityContext is not allowed.
-podutil.VisitContainers(&pod.Spec, podutil.InitContainers|podutil.Containers, func(container *v1.Container, containerType podutil.ContainerType) bool {
+podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(container *v1.Container, containerType podutil.ContainerType) bool {
runAsUser, ok := securitycontext.DetermineEffectiveRunAsUser(pod, container)
// One container doesn't specify user or there are more than one
// non-root UIDs.
@ -705,25 +640,6 @@ func HasMountRefs(mountPath string, mountRefs []string) bool {
return false
}
-// WriteVolumeCache flush disk data given the specified mount path
-func WriteVolumeCache(deviceMountPath string, exec utilexec.Interface) error {
-// If runtime os is windows, execute Write-VolumeCache powershell command on the disk
-if runtime.GOOS == "windows" {
-cmdString := "Get-Volume -FilePath $env:mountpath | Write-Volumecache"
-cmd := exec.Command("powershell", "/c", cmdString)
-env := append(os.Environ(), fmt.Sprintf("mountpath=%s", deviceMountPath))
-cmd.SetEnv(env)
-klog.V(8).Infof("Executing command: %q", cmdString)
-output, err := cmd.CombinedOutput()
-klog.Infof("command (%q) execeuted: %v, output: %q", cmdString, err, string(output))
-if err != nil {
-return fmt.Errorf("command (%q) failed: %v, output: %q", cmdString, err, string(output))
-}
-}
-// For linux runtime, it skips because unmount will automatically flush disk data
-return nil
-}
// IsMultiAttachAllowed checks if attaching this volume to multiple nodes is definitely not allowed/possible.
// In its current form, this function can only reliably say for which volumes it's definitely forbidden. If it returns
// false, it is not guaranteed that multi-attach is actually supported by the volume type and we must rely on the
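
Note: the same sets.String to sets.Set[string] migration lands here in JoinMountOptions and GetPodVolumeNames. The updated join logic is small enough to reproduce standalone as a worked example (simplified from the diff above):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

// joinMountOptions dedupes user and system mount options via a generic
// set; sets.List returns the merged options sorted.
func joinMountOptions(userOptions, systemOptions []string) []string {
    all := sets.New[string]()
    for _, o := range userOptions {
        if len(o) > 0 {
            all.Insert(o)
        }
    }
    for _, o := range systemOptions {
        all.Insert(o)
    }
    return sets.List(all)
}

func main() {
    fmt.Println(joinMountOptions([]string{"ro", "noatime"}, []string{"ro", "nodev"}))
    // Output: [noatime nodev ro]
}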

View File

@ -19,10 +19,10 @@ package util
import (
"sort"
storagev1alpha1 "k8s.io/api/storage/v1alpha1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
-storagev1alpha1listers "k8s.io/client-go/listers/storage/v1alpha1"
+storagev1beta1listers "k8s.io/client-go/listers/storage/v1beta1"
"k8s.io/klog/v2"
)
@ -32,13 +32,13 @@ const (
)
// GetDefaultVolumeAttributesClass returns the default VolumeAttributesClass from the store, or nil.
-func GetDefaultVolumeAttributesClass(lister storagev1alpha1listers.VolumeAttributesClassLister, driverName string) (*storagev1alpha1.VolumeAttributesClass, error) {
+func GetDefaultVolumeAttributesClass(lister storagev1beta1listers.VolumeAttributesClassLister, driverName string) (*storagev1beta1.VolumeAttributesClass, error) {
list, err := lister.List(labels.Everything())
if err != nil {
return nil, err
}
-defaultClasses := []*storagev1alpha1.VolumeAttributesClass{}
+defaultClasses := []*storagev1beta1.VolumeAttributesClass{}
for _, class := range list {
if IsDefaultVolumeAttributesClassAnnotation(class.ObjectMeta) && class.DriverName == driverName {
defaultClasses = append(defaultClasses, class)
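
Note: VolumeAttributesClass moved from storage/v1alpha1 to storage/v1beta1 in this Kubernetes release, so only the import paths and element types change while the listing logic stays the same. A minimal sketch of a consumer after the bump (it filters on DriverName only; the real helper above also checks the default-class annotation):

package vacutil

import (
    storagev1beta1 "k8s.io/api/storage/v1beta1"
    "k8s.io/apimachinery/pkg/labels"
    storagev1beta1listers "k8s.io/client-go/listers/storage/v1beta1"
)

// classesForDriver lists VolumeAttributesClasses served by one CSI
// driver, using the v1beta1 lister this rebase switches to.
func classesForDriver(lister storagev1beta1listers.VolumeAttributesClassLister, driver string) ([]*storagev1beta1.VolumeAttributesClass, error) {
    list, err := lister.List(labels.Everything())
    if err != nil {
        return nil, err
    }
    var out []*storagev1beta1.VolumeAttributesClass
    for _, class := range list {
        if class.DriverName == driver {
            out = append(out, class)
        }
    }
    return out, nil
}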

View File

@ -284,14 +284,6 @@ type DeviceMounter interface {
MountDevice(spec *Spec, devicePath string, deviceMountPath string, deviceMounterArgs DeviceMounterArgs) error
}
-type BulkVolumeVerifier interface {
-// BulkVerifyVolumes checks whether the list of volumes still attached to the
-// clusters in the node. It returns a map which maps from the volume spec to the checking result.
-// If an error occurs during check - error should be returned and volume on nodes
-// should be assumed as still attached.
-BulkVerifyVolumes(volumesByNode map[types.NodeName][]*Spec) (map[types.NodeName]map[*Spec]bool, error)
-}
// Detacher can detach a volume from a node.
type Detacher interface {
DeviceUnmounter