Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 02:33:34 +00:00)
rebase: bump k8s.io/kubernetes from 1.26.2 to 1.27.2
Bumps [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes) from 1.26.2 to 1.27.2.
- [Release notes](https://github.com/kubernetes/kubernetes/releases)
- [Commits](https://github.com/kubernetes/kubernetes/compare/v1.26.2...v1.27.2)

---
updated-dependencies:
- dependency-name: k8s.io/kubernetes
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Committed by: mergify[bot]
Parent: 0e79135419
Commit: 07b05616a0
vendor/k8s.io/kubernetes/pkg/volume/plugins.go (generated, vendored, 11 lines changed)

@@ -213,7 +213,7 @@ type DeletableVolumePlugin interface {
 	// NewDeleter creates a new volume.Deleter which knows how to delete this
 	// resource in accordance with the underlying storage provider after the
 	// volume's release from a claim
-	NewDeleter(spec *Spec) (Deleter, error)
+	NewDeleter(logger klog.Logger, spec *Spec) (Deleter, error)
 }
 
 // ProvisionableVolumePlugin is an extended interface of VolumePlugin and is
@@ -223,7 +223,7 @@ type ProvisionableVolumePlugin interface {
 	// NewProvisioner creates a new volume.Provisioner which knows how to
 	// create PersistentVolumes in accordance with the plugin's underlying
 	// storage provider
-	NewProvisioner(options VolumeOptions) (Provisioner, error)
+	NewProvisioner(logger klog.Logger, options VolumeOptions) (Provisioner, error)
 }
 
 // AttachableVolumePlugin is an extended interface of VolumePlugin and is used for volumes that require attachment
@@ -334,13 +334,6 @@ type KubeletVolumeHost interface {
 	WaitForCacheSync() error
 	// Returns hostutil.HostUtils
 	GetHostUtil() hostutil.HostUtils
-	// GetHostIDsForPod if the pod uses user namespaces, takes the uid and
-	// gid inside the container and returns the host UID and GID those are
-	// mapped to on the host. If containerUID/containerGID is nil, then it
-	// returns the host UID/GID for ID 0 inside the container.
-	// If the pod is not using user namespaces, as there is no mapping needed, the
-	// same containerUID and containerGID params are returned.
-	GetHostIDsForPod(pod *v1.Pod, containerUID, containerGID *int64) (hostUID, hostGID *int64, err error)
 }
 
 // AttachDetachVolumeHost is a AttachDetach Controller specific interface that plugins can use
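The NewDeleter and NewProvisioner changes mean any code implementing these vendored interfaces now has to accept a leading klog.Logger argument. A minimal sketch of the adjustment, assuming a hypothetical plugin type (examplePlugin is not part of ceph-csi or Kubernetes):

package example

import (
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/volume"
)

type examplePlugin struct{}

// The 1.27 interface threads a klog.Logger through so the Deleter can log
// with contextual key/value pairs instead of the global klog functions.
// NewProvisioner gains the same leading parameter.
func (p *examplePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
	logger.V(4).Info("constructing deleter", "volume", spec.Name())
	return nil, nil // a real plugin would return its Deleter implementation here
}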
vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go (generated, vendored, 48 lines changed)

@@ -86,11 +86,16 @@ const (
 
 // Write does an atomic projection of the given payload into the writer's target
 // directory. Input paths must not begin with '..'.
+// setPerms is an optional pointer to a function that caller can provide to set the
+// permissions of the newly created files before they are published. The function is
+// passed subPath which is the name of the timestamped directory that was created
+// under target directory.
 //
 // The Write algorithm is:
 //
 //  1. The payload is validated; if the payload is invalid, the function returns
-//  2. The current timestamped directory is detected by reading the data directory
+//
+//  2. The current timestamped directory is detected by reading the data directory
 //     symlink
 //
 //  3. The old version of the volume is walked to determine whether any
@@ -98,13 +103,19 @@ const (
 //
 //  4. The data in the current timestamped directory is compared to the projected
 //     data to determine if an update is required.
-//  5. A new timestamped dir is created
 //
-//  6. The payload is written to the new timestamped directory
-//  7. A symlink to the new timestamped directory ..data_tmp is created that will
-//     become the new data directory
-//  8. The new data directory symlink is renamed to the data directory; rename is atomic
-//  9. Symlinks and directory for new user-visible files are created (if needed).
+//  5. A new timestamped dir is created.
+//
+//  6. The payload is written to the new timestamped directory.
+//
+//  7. Permissions are set (if setPerms is not nil) on the new timestamped directory and files.
+//
+//  8. A symlink to the new timestamped directory ..data_tmp is created that will
+//     become the new data directory.
+//
+//  9. The new data directory symlink is renamed to the data directory; rename is atomic.
+//
+//  10. Symlinks and directory for new user-visible files are created (if needed).
 //
 // For example, consider the files:
 //	<target-dir>/podName
@@ -123,9 +134,10 @@ const (
 //     linking everything else. On Windows, if a target does not exist, the created symlink
 //     will not work properly if the target ends up being a directory.
 //
-//  10. Old paths are removed from the user-visible portion of the target directory
-//  11. The previous timestamped directory is removed, if it exists
-func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
+//  11. Old paths are removed from the user-visible portion of the target directory.
+//
+//  12. The previous timestamped directory is removed, if it exists.
+func (w *AtomicWriter) Write(payload map[string]FileProjection, setPerms func(subPath string) error) error {
 	// (1)
 	cleanPayload, err := validatePayload(payload)
 	if err != nil {
@@ -185,6 +197,14 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
 	klog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir)
 
 	// (7)
+	if setPerms != nil {
+		if err := setPerms(tsDirName); err != nil {
+			klog.Errorf("%s: error applying ownership settings: %v", w.logContext, err)
+			return err
+		}
+	}
+
+	// (8)
 	newDataDirPath := filepath.Join(w.targetDir, newDataDirName)
 	if err = os.Symlink(tsDirName, newDataDirPath); err != nil {
 		os.RemoveAll(tsDir)
@@ -192,7 +212,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
 		return err
 	}
 
-	// (8)
+	// (9)
 	if runtime.GOOS == "windows" {
 		os.Remove(dataDirPath)
 		err = os.Symlink(tsDirName, dataDirPath)
@@ -207,19 +227,19 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
 		return err
 	}
 
-	// (9)
+	// (10)
 	if err = w.createUserVisibleFiles(cleanPayload); err != nil {
 		klog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err)
 		return err
 	}
 
-	// (10)
+	// (11)
 	if err = w.removeUserVisiblePaths(pathsToRemove); err != nil {
 		klog.Errorf("%s: error removing old visible symlinks: %v", w.logContext, err)
 		return err
 	}
 
-	// (11)
+	// (12)
 	if len(oldTsDir) > 0 {
 		if err = os.RemoveAll(oldTsPath); err != nil {
 			klog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err)
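A hedged sketch of how a caller adapts to the new Write signature; the directory name and payload below are illustrative, not taken from ceph-csi. Passing nil for setPerms keeps the pre-1.27 behaviour, while a non-nil callback runs at step (7), after the payload is written to the timestamped directory but before it becomes visible through the ..data symlink:

package example

import (
	"os"
	"path/filepath"

	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func writeProjection(targetDir string) error {
	w, err := volumeutil.NewAtomicWriter(targetDir, "example")
	if err != nil {
		return err
	}
	payload := map[string]volumeutil.FileProjection{
		"config.json": {Data: []byte(`{"enabled":true}`), Mode: 0o640},
	}
	// setPerms receives the name of the not-yet-published timestamped directory,
	// so ownership can be fixed up before the atomic rename makes it visible.
	return w.Write(payload, func(subPath string) error {
		return filepath.Walk(filepath.Join(targetDir, subPath),
			func(p string, _ os.FileInfo, err error) error {
				if err != nil {
					return err
				}
				return os.Chown(p, os.Getuid(), os.Getgid())
			})
	})
}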
vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/common/quota_common.go (generated, vendored, new file, 28 lines added)

@@ -0,0 +1,28 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+// QuotaID is generic quota identifier.
+// Data type based on quotactl(2).
+type QuotaID int32
+
+const (
+	// UnknownQuotaID -- cannot determine whether a quota is in force
+	UnknownQuotaID QuotaID = -1
+	// BadQuotaID -- Invalid quota
+	BadQuotaID QuotaID = 0
+)

The same declarations are removed from the pre-existing source file in the common package:

@@ -23,17 +23,6 @@ import (
 	"regexp"
 )
 
-// QuotaID is generic quota identifier.
-// Data type based on quotactl(2).
-type QuotaID int32
-
-const (
-	// UnknownQuotaID -- cannot determine whether a quota is in force
-	UnknownQuotaID QuotaID = -1
-	// BadQuotaID -- Invalid quota
-	BadQuotaID QuotaID = 0
-)
-
 // QuotaType -- type of quota to be applied
 type QuotaType int
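Moving QuotaID and its sentinel values into a file without a Linux build constraint is what lets the quota_unsupported.go stub further down return common.BadQuotaID on non-Linux builds. A small illustrative helper (not from the upstream code) that interprets the two sentinels:

package example

import (
	fscommon "k8s.io/kubernetes/pkg/volume/util/fsquota/common"
)

// hasRealQuota reports whether an ID returned by the fsquota helpers refers to
// an actual project quota rather than one of the sentinel values.
func hasRealQuota(id fscommon.QuotaID) bool {
	return id != fscommon.BadQuotaID && id != fscommon.UnknownQuotaID
}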
vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/project.go (generated, vendored, 8 lines changed)

@@ -164,6 +164,9 @@ func readProjectFiles(projects *os.File, projid *os.File) projectsList {
 	return projectsList{parseProjFile(projects, parseProject), parseProjFile(projid, parseProjid)}
 }
 
+// findAvailableQuota finds the next available quota from the FirstQuota
+// it returns error if QuotaIDIsInUse returns error when getting quota id in use;
+// it searches at most maxUnusedQuotasToSearch(128) time
 func findAvailableQuota(path string, idMap map[common.QuotaID]bool) (common.QuotaID, error) {
 	unusedQuotasSearched := 0
 	for id := common.FirstQuota; true; id++ {
@@ -187,13 +190,13 @@ func addDirToProject(path string, id common.QuotaID, list *projectsList) (common
 	idMap := make(map[common.QuotaID]bool)
 	for _, project := range list.projects {
 		if project.data == path {
-			if id != project.id {
+			if id != common.BadQuotaID && id != project.id {
 				return common.BadQuotaID, false, fmt.Errorf("attempt to reassign project ID for %s", path)
 			}
 			// Trying to reassign a directory to the project it's
 			// already in. Maybe this should be an error, but for
 			// now treat it as an idempotent operation
-			return id, false, nil
+			return project.id, false, nil
 		}
 		idMap[project.id] = true
 	}
@@ -318,6 +321,7 @@ func writeProjectFiles(fProjects *os.File, fProjid *os.File, writeProjid bool, l
 	return fmt.Errorf("unable to write project files: %v", err)
 }
 
+// if ID is common.BadQuotaID, generate new project id if the dir is not in a project
 func createProjectID(path string, ID common.QuotaID) (common.QuotaID, error) {
 	quotaIDLock.Lock()
 	defer quotaIDLock.Unlock()
vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota.go (generated, vendored, 5 lines changed)

@@ -23,10 +23,15 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/kubernetes/pkg/volume/util/fsquota/common"
 )
 
 // Interface -- quota interface
 type Interface interface {
+	// GetQuotaOnDir gets the quota ID (if any) that applies to
+	// this directory
+	GetQuotaOnDir(m mount.Interface, path string) (common.QuotaID, error)
+
 	// Does the path provided support quotas, and if so, what types
 	SupportsQuotas(m mount.Interface, path string) (bool, error)
 	// Assign a quota (picked by the quota mechanism) to a path,
vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota_linux.go (generated, vendored, 45 lines changed)

@@ -35,6 +35,9 @@ import (
 	"k8s.io/kubernetes/pkg/volume/util/fsquota/common"
 )
 
+// Pod -> External Pod UID
+var podUidMap = make(map[types.UID]types.UID)
+
 // Pod -> ID
 var podQuotaMap = make(map[types.UID]common.QuotaID)
 
@@ -214,7 +217,7 @@ func setQuotaOnDir(path string, id common.QuotaID, bytes int64) error {
 	return getApplier(path).SetQuotaOnDir(path, id, bytes)
 }
 
-func getQuotaOnDir(m mount.Interface, path string) (common.QuotaID, error) {
+func GetQuotaOnDir(m mount.Interface, path string) (common.QuotaID, error) {
 	_, _, err := getFSInfo(m, path)
 	if err != nil {
 		return common.BadQuotaID, err
@@ -235,7 +238,7 @@ func clearQuotaOnDir(m mount.Interface, path string) error {
 	if !supportsQuotas {
 		return nil
 	}
-	projid, err := getQuotaOnDir(m, path)
+	projid, err := GetQuotaOnDir(m, path)
 	if err == nil && projid != common.BadQuotaID {
 		// This means that we have a quota on the directory but
 		// we can't clear it. That's not good.
@@ -304,7 +307,7 @@ func SupportsQuotas(m mount.Interface, path string) (bool, error) {
 // AssignQuota chooses the quota ID based on the pod UID and path.
 // If the pod UID is identical to another one known, it may (but presently
 // doesn't) choose the same quota ID as other volumes in the pod.
-func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resource.Quantity) error { //nolint:staticcheck // SA4009 poduid is overwritten by design, see comment below
+func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resource.Quantity) error { //nolint:staticcheck
 	if bytes == nil {
 		return fmt.Errorf("attempting to assign null quota to %s", path)
 	}
@@ -314,20 +317,32 @@ func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resour
 	}
 	quotaLock.Lock()
 	defer quotaLock.Unlock()
-	// Current policy is to set individual quotas on each volumes.
+	// Current policy is to set individual quotas on each volume,
+	// for each new volume we generate a random UUID and we use that as
+	// the internal pod uid.
+	// From fsquota point of view each volume is attached to a
+	// single unique pod.
 	// If we decide later that we want to assign one quota for all
-	// volumes in a pod, we can simply remove this line of code.
+	// volumes in a pod, we can simply use poduid parameter directly
 	// If and when we decide permanently that we're going to adopt
 	// one quota per volume, we can rip all of the pod code out.
-	poduid = types.UID(uuid.NewUUID()) //nolint:staticcheck // SA4009 poduid is overwritten by design, see comment above
-	if pod, ok := dirPodMap[path]; ok && pod != poduid {
-		return fmt.Errorf("requesting quota on existing directory %s but different pod %s %s", path, pod, poduid)
+	externalPodUid := poduid
+	internalPodUid, ok := dirPodMap[path]
+	if ok {
+		if podUidMap[internalPodUid] != externalPodUid {
+			return fmt.Errorf("requesting quota on existing directory %s but different pod %s %s", path, podUidMap[internalPodUid], externalPodUid)
+		}
+	} else {
+		internalPodUid = types.UID(uuid.NewUUID())
 	}
-	oid, ok := podQuotaMap[poduid]
+	oid, ok := podQuotaMap[internalPodUid]
 	if ok {
 		if quotaSizeMap[oid] != ibytes {
 			return fmt.Errorf("requesting quota of different size: old %v new %v", quotaSizeMap[oid], bytes)
 		}
+		if _, ok := dirPodMap[path]; ok {
+			return nil
+		}
 	} else {
 		oid = common.BadQuotaID
 	}
@@ -342,12 +357,13 @@ func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resour
 		ibytes = -1
 	}
 	if err = setQuotaOnDir(path, id, ibytes); err == nil {
-		quotaPodMap[id] = poduid
+		quotaPodMap[id] = internalPodUid
 		quotaSizeMap[id] = ibytes
-		podQuotaMap[poduid] = id
+		podQuotaMap[internalPodUid] = id
 		dirQuotaMap[path] = id
-		dirPodMap[path] = poduid
-		podDirCountMap[poduid]++
+		dirPodMap[path] = internalPodUid
+		podUidMap[internalPodUid] = externalPodUid
+		podDirCountMap[internalPodUid]++
 		klog.V(4).Infof("Assigning quota ID %d (%d) to %s", id, ibytes, path)
 		return nil
 	}
@@ -415,7 +431,7 @@ func ClearQuota(m mount.Interface, path string) error {
 	if !ok {
 		return fmt.Errorf("clearQuota: No quota available for %s", path)
 	}
-	projid, err := getQuotaOnDir(m, path)
+	projid, err := GetQuotaOnDir(m, path)
 	if err != nil {
 		// Log-and-continue instead of returning an error for now
 		// due to unspecified backwards compatibility concerns (a subject to revise)
@@ -436,6 +452,7 @@ func ClearQuota(m mount.Interface, path string) error {
 		delete(quotaPodMap, podQuotaMap[poduid])
 		delete(podDirCountMap, poduid)
 		delete(podQuotaMap, poduid)
+		delete(podUidMap, poduid)
 	} else {
 		err = removeProjectID(path, projid)
 		podDirCountMap[poduid]--
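With getQuotaOnDir exported as GetQuotaOnDir (and added to the Interface in quota.go above), external callers can read back the project ID that AssignQuota applied. A rough usage sketch, assuming a Linux build and a quota-capable filesystem; the directory and size are placeholders:

package example

import (
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/volume/util/fsquota"
	"k8s.io/kubernetes/pkg/volume/util/fsquota/common"
	"k8s.io/mount-utils"
)

// applyQuota assigns a project quota to a volume directory and reads back the
// ID that ended up on it.
func applyQuota(dir string, poduid types.UID) (common.QuotaID, error) {
	mounter := mount.New("")
	size := resource.MustParse("1Gi")
	if err := fsquota.AssignQuota(mounter, dir, poduid, &size); err != nil {
		return common.BadQuotaID, err
	}
	// GetQuotaOnDir is exported as of this update; previously only the
	// package-internal getQuotaOnDir existed.
	return fsquota.GetQuotaOnDir(mounter, dir)
}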
vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota_unsupported.go (generated, vendored, 5 lines changed)

@@ -22,6 +22,7 @@ package fsquota
 import (
 	"errors"
 
+	"k8s.io/kubernetes/pkg/volume/util/fsquota/common"
 	"k8s.io/mount-utils"
 
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -33,6 +34,10 @@ import (
 
 var errNotImplemented = errors.New("not implemented")
 
+func GetQuotaOnDir(_ mount.Interface, _ string) (common.QuotaID, error) {
+	return common.BadQuotaID, errNotImplemented
+}
+
 // SupportsQuotas -- dummy implementation
 func SupportsQuotas(_ mount.Interface, _ string) (bool, error) {
 	return false, errNotImplemented
vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/fake_hostutil.go (generated, vendored, 3 lines changed)

@@ -120,5 +120,6 @@ func (hu *FakeHostUtil) GetMode(pathname string) (os.FileMode, error) {
 // GetSELinuxMountContext returns value of -o context=XYZ mount option on
 // given mount point.
 func (hu *FakeHostUtil) GetSELinuxMountContext(pathname string) (string, error) {
-	return "", errors.New("not implemented")
+	// This pretends the OS does not support SELinux.
+	return "", nil
 }
vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go (generated, vendored, 17 lines changed)

@@ -229,23 +229,6 @@ func MarkFSResizeFinished(
 	return updatedPVC, err
 }
 
-func MarkControllerExpansionFailed(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
-	expansionFailedOnController := v1.PersistentVolumeClaimControllerExpansionFailed
-	newPVC := pvc.DeepCopy()
-	newPVC.Status.ResizeStatus = &expansionFailedOnController
-	patchBytes, err := createPVCPatch(pvc, newPVC, false /* addResourceVersionCheck */)
-	if err != nil {
-		return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", pvc.Name, err)
-	}
-
-	updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).
-		Patch(context.TODO(), pvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
-	if updateErr != nil {
-		return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", pvc.Name, updateErr)
-	}
-	return updatedClaim, nil
-}
-
 // MarkNodeExpansionFailed marks a PVC for node expansion as failed. Kubelet should not retry expansion
 // of volumes which are in failed state.
 func MarkNodeExpansionFailed(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
vendor/k8s.io/kubernetes/pkg/volume/util/util.go (generated, vendored, 38 lines changed)

@@ -576,6 +576,44 @@ func IsLocalEphemeralVolume(volume v1.Volume) bool {
 		volume.ConfigMap != nil
 }
 
+// GetLocalPersistentVolumeNodeNames returns the node affinity node name(s) for
+// local PersistentVolumes. nil is returned if the PV does not have any
+// specific node affinity node selector terms and match expressions.
+// PersistentVolume with node affinity has select and match expressions
+// in the form of:
+//
+//	nodeAffinity:
+//	  required:
+//	    nodeSelectorTerms:
+//	    - matchExpressions:
+//	      - key: kubernetes.io/hostname
+//	        operator: In
+//	        values:
+//	        - <node1>
+//	        - <node2>
+func GetLocalPersistentVolumeNodeNames(pv *v1.PersistentVolume) []string {
+	if pv == nil || pv.Spec.NodeAffinity == nil || pv.Spec.NodeAffinity.Required == nil {
+		return nil
+	}
+
+	var result sets.Set[string]
+	for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
+		var nodes sets.Set[string]
+		for _, matchExpr := range term.MatchExpressions {
+			if matchExpr.Key == v1.LabelHostname && matchExpr.Operator == v1.NodeSelectorOpIn {
+				if nodes == nil {
+					nodes = sets.New(matchExpr.Values...)
+				} else {
+					nodes = nodes.Intersection(sets.New(matchExpr.Values...))
+				}
+			}
+		}
+		result = result.Union(nodes)
+	}
+
+	return sets.List(result)
+}
+
 // GetPodVolumeNames returns names of volumes that are used in a pod,
 // either as filesystem mount or raw block device, together with list
 // of all SELinux contexts of all containers that use the volumes.
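GetLocalPersistentVolumeNodeNames is new in this version of the vendored package. A small sketch of what it returns for a PV pinned to two nodes (the node names are made up):

package example

import (
	v1 "k8s.io/api/core/v1"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

// localPVNodes builds a PV whose node affinity pins it to two hosts and asks
// the new helper which node names that affinity resolves to.
func localPVNodes() []string {
	pv := &v1.PersistentVolume{
		Spec: v1.PersistentVolumeSpec{
			NodeAffinity: &v1.VolumeNodeAffinity{
				Required: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{{
						MatchExpressions: []v1.NodeSelectorRequirement{{
							Key:      v1.LabelHostname,
							Operator: v1.NodeSelectorOpIn,
							Values:   []string{"node-a", "node-b"},
						}},
					}},
				},
			},
		},
	}
	return volumeutil.GetLocalPersistentVolumeNodeNames(pv) // ["node-a", "node-b"]
}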
vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go (generated, vendored, 10 lines changed)

@@ -139,7 +139,7 @@ func getLoopDeviceFromSysfs(path string) (string, error) {
 		}
 
 		// Return the first match.
-		backingFilePath := strings.TrimSpace(string(data))
+		backingFilePath := cleanBackingFilePath(string(data))
 		if backingFilePath == path || backingFilePath == realPath {
 			return fmt.Sprintf("/dev/%s", filepath.Base(device)), nil
 		}
@@ -148,6 +148,14 @@ func getLoopDeviceFromSysfs(path string) (string, error) {
 	return "", errors.New(ErrDeviceNotFound)
 }
 
+// cleanPath remove any trailing substrings that are not part of the backing file path.
+func cleanBackingFilePath(path string) string {
+	// If the block device was deleted, the path will contain a "(deleted)" suffix
+	path = strings.TrimSpace(path)
+	path = strings.TrimSuffix(path, "(deleted)")
+	return strings.TrimSpace(path)
+}
+
 // FindGlobalMapPathUUIDFromPod finds {pod uuid} bind mount under globalMapPath
 // corresponding to map path symlink, and then return global map path with pod uuid.
 // (See pkg/volume/volume.go for details on a global map path and a pod device map path.)
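cleanBackingFilePath is unexported, but its effect is easy to illustrate: sysfs appends a " (deleted)" marker to the loop device's backing_file entry once the backing file is removed, and that suffix has to be trimmed before the path comparison in getLoopDeviceFromSysfs. A standalone re-implementation of the same trimming, with a made-up path:

package main

import (
	"fmt"
	"strings"
)

// trimDeletedSuffix mirrors what the new cleanBackingFilePath helper does.
func trimDeletedSuffix(p string) string {
	p = strings.TrimSpace(p)
	p = strings.TrimSuffix(p, "(deleted)")
	return strings.TrimSpace(p)
}

func main() {
	raw := "/var/lib/example/volumeDevices/pv0001 (deleted)\n"
	fmt.Println(trimDeletedSuffix(raw)) // /var/lib/example/volumeDevices/pv0001
}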
vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go (generated, vendored, 16 lines changed)

@@ -40,22 +40,22 @@ const (
 // SetVolumeOwnership modifies the given volume to be owned by
 // fsGroup, and sets SetGid so that newly created files are owned by
 // fsGroup. If fsGroup is nil nothing is done.
-func SetVolumeOwnership(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error {
+func SetVolumeOwnership(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error {
 	if fsGroup == nil {
 		return nil
 	}
 
 	timer := time.AfterFunc(30*time.Second, func() {
-		klog.Warningf("Setting volume ownership for %s and fsGroup set. If the volume has a lot of files then setting volume ownership could be slow, see https://github.com/kubernetes/kubernetes/issues/69699", mounter.GetPath())
+		klog.Warningf("Setting volume ownership for %s and fsGroup set. If the volume has a lot of files then setting volume ownership could be slow, see https://github.com/kubernetes/kubernetes/issues/69699", dir)
 	})
 	defer timer.Stop()
 
-	if skipPermissionChange(mounter, fsGroup, fsGroupChangePolicy) {
-		klog.V(3).InfoS("Skipping permission and ownership change for volume", "path", mounter.GetPath())
+	if skipPermissionChange(mounter, dir, fsGroup, fsGroupChangePolicy) {
+		klog.V(3).InfoS("Skipping permission and ownership change for volume", "path", dir)
 		return nil
 	}
 
-	err := walkDeep(mounter.GetPath(), func(path string, info os.FileInfo, err error) error {
+	err := walkDeep(dir, func(path string, info os.FileInfo, err error) error {
 		if err != nil {
 			return err
 		}
@@ -104,14 +104,12 @@ func changeFilePermission(filename string, fsGroup *int64, readonly bool, info o
 	return nil
 }
 
-func skipPermissionChange(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy) bool {
-	dir := mounter.GetPath()
-
+func skipPermissionChange(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy) bool {
 	if fsGroupChangePolicy == nil || *fsGroupChangePolicy != v1.FSGroupChangeOnRootMismatch {
 		klog.V(4).InfoS("Perform recursive ownership change for directory", "path", dir)
 		return false
 	}
-	return !requiresPermissionChange(mounter.GetPath(), fsGroup, mounter.GetAttributes().ReadOnly)
+	return !requiresPermissionChange(dir, fsGroup, mounter.GetAttributes().ReadOnly)
 }
 
 func requiresPermissionChange(rootDir string, fsGroup *int64, readonly bool) bool {
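For callers of the vendored volume package, SetVolumeOwnership now takes the directory to walk as an explicit argument instead of deriving it from mounter.GetPath() inside the helper. A hedged sketch of the new call shape; the mounter, directory and GID are supplied by the caller and purely illustrative:

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/volume"
)

// applyFSGroup shows the updated call: the target directory is passed
// explicitly alongside the mounter.
func applyFSGroup(mounter volume.Mounter, dir string, gid int64) error {
	policy := v1.FSGroupChangeOnRootMismatch
	// Passing nil for completeFunc skips the optional completion callback.
	return volume.SetVolumeOwnership(mounter, dir, &gid, &policy, nil)
}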
vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go (generated, vendored, 2 lines changed)

@@ -24,6 +24,6 @@ import (
 	"k8s.io/kubernetes/pkg/volume/util/types"
 )
 
-func SetVolumeOwnership(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error {
+func SetVolumeOwnership(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error {
 	return nil
 }