rebase: bump the k8s-dependencies group in /e2e with 4 updates

Bumps the k8s-dependencies group in /e2e with 4 updates: [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery), [k8s.io/cloud-provider](https://github.com/kubernetes/cloud-provider), [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes) and [k8s.io/pod-security-admission](https://github.com/kubernetes/pod-security-admission).

Updates `k8s.io/apimachinery` from 0.33.0 to 0.33.1
- [Commits](https://github.com/kubernetes/apimachinery/compare/v0.33.0...v0.33.1)

Updates `k8s.io/cloud-provider` from 0.33.0 to 0.33.1
- [Commits](https://github.com/kubernetes/cloud-provider/compare/v0.33.0...v0.33.1)

Updates `k8s.io/kubernetes` from 1.33.0 to 1.33.1
- [Release notes](https://github.com/kubernetes/kubernetes/releases)
- [Commits](https://github.com/kubernetes/kubernetes/compare/v1.33.0...v1.33.1)

Updates `k8s.io/pod-security-admission` from 0.33.0 to 0.33.1
- [Commits](https://github.com/kubernetes/pod-security-admission/compare/v0.33.0...v0.33.1)

---
updated-dependencies:
- dependency-name: k8s.io/apimachinery
  dependency-version: 0.33.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: k8s-dependencies
- dependency-name: k8s.io/cloud-provider
  dependency-version: 0.33.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: k8s-dependencies
- dependency-name: k8s.io/kubernetes
  dependency-version: 1.33.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: k8s-dependencies
- dependency-name: k8s.io/pod-security-admission
  dependency-version: 0.33.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: k8s-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Author:    dependabot[bot]
Date:      2025-05-19 20:48:03 +00:00
Committer: mergify[bot]
Parent:    f6c26d354c
Commit:    d05ebd3456
20 changed files with 765 additions and 295 deletions

@ -247,10 +247,12 @@ func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err
// finding a non-YAML-delimited series of objects), it will not switch to YAML.
// Once it switches to YAML it will not switch back to JSON.
type YAMLOrJSONDecoder struct {
json *json.Decoder
yaml *YAMLToJSONDecoder
stream *StreamReader
count int // how many objects have been decoded
json *json.Decoder
jsonConsumed int64 // of the stream total, how much was JSON?
yaml *YAMLToJSONDecoder
yamlConsumed int64 // of the stream total, how much was YAML?
stream *StreamReader
count int // how many objects have been decoded
}
type JSONSyntaxError struct {
@ -299,8 +301,10 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
if d.json != nil {
err := d.json.Decode(into)
if err == nil {
d.stream.Consume(int(d.json.InputOffset()) - d.stream.Consumed())
d.count++
consumed := d.json.InputOffset() - d.jsonConsumed
d.stream.Consume(int(consumed))
d.jsonConsumed += consumed
return nil
}
if err == io.EOF { //nolint:errorlint
@ -334,7 +338,9 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
if d.yaml != nil {
err := d.yaml.Decode(into)
if err == nil {
d.stream.Consume(d.yaml.InputOffset() - d.stream.Consumed())
consumed := int64(d.yaml.InputOffset()) - d.yamlConsumed
d.stream.Consume(int(consumed))
d.yamlConsumed += consumed
d.count++
return nil
}
@ -375,6 +381,7 @@ func (d *YAMLOrJSONDecoder) consumeWhitespace() error {
if err == io.EOF { //nolint:errorlint
break
}
consumed += sz
}
return io.EOF
}
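
The hunk above adds per-format consumed-byte counters (jsonConsumed, yamlConsumed) so that each successful Decode reports only the newly consumed bytes to the underlying StreamReader. For readers unfamiliar with the type, a minimal usage sketch of the decoder follows; the buffer size and sample documents are illustrative assumptions, not part of this change.

package main

import (
	"bytes"
	"fmt"
	"io"

	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	// Two YAML documents in one stream; the same decoder also accepts JSON input.
	data := []byte("a: 1\n---\nb: 2\n")
	dec := utilyaml.NewYAMLOrJSONDecoder(bytes.NewReader(data), 4096)

	for {
		var obj map[string]interface{}
		if err := dec.Decode(&obj); err != nil {
			if err == io.EOF { // stream exhausted
				break
			}
			fmt.Println("decode error:", err)
			return
		}
		fmt.Println("decoded:", obj)
	}
}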

@ -745,9 +745,10 @@ func (alloc *allocator) allocateOne(r deviceIndices, allocateSubRequest bool) (b
return alloc.allocateOne(deviceIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex + 1}, false)
}
// Before trying to allocate devices, check if allocating the devices
// in the current request will put us over the threshold.
numDevicesAfterAlloc := len(alloc.result[r.claimIndex].devices) + requestData.numDevices
// We can calculate this by adding the number of already allocated devices with the number
// of devices in the current request, and then finally subtract the deviceIndex since we
// don't want to double count any devices already allocated for the current request.
numDevicesAfterAlloc := len(alloc.result[r.claimIndex].devices) + requestData.numDevices - r.deviceIndex
if numDevicesAfterAlloc > resourceapi.AllocationResultsMaxSize {
// Don't return an error here since we want to keep searching for
// a solution that works.
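
The subtraction of r.deviceIndex above avoids counting the current request's already-allocated devices twice. A worked example with made-up numbers (purely illustrative, not taken from the allocator itself):

package main

import "fmt"

func main() {
	// Hypothetical state while allocating the 3rd device of a request
	// that asks for 4 devices in total.
	alreadyAllocated := 5 // devices recorded for this claim so far (2 of them from the current request)
	requestDevices := 4   // devices the current request asks for in total
	deviceIndex := 2      // devices of the current request already included in alreadyAllocated

	// Same formula as the hunk above: subtract deviceIndex so the current
	// request's devices are not double counted.
	numDevicesAfterAlloc := alreadyAllocated + requestDevices - deviceIndex
	fmt.Println(numDevicesAfterAlloc) // 7
}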

@ -236,6 +236,27 @@ func MarkFSResizeFinished(
return updatedPVC, err
}
func MarkNodeExpansionFinishedWithRecovery(
pvc *v1.PersistentVolumeClaim,
newSize resource.Quantity,
kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
newPVC := pvc.DeepCopy()
newPVC.Status.Capacity[v1.ResourceStorage] = newSize
allocatedResourceStatusMap := newPVC.Status.AllocatedResourceStatuses
delete(allocatedResourceStatusMap, v1.ResourceStorage)
if len(allocatedResourceStatusMap) == 0 {
newPVC.Status.AllocatedResourceStatuses = nil
} else {
newPVC.Status.AllocatedResourceStatuses = allocatedResourceStatusMap
}
newPVC = MergeResizeConditionOnPVC(newPVC, []v1.PersistentVolumeClaimCondition{}, false /* keepOldResizeConditions */)
updatedPVC, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
return updatedPVC, err
}
// MarkNodeExpansionInfeasible marks a PVC for node expansion as failed. Kubelet should not retry expansion
// of volumes which are in failed state.
func MarkNodeExpansionInfeasible(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface, err error) (*v1.PersistentVolumeClaim, error) {

@ -15,4 +15,4 @@ limitations under the License.
*/
// Package mount defines an interface to mounting filesystems.
package mount // import "k8s.io/mount-utils"
package mount
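
This hunk only drops the legacy import comment from the k8s.io/mount-utils package documentation. As quick orientation for the package, here is a minimal usage sketch; the paths and error handling are illustrative assumptions, not part of this change.

package main

import (
	"fmt"

	mount "k8s.io/mount-utils"
)

func main() {
	// New("") selects the default mount utilities on the host.
	mounter := mount.New("")

	// Bind-mount one directory onto another (paths are made up for the example).
	if err := mounter.Mount("/mnt/data", "/mnt/target", "", []string{"bind"}); err != nil {
		fmt.Println("mount failed:", err)
		return
	}

	// List the mount points currently visible to the host.
	mps, err := mounter.List()
	if err != nil {
		fmt.Println("list failed:", err)
		return
	}
	fmt.Printf("found %d mount points\n", len(mps))
}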

@ -35,7 +35,6 @@ import (
"github.com/moby/sys/mountinfo"
"golang.org/x/sys/unix"
inuserns "github.com/moby/sys/userns"
"k8s.io/klog/v2"
utilexec "k8s.io/utils/exec"
)
@ -114,7 +113,7 @@ func (mounter *Mounter) hasSystemd() bool {
// Map unix.Statfs mount flags ro, nodev, noexec, nosuid, noatime, relatime,
// nodiratime to mount option flag strings.
func getUserNSBindMountOptions(path string, statfs func(path string, buf *unix.Statfs_t) (err error)) ([]string, error) {
func getBindMountOptions(path string, statfs func(path string, buf *unix.Statfs_t) (err error)) ([]string, error) {
var s unix.Statfs_t
var mountOpts []string
if err := statfs(path, &s); err != nil {
@ -137,32 +136,23 @@ func getUserNSBindMountOptions(path string, statfs func(path string, buf *unix.S
return mountOpts, nil
}
// Do a bind mount including the needed remount for applying the bind opts.
// If the remount fails and we are running in a user namespace
// figure out if the source filesystem has the ro, nodev, noexec, nosuid,
// noatime, relatime or nodiratime flag set and try another remount with the found flags.
// Performs a bind mount with the specified options, and then remounts
// the mount point with the same `ro`, `nodev`, `noexec`, `nosuid`, `noatime`,
// `relatime`, `nodiratime` options as the original mount point.
func (mounter *Mounter) bindMountSensitive(mounterPath string, mountCmd string, source string, target string, fstype string, bindOpts []string, bindRemountOpts []string, bindRemountOptsSensitive []string, mountFlags []string, systemdMountRequired bool) error {
err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts, bindRemountOptsSensitive, mountFlags, systemdMountRequired)
err := mounter.doMount(mounterPath, mountCmd, source, target, fstype, bindOpts, bindRemountOptsSensitive, mountFlags, systemdMountRequired)
if err != nil {
return err
}
err = mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts, bindRemountOptsSensitive, mountFlags, systemdMountRequired)
if inuserns.RunningInUserNS() {
if err == nil {
return nil
}
// Check if the source has ro, nodev, noexec, nosuid, noatime, relatime,
// nodiratime flag...
fixMountOpts, err := getUserNSBindMountOptions(source, unix.Statfs)
if err != nil {
return &os.PathError{Op: "statfs", Path: source, Err: err}
}
// ... and retry the mount with flags found above.
bindRemountOpts = append(bindRemountOpts, fixMountOpts...)
return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts, bindRemountOptsSensitive, mountFlags, systemdMountRequired)
} else {
return err
// Check if the source has ro, nodev, noexec, nosuid, noatime, relatime,
// nodiratime flag...
fixMountOpts, err := getBindMountOptions(source, unix.Statfs)
if err != nil {
return &os.PathError{Op: "statfs", Path: source, Err: err}
}
// ... and retry the mount with flags found above.
bindRemountOpts = append(bindRemountOpts, fixMountOpts...)
return mounter.doMount(mounterPath, mountCmd, source, target, fstype, bindRemountOpts, bindRemountOptsSensitive, mountFlags, systemdMountRequired)
}
// Mount mounts source to target as fstype with given options. 'source' and 'fstype' must
@ -732,7 +722,7 @@ func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) {
return getDiskFormat(mounter.Exec, disk)
}
// ListProcMounts is shared with NsEnterMounter
// ListProcMounts returns a list of all mounted filesystems.
func ListProcMounts(mountFilePath string) ([]MountPoint, error) {
content, err := readMountInfo(mountFilePath)
if err != nil {
@ -786,7 +776,6 @@ func parseProcMounts(content []byte) ([]MountPoint, error) {
// Some filesystems may share a source name, e.g. tmpfs. And for bind mounting,
// it's possible to mount a non-root path of a filesystem, so we need to use
// root path and major:minor to represent mount source uniquely.
// This implementation is shared between Linux and NsEnterMounter
func SearchMountPoints(hostSource, mountInfoPath string) ([]string, error) {
mis, err := ParseMountInfo(mountInfoPath)
if err != nil {

@ -242,6 +242,10 @@ func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
if stat.Mode()&os.ModeSymlink != 0 {
return false, err
}
// go1.23 behavior change: https://github.com/golang/go/issues/63703#issuecomment-2535941458
if stat.Mode()&os.ModeIrregular != 0 {
return false, err
}
return true, nil
}
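
The added check accounts for a go1.23 behavior change on Windows (linked above) where some reparse points are reported as irregular files rather than symlinks. A minimal standalone sketch of the same mode test; the path is an illustrative assumption:

package main

import (
	"fmt"
	"os"
)

func main() {
	path := `C:\mnt\volume1` // made-up path for the example
	fi, err := os.Lstat(path)
	if err != nil {
		fmt.Println("lstat failed:", err)
		return
	}
	// Symlinks and (since go1.23) irregular files can both represent reparse
	// points, so neither is treated as "likely not a mount point".
	if fi.Mode()&(os.ModeSymlink|os.ModeIrregular) != 0 {
		fmt.Println("possibly a mount point")
		return
	}
	fmt.Println("likely not a mount point")
}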
@ -329,30 +333,3 @@ func ListVolumesOnDisk(diskID string) (volumeIDs []string, err error) {
volumeIds := strings.Split(strings.TrimSpace(string(output)), "\r\n")
return volumeIds, nil
}
// getAllParentLinks walks all symbolic links and return all the parent targets recursively
func getAllParentLinks(path string) ([]string, error) {
const maxIter = 255
links := []string{}
for {
links = append(links, path)
if len(links) > maxIter {
return links, fmt.Errorf("unexpected length of parent links: %v", links)
}
fi, err := os.Lstat(path)
if err != nil {
return links, fmt.Errorf("Lstat: %v", err)
}
if fi.Mode()&os.ModeSymlink == 0 {
break
}
path, err = os.Readlink(path)
if err != nil {
return links, fmt.Errorf("Readlink error: %v", err)
}
}
return links, nil
}

@ -117,11 +117,6 @@ func (resizefs *ResizeFs) NeedResize(devicePath string, deviceMountPath string)
return false, nil
}
deviceSize, err := resizefs.getDeviceSize(devicePath)
if err != nil {
return false, err
}
var fsSize, blockSize uint64
format, err := getDiskFormat(resizefs.exec, devicePath)
if err != nil {
formatErr := fmt.Errorf("ResizeFS.Resize - error checking format for device %s: %v", devicePath, err)
@ -134,30 +129,28 @@ func (resizefs *ResizeFs) NeedResize(devicePath string, deviceMountPath string)
return false, nil
}
klog.V(3).Infof("ResizeFs.needResize - checking mounted volume %s", devicePath)
switch format {
case "ext3", "ext4":
blockSize, fsSize, err = resizefs.getExtSize(devicePath)
klog.V(5).Infof("Ext size: filesystem size=%d, block size=%d", fsSize, blockSize)
case "xfs":
blockSize, fsSize, err = resizefs.getXFSSize(deviceMountPath)
klog.V(5).Infof("Xfs size: filesystem size=%d, block size=%d, err=%v", fsSize, blockSize, err)
case "ext3", "ext4", "xfs":
// For ext3/ext4/xfs, recommendation received from linux filesystem folks is to let
// resize2fs/xfs_growfs do the check for us. So we will not do any check here.
return true, nil
case "btrfs":
blockSize, fsSize, err = resizefs.getBtrfsSize(devicePath)
deviceSize, err := resizefs.getDeviceSize(devicePath)
if err != nil {
return false, err
}
blockSize, fsSize, err := resizefs.getBtrfsSize(devicePath)
klog.V(5).Infof("Btrfs size: filesystem size=%d, block size=%d, err=%v", fsSize, blockSize, err)
if err != nil {
return false, err
}
if deviceSize <= fsSize+blockSize {
return false, nil
}
return true, nil
default:
klog.Errorf("Not able to parse given filesystem info. fsType: %s, will not resize", format)
return false, fmt.Errorf("Could not parse fs info on given filesystem format: %s. Supported fs types are: xfs, ext3, ext4", format)
return false, fmt.Errorf("could not parse fs info of given filesystem format: %s. Supported fs types are: xfs, ext3, ext4", format)
}
if err != nil {
return false, err
}
// Tolerate one block difference, just in case of rounding errors somewhere.
klog.V(5).Infof("Volume %s: device size=%d, filesystem size=%d, block size=%d", devicePath, deviceSize, fsSize, blockSize)
if deviceSize <= fsSize+blockSize {
return false, nil
}
return true, nil
}
func (resizefs *ResizeFs) getDeviceSize(devicePath string) (uint64, error) {
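
For btrfs the original size comparison is kept, tolerating a difference of at most one block to absorb rounding. A worked example with made-up sizes (not measured from a real device):

package main

import "fmt"

func main() {
	// Hypothetical sizes in bytes.
	deviceSize := uint64(10 * 1024 * 1024 * 1024) // 10 GiB block device
	blockSize := uint64(4096)                     // filesystem block size
	fsSize := deviceSize - blockSize              // filesystem already fills the device (within one block)

	// Same tolerance as the hunk above: at most one block of slack
	// means no resize is needed.
	needResize := deviceSize > fsSize+blockSize
	fmt.Println(needResize) // false
}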
@ -173,56 +166,6 @@ func (resizefs *ResizeFs) getDeviceSize(devicePath string) (uint64, error) {
return size, nil
}
func (resizefs *ResizeFs) getDeviceRO(devicePath string) (bool, error) {
output, err := resizefs.exec.Command(blockDev, "--getro", devicePath).CombinedOutput()
outStr := strings.TrimSpace(string(output))
if err != nil {
return false, fmt.Errorf("failed to get readonly bit from device %s: %s: %s", devicePath, err, outStr)
}
switch outStr {
case "0":
return false, nil
case "1":
return true, nil
default:
return false, fmt.Errorf("Failed readonly device check. Expected 1 or 0, got '%s'", outStr)
}
}
func (resizefs *ResizeFs) getExtSize(devicePath string) (uint64, uint64, error) {
output, err := resizefs.exec.Command("dumpe2fs", "-h", devicePath).CombinedOutput()
if err != nil {
return 0, 0, fmt.Errorf("failed to read size of filesystem on %s: %s: %s", devicePath, err, string(output))
}
blockSize, blockCount, _ := resizefs.parseFsInfoOutput(string(output), ":", "block size", "block count")
if blockSize == 0 {
return 0, 0, fmt.Errorf("could not find block size of device %s", devicePath)
}
if blockCount == 0 {
return 0, 0, fmt.Errorf("could not find block count of device %s", devicePath)
}
return blockSize, blockSize * blockCount, nil
}
func (resizefs *ResizeFs) getXFSSize(devicePath string) (uint64, uint64, error) {
output, err := resizefs.exec.Command("xfs_io", "-c", "statfs", devicePath).CombinedOutput()
if err != nil {
return 0, 0, fmt.Errorf("failed to read size of filesystem on %s: %s: %s", devicePath, err, string(output))
}
blockSize, blockCount, _ := resizefs.parseFsInfoOutput(string(output), "=", "geom.bsize", "geom.datablocks")
if blockSize == 0 {
return 0, 0, fmt.Errorf("could not find block size of device %s", devicePath)
}
if blockCount == 0 {
return 0, 0, fmt.Errorf("could not find block count of device %s", devicePath)
}
return blockSize, blockSize * blockCount, nil
}
func (resizefs *ResizeFs) getBtrfsSize(devicePath string) (uint64, uint64, error) {
output, err := resizefs.exec.Command("btrfs", "inspect-internal", "dump-super", "-f", devicePath).CombinedOutput()
if err != nil {
@ -268,29 +211,18 @@ func (resizefs *ResizeFs) parseBtrfsInfoOutput(cmdOutput string, blockSizeKey st
return blockSize, blockCount, err
}
func (resizefs *ResizeFs) parseFsInfoOutput(cmdOutput string, spliter string, blockSizeKey string, blockCountKey string) (uint64, uint64, error) {
lines := strings.Split(cmdOutput, "\n")
var blockSize, blockCount uint64
var err error
for _, line := range lines {
tokens := strings.Split(line, spliter)
if len(tokens) != 2 {
continue
}
key, value := strings.ToLower(strings.TrimSpace(tokens[0])), strings.ToLower(strings.TrimSpace(tokens[1]))
if key == blockSizeKey {
blockSize, err = strconv.ParseUint(value, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("failed to parse block size %s: %s", value, err)
}
}
if key == blockCountKey {
blockCount, err = strconv.ParseUint(value, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("failed to parse block count %s: %s", value, err)
}
}
func (resizefs *ResizeFs) getDeviceRO(devicePath string) (bool, error) {
output, err := resizefs.exec.Command(blockDev, "--getro", devicePath).CombinedOutput()
outStr := strings.TrimSpace(string(output))
if err != nil {
return false, fmt.Errorf("failed to get readonly bit from device %s: %w: %s", devicePath, err, outStr)
}
switch outStr {
case "0":
return false, nil
case "1":
return true, nil
default:
return false, fmt.Errorf("failed readonly device check. Expected 1 or 0, got '%s'", outStr)
}
return blockSize, blockCount, err
}