Merge pull request #138 from ceph/devel

Sync the upstream changes from `ceph/ceph-csi:devel` into the `devel` branch
OpenShift Merge Robot 2022-10-14 04:09:17 -04:00 committed by GitHub
commit 1cce89fc93
13 changed files with 147 additions and 40 deletions

View File

@@ -33,7 +33,6 @@
- [6. Upgrade NFS Nodeplugin resources](#6-upgrade-nfs-nodeplugin-resources)
- [6.1 Update the NFS Nodeplugin RBAC](#61-update-the-nfs-nodeplugin-rbac)
- [6.2 Update the NFS Nodeplugin daemonset](#62-update-the-nfs-nodeplugin-daemonset)
- [6.3 Delete the old NFS Nodeplugin daemonset](#63-delete-the-old-nfs-nodeplugin-daemonset)
- [CSI Sidecar containers consideration](#csi-sidecar-containers-consideration)
## Pre-upgrade considerations
@@ -392,13 +391,6 @@ daemonset.apps/csi-nfsplugin configured
service/csi-metrics-nfsplugin configured
```
##### 6.3 Delete the old NFS Nodeplugin daemonset
```bash
$ kubectl delete daemonsets.apps csi-nfs-node
daemonset.apps "csi-nfs-node" deleted
```
We have successfully upgraded the NFS CSI driver from v3.6 to v3.7.
### CSI Sidecar containers consideration

View File

@@ -124,6 +124,9 @@ func waitForDeploymentInAvailableState(clientSet kubernetes.Interface, name, ns
if isRetryableAPIError(err) {
return false, nil
}
if apierrs.IsNotFound(err) {
return false, nil
}
e2elog.Logf("%q deployment to be Available (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
return false, err
@@ -390,6 +393,12 @@ func waitForContainersArgsUpdate(
) error {
e2elog.Logf("waiting for deployment updates %s/%s", ns, deploymentName)
// wait for the deployment to be available
err := waitForDeploymentInAvailableState(c, deploymentName, ns, deployTimeout)
if err != nil {
return fmt.Errorf("deployment %s/%s did not become available yet: %w", ns, deploymentName, err)
}
// Scale down to 0.
scale, err := c.AppsV1().Deployments(ns).GetScale(context.TODO(), deploymentName, metav1.GetOptions{})
if err != nil {

View File

@@ -39,6 +39,7 @@ func init() {
flag.BoolVar(&deployNFS, "deploy-nfs", false, "deploy nfs csi driver")
flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephFS csi driver")
flag.BoolVar(&testRBD, "test-rbd", true, "test rbd csi driver")
flag.BoolVar(&testNBD, "test-nbd", false, "test rbd csi driver with rbd-nbd mounter")
flag.BoolVar(&testNFS, "test-nfs", false, "test nfs csi driver")
flag.BoolVar(&helmTest, "helm-test", false, "tests running on deployment via helm")
flag.BoolVar(&upgradeTesting, "upgrade-testing", false, "perform upgrade testing")
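The new `-test-nbd` flag defaults to `false`, so the rbd-nbd tests are skipped unless they are enabled explicitly. A minimal sketch of turning them on when invoking the e2e suite, assuming the flags are passed straight to `go test` (the project's real invocation, timeouts, and kubeconfig handling are not shown in this diff):

```bash
# Illustrative only; enable the rbd-nbd tests alongside the regular RBD tests.
go test ./e2e -test-cephfs=false -test-rbd=true -test-nbd=true -timeout=120m
```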

View File

@@ -284,9 +284,13 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
int(time.Since(start).Seconds()))
pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{})
if err == nil {
e2elog.Logf("PVC %s (status: %s) has not been deleted yet, rechecking...", name, pvc.Status)
return false, nil
}
if isRetryableAPIError(err) {
e2elog.Logf("failed to verify deletion of PVC %s (status: %s): %v", name, pvc.Status, err)
return false, nil
}
if !apierrs.IsNotFound(err) {
@@ -294,11 +298,15 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
}
// Examine the pv.ClaimRef and UID. Expect nil values.
_, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
oldPV, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
if err == nil {
e2elog.Logf("PV %s (status: %s) has not been deleted yet, rechecking...", pv.Name, oldPV.Status)
return false, nil
}
if isRetryableAPIError(err) {
e2elog.Logf("failed to verify deletion of PV %s (status: %s): %v", pv.Name, oldPV.Status, err)
return false, nil
}
if !apierrs.IsNotFound(err) {

View File

@@ -1047,6 +1047,12 @@ var _ = Describe("RBD", func() {
})
By("create a PVC and bind it to an app using rbd-nbd mounter", func() {
if !testNBD {
e2elog.Logf("skipping NBD test")
return
}
err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
@@ -1083,6 +1089,12 @@ var _ = Describe("RBD", func() {
})
By("Resize rbd-nbd PVC and check application directory size", func() {
if !testNBD {
e2elog.Logf("skipping NBD test")
return
}
if util.CheckKernelSupport(kernelRelease, nbdResizeSupport) {
err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
@@ -1290,6 +1302,12 @@ var _ = Describe("RBD", func() {
By("create PVC with journaling,fast-diff image-features and bind it to an app using rbd-nbd mounter",
func() {
if !testNBD {
e2elog.Logf("skipping NBD test")
return
}
if util.CheckKernelSupport(kernelRelease, fastDiffSupport) {
err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
@@ -1330,6 +1348,12 @@ var _ = Describe("RBD", func() {
// NOTE: RWX is restricted for FileSystem VolumeMode at ceph-csi,
// see pull#261 for more details.
By("Create RWX+Block Mode PVC and bind to multiple pods via deployment using rbd-nbd mounter", func() {
if !testNBD {
e2elog.Logf("skipping NBD test")
return
}
err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
@@ -1415,6 +1439,12 @@ var _ = Describe("RBD", func() {
})
By("Create ROX+FS Mode PVC and bind to multiple pods via deployment using rbd-nbd mounter", func() {
if !testNBD {
e2elog.Logf("skipping NBD test")
return
}
err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
@@ -1540,6 +1570,12 @@ var _ = Describe("RBD", func() {
})
By("Create ROX+Block Mode PVC and bind to multiple pods via deployment using rbd-nbd mounter", func() {
if !testNBD {
e2elog.Logf("skipping NBD test")
return
}
err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
@@ -1666,6 +1702,12 @@ var _ = Describe("RBD", func() {
})
By("perform IO on rbd-nbd volume after nodeplugin restart", func() {
if !testNBD {
e2elog.Logf("skipping NBD test")
return
}
err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
@@ -1830,6 +1872,12 @@ var _ = Describe("RBD", func() {
})
By("create a PVC and bind it to an app using rbd-nbd mounter with encryption", func() {
if !testNBD {
e2elog.Logf("skipping NBD test")
return
}
err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
@@ -2199,6 +2247,12 @@ var _ = Describe("RBD", func() {
By(
"create a PVC and Bind it to an app with journaling/exclusive-lock image-features and rbd-nbd mounter",
func() {
if !testNBD {
e2elog.Logf("skipping NBD test")
return
}
err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)

View File

@@ -85,6 +85,7 @@ var (
deployNFS bool
testCephFS bool
testRBD bool
testNBD bool
testNFS bool
helmTest bool
upgradeTesting bool

View File

@@ -637,7 +637,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
}
if stat.Mode().IsDir() {
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath, false)
}
return nil, status.Errorf(codes.InvalidArgument, "targetpath %q is not a directory or device", targetPath)

View File

@@ -241,6 +241,7 @@ func FilesystemNodeGetVolumeStats(
ctx context.Context,
mounter mount.Interface,
targetPath string,
includeInodes bool,
) (*csi.NodeGetVolumeStatsResponse, error) {
isMnt, err := util.IsMountPoint(mounter, targetPath)
if err != nil {
@@ -274,6 +275,19 @@ func FilesystemNodeGetVolumeStats(
if !ok {
log.ErrorLog(ctx, "failed to fetch used bytes")
}
res := &csi.NodeGetVolumeStatsResponse{
Usage: []*csi.VolumeUsage{
{
Available: requirePositive(available),
Total: requirePositive(capacity),
Used: requirePositive(used),
Unit: csi.VolumeUsage_BYTES,
},
},
}
if includeInodes {
inodes, ok := (*(volMetrics.Inodes)).AsInt64()
if !ok {
log.ErrorLog(ctx, "failed to fetch available inodes")
@@ -290,22 +304,15 @@ func FilesystemNodeGetVolumeStats(
log.ErrorLog(ctx, "failed to fetch used inodes")
}
return &csi.NodeGetVolumeStatsResponse{
Usage: []*csi.VolumeUsage{
{
Available: requirePositive(available),
Total: requirePositive(capacity),
Used: requirePositive(used),
Unit: csi.VolumeUsage_BYTES,
},
{
res.Usage = append(res.Usage, &csi.VolumeUsage{
Available: requirePositive(inodesFree),
Total: requirePositive(inodes),
Used: requirePositive(inodesUsed),
Unit: csi.VolumeUsage_INODES,
},
},
}, nil
})
}
return res, nil
}
// requirePositive returns the value for `x` when it is greater or equal to 0,

View File

@@ -88,7 +88,7 @@ func TestFilesystemNodeGetVolumeStats(t *testing.T) {
// retry until a mountpoint is found
for {
stats, err := FilesystemNodeGetVolumeStats(context.TODO(), mount.New(""), cwd)
stats, err := FilesystemNodeGetVolumeStats(context.TODO(), mount.New(""), cwd, true)
if err != nil && cwd != "/" && strings.HasSuffix(err.Error(), "is not mounted") {
// try again with the parent directory
cwd = filepath.Dir(cwd)

View File

@@ -182,7 +182,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
}
if stat.Mode().IsDir() {
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath, false)
}
return nil, status.Errorf(codes.InvalidArgument,

View File

@@ -1240,7 +1240,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
}
if stat.Mode().IsDir() {
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath, true)
} else if (stat.Mode() & os.ModeDevice) == os.ModeDevice {
return blockNodeGetVolumeStats(ctx, targetPath)
}

View File

@@ -136,7 +136,7 @@ install() {
mkdir -p ${TEMP}
# shellcheck disable=SC2021
dist=$(echo "${dist}" | tr "[A-Z]" "[a-z]")
wget "https://get.helm.sh/helm-${HELM_VERSION}-${dist}-${arch}.tar.gz" -O "${TEMP}/helm.tar.gz"
wget "https://get.helm.sh/helm-${HELM_VERSION}-${dist}-${arch}.tar.gz" -O "${TEMP}/helm.tar.gz" || exit 1
tar -C "${TEMP}" -zxvf "${TEMP}/helm.tar.gz"
fi
echo "Helm install successful"

View File

@@ -22,7 +22,7 @@ function copy_image_to_cluster() {
if [ -z "$(${CONTAINER_CMD} images -q "${build_image}")" ]; then
${CONTAINER_CMD} pull "${build_image}"
fi
if [[ "${VM_DRIVER}" == "none" ]]; then
if [[ "${VM_DRIVER}" == "none" ]] || [[ "${VM_DRIVER}" == "podman" ]]; then
${CONTAINER_CMD} tag "${build_image}" "${final_image}"
return
fi
@@ -139,6 +139,36 @@ function validate_sidecar() {
done
}
# install_podman_wrapper creates /usr/bin/podman.wrapper which adds /sys
# filesystem mount points when a privileged container is started. This makes it
# possible to map RBD devices in the container that minikube creates when
# VM_DRIVER=podman is used.
function install_podman_wrapper() {
if [[ -e /usr/bin/podman.wrapper ]]
then
return
fi
# disabled single quoted check, the script should be created as is
# shellcheck disable=SC2016
echo '#!/bin/sh
if [[ "${1}" = run ]]
then
if (echo "${@}" | grep -q privileged)
then
shift
exec /usr/bin/podman.real run -v /sys:/sys:rw -v /dev:/dev:rw --systemd=true "${@}"
fi
fi
exec /usr/bin/podman.real "${@}"
' > /usr/bin/podman.wrapper
chmod +x /usr/bin/podman.wrapper
mv /usr/bin/podman /usr/bin/podman.real
ln -s podman.wrapper /usr/bin/podman
}
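As a usage illustration, the wrapper only matters when minikube runs with the podman driver; a hedged example of bringing the test cluster up that way (the script path is an assumption based on a common repository layout, and other environment variables may be required):

```bash
# Assumed invocation; VM_DRIVER=podman is the mode this change introduces.
export VM_DRIVER=podman
./scripts/minikube.sh up
```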
# Storage providers and the default storage class are not needed for Ceph-CSI
# testing. In order to reduce resources and potential conflicts between storage
# plugins, disable them.
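The `disable_storage_addons` helper referenced by this comment is not shown in the diff; as a rough sketch only, assuming minikube's built-in addon names and the script's `${minikube}` binary variable, it could look like:

```bash
# Hypothetical sketch, not the repository's actual implementation:
# turn off minikube's bundled storage plugins so they do not conflict
# with Ceph-CSI during testing.
function disable_storage_addons() {
    ${minikube} addons disable default-storageclass 2>/dev/null || true
    ${minikube} addons disable storage-provisioner 2>/dev/null || true
}
```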
@@ -185,7 +215,7 @@ K8S_FEATURE_GATES=${K8S_FEATURE_GATES:-""}
# kubelet.resolv-conf needs to point to a file, not a symlink
# the default minikube VM has /etc/resolv.conf -> /run/systemd/resolve/resolv.conf
RESOLV_CONF='/run/systemd/resolve/resolv.conf'
if [[ "${VM_DRIVER}" == "none" ]] && [[ ! -e "${RESOLV_CONF}" ]]; then
if { [[ "${VM_DRIVER}" == "none" ]] || [[ "${VM_DRIVER}" == "podman" ]]; } && [[ ! -e "${RESOLV_CONF}" ]]; then
# in case /run/systemd/resolve/resolv.conf does not exist, use the
# standard /etc/resolv.conf (with symlink resolved)
RESOLV_CONF="$(readlink -f /etc/resolv.conf)"
@@ -216,6 +246,8 @@ up)
if [[ "${VM_DRIVER}" == "none" ]]; then
mkdir -p "$HOME"/.kube "$HOME"/.minikube
install_kubectl
elif [[ "${VM_DRIVER}" == "podman" ]]; then
install_podman_wrapper
fi
disable_storage_addons
@@ -234,11 +266,14 @@ up)
# create a link so the default dataDirHostPath will work for this
# environment
if [[ "${VM_DRIVER}" != "none" ]]; then
if [[ "${VM_DRIVER}" != "none" ]] && [[ "${VM_DRIVER}" != "podman" ]]; then
wait_for_ssh
# shellcheck disable=SC2086
${minikube} ssh "sudo mkdir -p /mnt/${DISK}/var/lib/rook;sudo ln -s /mnt/${DISK}/var/lib/rook /var/lib/rook"
fi
if [[ "${VM_DRIVER}" = "podman" ]]; then
${minikube} ssh "sudo mount -oremount,rw /sys"
fi
${minikube} kubectl -- cluster-info
;;
down)