Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-01-18 02:39:30 +00:00

Merge pull request #138 from ceph/devel

Sync the upstream changes from `ceph/ceph-csi:devel` into the `devel` branch.

Commit 1cce89fc93
@@ -33,7 +33,6 @@
 - [6. Upgrade NFS Nodeplugin resources](#6-upgrade-nfs-nodeplugin-resources)
   - [6.1 Update the NFS Nodeplugin RBAC](#61-update-the-nfs-nodeplugin-rbac)
   - [6.2 Update the NFS Nodeplugin daemonset](#62-update-the-nfs-nodeplugin-daemonset)
-  - [6.3 Delete the old NFS Nodeplugin daemonset](#63-delete-the-old-nfs-nodeplugin-daemonset)
 - [CSI Sidecar containers consideration](#csi-sidecar-containers-consideration)

 ## Pre-upgrade considerations
@@ -392,13 +391,6 @@ daemonset.apps/csi-nfsplugin configured
 service/csi-metrics-nfsplugin configured
 ```

-##### 6.3 Delete the old NFS Nodeplugin daemonset
-
-```bash
-$ kubectl delete daemonsets.apps csi-nfs-node
-daemonset.apps "csi-nfs-node" deleted
-```
-
 we have successfully upgraded nfs csi from v3.6 to v3.7

 ### CSI Sidecar containers consideration
@@ -124,6 +124,9 @@ func waitForDeploymentInAvailableState(clientSet kubernetes.Interface, name, ns
         if isRetryableAPIError(err) {
             return false, nil
         }
+        if apierrs.IsNotFound(err) {
+            return false, nil
+        }
         e2elog.Logf("%q deployment to be Available (%d seconds elapsed)", name, int(time.Since(start).Seconds()))

         return false, err
@@ -390,6 +393,12 @@ func waitForContainersArgsUpdate(
 ) error {
     e2elog.Logf("waiting for deployment updates %s/%s", ns, deploymentName)

+    // wait for the deployment to be available
+    err := waitForDeploymentInAvailableState(c, deploymentName, ns, deployTimeout)
+    if err != nil {
+        return fmt.Errorf("deployment %s/%s did not become available yet: %w", ns, deploymentName, err)
+    }
+
     // Scale down to 0.
     scale, err := c.AppsV1().Deployments(ns).GetScale(context.TODO(), deploymentName, metav1.GetOptions{})
     if err != nil {
@@ -39,6 +39,7 @@ func init() {
     flag.BoolVar(&deployNFS, "deploy-nfs", false, "deploy nfs csi driver")
     flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephFS csi driver")
     flag.BoolVar(&testRBD, "test-rbd", true, "test rbd csi driver")
+    flag.BoolVar(&testNBD, "test-nbd", false, "test rbd csi driver with rbd-nbd mounter")
     flag.BoolVar(&testNFS, "test-nfs", false, "test nfs csi driver")
     flag.BoolVar(&helmTest, "helm-test", false, "tests running on deployment via helm")
     flag.BoolVar(&upgradeTesting, "upgrade-testing", false, "perform upgrade testing")
e2e/pvc.go (10 changed lines)
@@ -284,9 +284,13 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
             int(time.Since(start).Seconds()))
         pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{})
         if err == nil {
+            e2elog.Logf("PVC %s (status: %s) has not been deleted yet, rechecking...", name, pvc.Status)
+
             return false, nil
         }
         if isRetryableAPIError(err) {
+            e2elog.Logf("failed to verify deletion of PVC %s (status: %s): %v", name, pvc.Status, err)
+
             return false, nil
         }
         if !apierrs.IsNotFound(err) {
@@ -294,11 +298,15 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
         }

         // Examine the pv.ClaimRef and UID. Expect nil values.
-        _, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
+        oldPV, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
         if err == nil {
+            e2elog.Logf("PV %s (status: %s) has not been deleted yet, rechecking...", pv.Name, oldPV.Status)
+
             return false, nil
         }
         if isRetryableAPIError(err) {
+            e2elog.Logf("failed to verify deletion of PV %s (status: %s): %v", pv.Name, oldPV.Status, err)
+
             return false, nil
         }
         if !apierrs.IsNotFound(err) {
e2e/rbd.go (54 changed lines)
@@ -1047,6 +1047,12 @@ var _ = Describe("RBD", func() {
         })

         By("create a PVC and bind it to an app using rbd-nbd mounter", func() {
+            if !testNBD {
+                e2elog.Logf("skipping NBD test")
+
+                return
+            }
+
             err := deleteResource(rbdExamplePath + "storageclass.yaml")
             if err != nil {
                 e2elog.Failf("failed to delete storageclass: %v", err)
@@ -1083,6 +1089,12 @@ var _ = Describe("RBD", func() {
         })

         By("Resize rbd-nbd PVC and check application directory size", func() {
+            if !testNBD {
+                e2elog.Logf("skipping NBD test")
+
+                return
+            }
+
             if util.CheckKernelSupport(kernelRelease, nbdResizeSupport) {
                 err := deleteResource(rbdExamplePath + "storageclass.yaml")
                 if err != nil {
@@ -1290,6 +1302,12 @@ var _ = Describe("RBD", func() {

         By("create PVC with journaling,fast-diff image-features and bind it to an app using rbd-nbd mounter",
             func() {
+                if !testNBD {
+                    e2elog.Logf("skipping NBD test")
+
+                    return
+                }
+
                 if util.CheckKernelSupport(kernelRelease, fastDiffSupport) {
                     err := deleteResource(rbdExamplePath + "storageclass.yaml")
                     if err != nil {
@@ -1330,6 +1348,12 @@ var _ = Describe("RBD", func() {
         // NOTE: RWX is restricted for FileSystem VolumeMode at ceph-csi,
         // see pull#261 for more details.
         By("Create RWX+Block Mode PVC and bind to multiple pods via deployment using rbd-nbd mounter", func() {
+            if !testNBD {
+                e2elog.Logf("skipping NBD test")
+
+                return
+            }
+
             err := deleteResource(rbdExamplePath + "storageclass.yaml")
             if err != nil {
                 e2elog.Failf("failed to delete storageclass: %v", err)
@@ -1415,6 +1439,12 @@ var _ = Describe("RBD", func() {
         })

         By("Create ROX+FS Mode PVC and bind to multiple pods via deployment using rbd-nbd mounter", func() {
+            if !testNBD {
+                e2elog.Logf("skipping NBD test")
+
+                return
+            }
+
             err := deleteResource(rbdExamplePath + "storageclass.yaml")
             if err != nil {
                 e2elog.Failf("failed to delete storageclass: %v", err)
@@ -1540,6 +1570,12 @@ var _ = Describe("RBD", func() {
         })

         By("Create ROX+Block Mode PVC and bind to multiple pods via deployment using rbd-nbd mounter", func() {
+            if !testNBD {
+                e2elog.Logf("skipping NBD test")
+
+                return
+            }
+
             err := deleteResource(rbdExamplePath + "storageclass.yaml")
             if err != nil {
                 e2elog.Failf("failed to delete storageclass: %v", err)
@@ -1666,6 +1702,12 @@ var _ = Describe("RBD", func() {
         })

         By("perform IO on rbd-nbd volume after nodeplugin restart", func() {
+            if !testNBD {
+                e2elog.Logf("skipping NBD test")
+
+                return
+            }
+
             err := deleteResource(rbdExamplePath + "storageclass.yaml")
             if err != nil {
                 e2elog.Failf("failed to delete storageclass: %v", err)
@@ -1830,6 +1872,12 @@ var _ = Describe("RBD", func() {
         })

         By("create a PVC and bind it to an app using rbd-nbd mounter with encryption", func() {
+            if !testNBD {
+                e2elog.Logf("skipping NBD test")
+
+                return
+            }
+
             err := deleteResource(rbdExamplePath + "storageclass.yaml")
             if err != nil {
                 e2elog.Failf("failed to delete storageclass: %v", err)
@@ -2199,6 +2247,12 @@ var _ = Describe("RBD", func() {
         By(
             "create a PVC and Bind it to an app with journaling/exclusive-lock image-features and rbd-nbd mounter",
             func() {
+                if !testNBD {
+                    e2elog.Logf("skipping NBD test")
+
+                    return
+                }
+
                 err := deleteResource(rbdExamplePath + "storageclass.yaml")
                 if err != nil {
                     e2elog.Failf("failed to delete storageclass: %v", err)
@@ -85,6 +85,7 @@ var (
     deployNFS      bool
     testCephFS     bool
     testRBD        bool
+    testNBD        bool
     testNFS        bool
     helmTest       bool
     upgradeTesting bool
|
|||||||
}
|
}
|
||||||
|
|
||||||
if stat.Mode().IsDir() {
|
if stat.Mode().IsDir() {
|
||||||
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
|
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, status.Errorf(codes.InvalidArgument, "targetpath %q is not a directory or device", targetPath)
|
return nil, status.Errorf(codes.InvalidArgument, "targetpath %q is not a directory or device", targetPath)
|
||||||
|
@@ -241,6 +241,7 @@ func FilesystemNodeGetVolumeStats(
     ctx context.Context,
     mounter mount.Interface,
     targetPath string,
+    includeInodes bool,
 ) (*csi.NodeGetVolumeStatsResponse, error) {
     isMnt, err := util.IsMountPoint(mounter, targetPath)
     if err != nil {
@@ -274,23 +275,8 @@ func FilesystemNodeGetVolumeStats(
     if !ok {
         log.ErrorLog(ctx, "failed to fetch used bytes")
     }
-    inodes, ok := (*(volMetrics.Inodes)).AsInt64()
-    if !ok {
-        log.ErrorLog(ctx, "failed to fetch available inodes")
-
-        return nil, status.Error(codes.Unknown, "failed to fetch available inodes")
-    }
-    inodesFree, ok := (*(volMetrics.InodesFree)).AsInt64()
-    if !ok {
-        log.ErrorLog(ctx, "failed to fetch free inodes")
-    }
-
-    inodesUsed, ok := (*(volMetrics.InodesUsed)).AsInt64()
-    if !ok {
-        log.ErrorLog(ctx, "failed to fetch used inodes")
-    }
-
-    return &csi.NodeGetVolumeStatsResponse{
+
+    res := &csi.NodeGetVolumeStatsResponse{
         Usage: []*csi.VolumeUsage{
             {
                 Available: requirePositive(available),
@@ -298,14 +284,35 @@ func FilesystemNodeGetVolumeStats(
                 Used:      requirePositive(used),
                 Unit:      csi.VolumeUsage_BYTES,
             },
-            {
-                Available: requirePositive(inodesFree),
-                Total:     requirePositive(inodes),
-                Used:      requirePositive(inodesUsed),
-                Unit:      csi.VolumeUsage_INODES,
-            },
         },
-    }, nil
+    }
+
+    if includeInodes {
+        inodes, ok := (*(volMetrics.Inodes)).AsInt64()
+        if !ok {
+            log.ErrorLog(ctx, "failed to fetch available inodes")
+
+            return nil, status.Error(codes.Unknown, "failed to fetch available inodes")
+        }
+        inodesFree, ok := (*(volMetrics.InodesFree)).AsInt64()
+        if !ok {
+            log.ErrorLog(ctx, "failed to fetch free inodes")
+        }
+
+        inodesUsed, ok := (*(volMetrics.InodesUsed)).AsInt64()
+        if !ok {
+            log.ErrorLog(ctx, "failed to fetch used inodes")
+        }
+
+        res.Usage = append(res.Usage, &csi.VolumeUsage{
+            Available: requirePositive(inodesFree),
+            Total:     requirePositive(inodes),
+            Used:      requirePositive(inodesUsed),
+            Unit:      csi.VolumeUsage_INODES,
+        })
+    }
+
+    return res, nil
 }

 // requirePositive returns the value for `x` when it is greater or equal to 0,
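With this refactor, inode statistics become opt-in: the response always carries a `VolumeUsage` entry with `Unit: BYTES`, and the `Unit: INODES` entry is only appended when the caller passes `includeInodes = true` (two of the node servers touched by this change pass `false`, one passes `true`). A minimal sketch of how a consumer of the RPC response could tell whether inode metrics were reported; `inodeUsage` is a hypothetical helper for illustration, not part of this pull request:

```go
package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// inodeUsage scans a NodeGetVolumeStats response for the INODES entry.
// It returns false when the node server passed includeInodes=false, in
// which case only the BYTES entry is present.
func inodeUsage(resp *csi.NodeGetVolumeStatsResponse) (*csi.VolumeUsage, bool) {
	for _, u := range resp.GetUsage() {
		if u.GetUnit() == csi.VolumeUsage_INODES {
			return u, true
		}
	}

	return nil, false
}

func main() {
	// Example response from a node server that passed includeInodes=false
	// (bytes only, no inode statistics).
	resp := &csi.NodeGetVolumeStatsResponse{
		Usage: []*csi.VolumeUsage{
			{Unit: csi.VolumeUsage_BYTES, Total: 1000, Used: 400, Available: 600},
		},
	}

	if _, ok := inodeUsage(resp); !ok {
		fmt.Println("no inode statistics reported for this volume")
	}
}
```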
@@ -88,7 +88,7 @@ func TestFilesystemNodeGetVolumeStats(t *testing.T) {

     // retry until a mountpoint is found
     for {
-        stats, err := FilesystemNodeGetVolumeStats(context.TODO(), mount.New(""), cwd)
+        stats, err := FilesystemNodeGetVolumeStats(context.TODO(), mount.New(""), cwd, true)
         if err != nil && cwd != "/" && strings.HasSuffix(err.Error(), "is not mounted") {
             // try again with the parent directory
             cwd = filepath.Dir(cwd)
@@ -182,7 +182,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
     }

     if stat.Mode().IsDir() {
-        return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
+        return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath, false)
     }

     return nil, status.Errorf(codes.InvalidArgument,
@@ -1240,7 +1240,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
     }

     if stat.Mode().IsDir() {
-        return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
+        return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath, true)
     } else if (stat.Mode() & os.ModeDevice) == os.ModeDevice {
         return blockNodeGetVolumeStats(ctx, targetPath)
     }
@@ -136,7 +136,7 @@ install() {
         mkdir -p ${TEMP}
         # shellcheck disable=SC2021
         dist=$(echo "${dist}" | tr "[A-Z]" "[a-z]")
-        wget "https://get.helm.sh/helm-${HELM_VERSION}-${dist}-${arch}.tar.gz" -O "${TEMP}/helm.tar.gz"
+        wget "https://get.helm.sh/helm-${HELM_VERSION}-${dist}-${arch}.tar.gz" -O "${TEMP}/helm.tar.gz" || exit 1
         tar -C "${TEMP}" -zxvf "${TEMP}/helm.tar.gz"
     fi
     echo "Helm install successful"
@@ -22,7 +22,7 @@ function copy_image_to_cluster() {
     if [ -z "$(${CONTAINER_CMD} images -q "${build_image}")" ]; then
         ${CONTAINER_CMD} pull "${build_image}"
     fi
-    if [[ "${VM_DRIVER}" == "none" ]]; then
+    if [[ "${VM_DRIVER}" == "none" ]] || [[ "${VM_DRIVER}" == "podman" ]]; then
         ${CONTAINER_CMD} tag "${build_image}" "${final_image}"
         return
     fi
@@ -139,6 +139,36 @@ function validate_sidecar() {
     done
 }

+# install_podman_wrapper creates /usr/bin/podman.wrapper which adds /sys
+# filesystem mount points when a privileged container is started. This makes it
+# possible to map RBD devices in the container that minikube creates when
+# VM_DRIVER=podman is used.
+function install_podman_wrapper() {
+    if [[ -e /usr/bin/podman.wrapper ]]
+    then
+        return
+    fi
+
+    # disabled single quoted check, the script should be created as is
+    # shellcheck disable=SC2016
+    echo '#!/bin/sh
+if [[ "${1}" = run ]]
+then
+    if (echo "${@}" | grep -q privileged)
+    then
+        shift
+        exec /usr/bin/podman.real run -v /sys:/sys:rw -v /dev:/dev:rw --systemd=true "${@}"
+    fi
+fi
+
+exec /usr/bin/podman.real "${@}"
+' > /usr/bin/podman.wrapper
+    chmod +x /usr/bin/podman.wrapper
+
+    mv /usr/bin/podman /usr/bin/podman.real
+    ln -s podman.wrapper /usr/bin/podman
+}
+
 # Storage providers and the default storage class is not needed for Ceph-CSI
 # testing. In order to reduce resources and potential conflicts between storage
 # plugins, disable them.
@@ -185,7 +215,7 @@ K8S_FEATURE_GATES=${K8S_FEATURE_GATES:-""}
 # kubelet.resolv-conf needs to point to a file, not a symlink
 # the default minikube VM has /etc/resolv.conf -> /run/systemd/resolve/resolv.conf
 RESOLV_CONF='/run/systemd/resolve/resolv.conf'
-if [[ "${VM_DRIVER}" == "none" ]] && [[ ! -e "${RESOLV_CONF}" ]]; then
+if { [[ "${VM_DRIVER}" == "none" ]] || [[ "${VM_DRIVER}" == "podman" ]]; } && [[ ! -e "${RESOLV_CONF}" ]]; then
     # in case /run/systemd/resolve/resolv.conf does not exist, use the
     # standard /etc/resolv.conf (with symlink resolved)
     RESOLV_CONF="$(readlink -f /etc/resolv.conf)"
@@ -216,6 +246,8 @@ up)
     if [[ "${VM_DRIVER}" == "none" ]]; then
         mkdir -p "$HOME"/.kube "$HOME"/.minikube
         install_kubectl
+    elif [[ "${VM_DRIVER}" == "podman" ]]; then
+        install_podman_wrapper
     fi

     disable_storage_addons
@@ -234,11 +266,14 @@ up)

     # create a link so the default dataDirHostPath will work for this
     # environment
-    if [[ "${VM_DRIVER}" != "none" ]]; then
+    if [[ "${VM_DRIVER}" != "none" ]] && [[ "${VM_DRIVER}" != "podman" ]]; then
         wait_for_ssh
         # shellcheck disable=SC2086
         ${minikube} ssh "sudo mkdir -p /mnt/${DISK}/var/lib/rook;sudo ln -s /mnt/${DISK}/var/lib/rook /var/lib/rook"
     fi
+    if [[ "${VM_DRIVER}" = "podman" ]]; then
+        ${minikube} ssh "sudo mount -oremount,rw /sys"
+    fi
     ${minikube} kubectl -- cluster-info
     ;;
 down)