Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-01-18 02:39:30 +00:00

Merge pull request #163 from ceph/devel
sync downstream devel with upstream devel

This commit is contained in: commit 4675681559
.github/workflows/mergify-copy-labels.yaml (vendored, new file, +18 lines)
@@ -0,0 +1,18 @@
+---
+# yamllint disable rule:line-length
+name: Mergify merge-queue labels copier
+# yamllint disable-line rule:truthy
+on:
+  pull_request_target:
+    types:
+      - opened
+
+jobs:
+  mergify-merge-queue-labels-copier:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Copying labels
+        uses: Mergifyio/gha-mergify-merge-queue-labels-copier@main
+        with:
+          additional-labels: 'ok-to-test'
+          token: ${{ secrets.CEPH_CSI_BOT_TOKEN }}
.github/workflows/pull-request-commentor.yaml (vendored, 82 lines changed)
@@ -10,106 +10,97 @@ on:
       - labeled
 jobs:
   add-comment:
-    # yamllint disable-line rule:line-length
-    if: github.event.label.name == 'ok-to-test' && github.event.pull_request.merged != 'true'
+    if: >
+      (github.event.label.name == 'ok-to-test' &&
+       github.event.pull_request.merged != true)
     runs-on: ubuntu-latest
     permissions:
      pull-requests: write
     steps:
-      - name: Add comment to trigger external storage tests for Kubernetes 1.23
-        uses: peter-evans/create-or-update-comment@v2
-        with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            /test ci/centos/k8s-e2e-external-storage/1.23
-
-      - name: Add comment to trigger external storage tests for Kubernetes 1.24
-        uses: peter-evans/create-or-update-comment@v2
-        with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            /test ci/centos/k8s-e2e-external-storage/1.24
-
       - name: Add comment to trigger external storage tests for Kubernetes 1.25
-        uses: peter-evans/create-or-update-comment@v2
+        uses: peter-evans/create-or-update-comment@v3
         with:
+          token: ${{ secrets.CEPH_CSI_BOT_TOKEN }}
           issue-number: ${{ github.event.pull_request.number }}
           body: |
             /test ci/centos/k8s-e2e-external-storage/1.25

       - name: Add comment to trigger external storage tests for Kubernetes 1.26
-        uses: peter-evans/create-or-update-comment@v2
+        uses: peter-evans/create-or-update-comment@v3
         with:
+          token: ${{ secrets.CEPH_CSI_BOT_TOKEN }}
           issue-number: ${{ github.event.pull_request.number }}
           body: |
             /test ci/centos/k8s-e2e-external-storage/1.26

-      - name: Add comment to trigger helm E2E tests for Kubernetes 1.23
-        uses: peter-evans/create-or-update-comment@v2
+      - name: Add comment to trigger external storage tests for Kubernetes 1.27
+        uses: peter-evans/create-or-update-comment@v3
         with:
+          token: ${{ secrets.CEPH_CSI_BOT_TOKEN }}
           issue-number: ${{ github.event.pull_request.number }}
           body: |
-            /test ci/centos/mini-e2e-helm/k8s-1.23
-
-      - name: Add comment to trigger helm E2E tests for Kubernetes 1.24
-        uses: peter-evans/create-or-update-comment@v2
-        with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            /test ci/centos/mini-e2e-helm/k8s-1.24
+            /test ci/centos/k8s-e2e-external-storage/1.27

       - name: Add comment to trigger helm E2E tests for Kubernetes 1.25
-        uses: peter-evans/create-or-update-comment@v2
+        uses: peter-evans/create-or-update-comment@v3
         with:
+          token: ${{ secrets.CEPH_CSI_BOT_TOKEN }}
           issue-number: ${{ github.event.pull_request.number }}
           body: |
             /test ci/centos/mini-e2e-helm/k8s-1.25

       - name: Add comment to trigger helm E2E tests for Kubernetes 1.26
-        uses: peter-evans/create-or-update-comment@v2
+        uses: peter-evans/create-or-update-comment@v3
         with:
+          token: ${{ secrets.CEPH_CSI_BOT_TOKEN }}
           issue-number: ${{ github.event.pull_request.number }}
           body: |
             /test ci/centos/mini-e2e-helm/k8s-1.26

-      - name: Add comment to trigger E2E tests for Kubernetes 1.23
-        uses: peter-evans/create-or-update-comment@v2
+      - name: Add comment to trigger helm E2E tests for Kubernetes 1.27
+        uses: peter-evans/create-or-update-comment@v3
         with:
+          token: ${{ secrets.CEPH_CSI_BOT_TOKEN }}
           issue-number: ${{ github.event.pull_request.number }}
           body: |
-            /test ci/centos/mini-e2e/k8s-1.23
-
-      - name: Add comment to trigger E2E tests for Kubernetes 1.24
-        uses: peter-evans/create-or-update-comment@v2
-        with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            /test ci/centos/mini-e2e/k8s-1.24
+            /test ci/centos/mini-e2e-helm/k8s-1.27

       - name: Add comment to trigger E2E tests for Kubernetes 1.25
-        uses: peter-evans/create-or-update-comment@v2
+        uses: peter-evans/create-or-update-comment@v3
         with:
+          token: ${{ secrets.CEPH_CSI_BOT_TOKEN }}
           issue-number: ${{ github.event.pull_request.number }}
           body: |
             /test ci/centos/mini-e2e/k8s-1.25

       - name: Add comment to trigger E2E tests for Kubernetes 1.26
-        uses: peter-evans/create-or-update-comment@v2
+        uses: peter-evans/create-or-update-comment@v3
         with:
+          token: ${{ secrets.CEPH_CSI_BOT_TOKEN }}
           issue-number: ${{ github.event.pull_request.number }}
           body: |
             /test ci/centos/mini-e2e/k8s-1.26

-      - name: Add comment to trigger cephfs upgrade tests
-        uses: peter-evans/create-or-update-comment@v2
+      - name: Add comment to trigger E2E tests for Kubernetes 1.27
+        uses: peter-evans/create-or-update-comment@v3
         with:
+          token: ${{ secrets.CEPH_CSI_BOT_TOKEN }}
+          issue-number: ${{ github.event.pull_request.number }}
+          body: |
+            /test ci/centos/mini-e2e/k8s-1.27
+
+      - name: Add comment to trigger cephfs upgrade tests
+        uses: peter-evans/create-or-update-comment@v3
+        with:
+          token: ${{ secrets.CEPH_CSI_BOT_TOKEN }}
           issue-number: ${{ github.event.pull_request.number }}
           body: |
             /test ci/centos/upgrade-tests-cephfs

       - name: Add comment to trigger rbd upgrade tests
-        uses: peter-evans/create-or-update-comment@v2
+        uses: peter-evans/create-or-update-comment@v3
         with:
+          token: ${{ secrets.CEPH_CSI_BOT_TOKEN }}
           issue-number: ${{ github.event.pull_request.number }}
           body: |
             /test ci/centos/upgrade-tests-rbd
@@ -117,6 +108,7 @@ jobs:
       - name: remove ok-to-test-label after commenting
         uses: actions/github-script@v6
         with:
+          github-token: ${{ secrets.CEPH_CSI_BOT_TOKEN }}
           script: |
             github.rest.issues.removeLabel({
               issue_number: context.issue.number,
.github/workflows/retest.yaml (vendored, 1 line changed)
@@ -11,6 +11,7 @@ permissions:

 jobs:
   retest:
+    if: github.repository == 'ceph/ceph-csi'
     runs-on: ubuntu-latest
     steps:
       # path to the retest action
.github/workflows/stale.yaml (vendored, 2 lines changed)
@@ -18,7 +18,7 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository == 'ceph/ceph-csi'
     steps:
-      - uses: actions/stale@v7
+      - uses: actions/stale@v8
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          days-before-issue-stale: 30
.mergify.yml (74 lines changed)
@@ -29,15 +29,15 @@ queue_rules:
       - "status-success=golangci-lint"
       - "status-success=mod-check"
       - "status-success=lint-extras"
-      - "status-success=ci/centos/k8s-e2e-external-storage/1.23"
-      - "status-success=ci/centos/k8s-e2e-external-storage/1.24"
       - "status-success=ci/centos/k8s-e2e-external-storage/1.25"
-      - "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
-      - "status-success=ci/centos/mini-e2e-helm/k8s-1.24"
+      - "status-success=ci/centos/k8s-e2e-external-storage/1.26"
+      - "status-success=ci/centos/k8s-e2e-external-storage/1.27"
       - "status-success=ci/centos/mini-e2e-helm/k8s-1.25"
-      - "status-success=ci/centos/mini-e2e/k8s-1.23"
-      - "status-success=ci/centos/mini-e2e/k8s-1.24"
+      - "status-success=ci/centos/mini-e2e-helm/k8s-1.26"
+      - "status-success=ci/centos/mini-e2e-helm/k8s-1.27"
       - "status-success=ci/centos/mini-e2e/k8s-1.25"
+      - "status-success=ci/centos/mini-e2e/k8s-1.26"
+      - "status-success=ci/centos/mini-e2e/k8s-1.27"
       - "status-success=ci/centos/upgrade-tests-cephfs"
       - "status-success=ci/centos/upgrade-tests-rbd"
       - and:
@@ -46,12 +46,16 @@ queue_rules:
       - "status-success=ci/centos/jjb-validate"

 pull_request_rules:
-  - name: start CI jobs for queued PR
+  - name: start CI jobs for PRs in the merge queue
     conditions:
       - base~=^(devel)|(release-.+)$
-      - "check-pending=Queue: Embarked in merge train"
       - not:
           check-pending~=^ci/centos
+      - not:
+          status-success~=^ci/centos
+      - or:
+          - "check-pending=Queue: Embarked in merge train"
+          - author=mergify[bot]
     actions:
       label:
         add:
@@ -91,15 +95,15 @@ pull_request_rules:
       - "status-success=golangci-lint"
       - "status-success=mod-check"
       - "status-success=lint-extras"
-      - "status-success=ci/centos/k8s-e2e-external-storage/1.23"
-      - "status-success=ci/centos/k8s-e2e-external-storage/1.24"
       - "status-success=ci/centos/k8s-e2e-external-storage/1.25"
-      - "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
-      - "status-success=ci/centos/mini-e2e-helm/k8s-1.24"
+      - "status-success=ci/centos/k8s-e2e-external-storage/1.26"
+      - "status-success=ci/centos/k8s-e2e-external-storage/1.27"
       - "status-success=ci/centos/mini-e2e-helm/k8s-1.25"
-      - "status-success=ci/centos/mini-e2e/k8s-1.23"
-      - "status-success=ci/centos/mini-e2e/k8s-1.24"
+      - "status-success=ci/centos/mini-e2e-helm/k8s-1.26"
+      - "status-success=ci/centos/mini-e2e-helm/k8s-1.27"
       - "status-success=ci/centos/mini-e2e/k8s-1.25"
+      - "status-success=ci/centos/mini-e2e/k8s-1.26"
+      - "status-success=ci/centos/mini-e2e/k8s-1.27"
       - "status-success=ci/centos/upgrade-tests-cephfs"
       - "status-success=ci/centos/upgrade-tests-rbd"
       - "status-success=DCO"
@@ -130,15 +134,15 @@ pull_request_rules:
       - "status-success=commitlint"
       - "status-success=mod-check"
       - "status-success=lint-extras"
-      - "status-success=ci/centos/k8s-e2e-external-storage/1.23"
-      - "status-success=ci/centos/k8s-e2e-external-storage/1.24"
       - "status-success=ci/centos/k8s-e2e-external-storage/1.25"
-      - "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
-      - "status-success=ci/centos/mini-e2e-helm/k8s-1.24"
+      - "status-success=ci/centos/k8s-e2e-external-storage/1.26"
+      - "status-success=ci/centos/k8s-e2e-external-storage/1.27"
       - "status-success=ci/centos/mini-e2e-helm/k8s-1.25"
-      - "status-success=ci/centos/mini-e2e/k8s-1.23"
-      - "status-success=ci/centos/mini-e2e/k8s-1.24"
+      - "status-success=ci/centos/mini-e2e-helm/k8s-1.26"
+      - "status-success=ci/centos/mini-e2e-helm/k8s-1.27"
       - "status-success=ci/centos/mini-e2e/k8s-1.25"
+      - "status-success=ci/centos/mini-e2e/k8s-1.26"
+      - "status-success=ci/centos/mini-e2e/k8s-1.27"
       - "status-success=ci/centos/upgrade-tests-cephfs"
       - "status-success=ci/centos/upgrade-tests-rbd"
       - "status-success=DCO"
@@ -161,15 +165,15 @@ pull_request_rules:
       - "status-success=mod-check"
       - "status-success=lint-extras"
       - "#changes-requested-reviews-by=0"
-      - "status-success=ci/centos/k8s-e2e-external-storage/1.23"
-      - "status-success=ci/centos/k8s-e2e-external-storage/1.24"
       - "status-success=ci/centos/k8s-e2e-external-storage/1.25"
-      - "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
-      - "status-success=ci/centos/mini-e2e-helm/k8s-1.24"
+      - "status-success=ci/centos/k8s-e2e-external-storage/1.26"
+      - "status-success=ci/centos/k8s-e2e-external-storage/1.27"
       - "status-success=ci/centos/mini-e2e-helm/k8s-1.25"
-      - "status-success=ci/centos/mini-e2e/k8s-1.23"
-      - "status-success=ci/centos/mini-e2e/k8s-1.24"
+      - "status-success=ci/centos/mini-e2e-helm/k8s-1.26"
+      - "status-success=ci/centos/mini-e2e-helm/k8s-1.27"
       - "status-success=ci/centos/mini-e2e/k8s-1.25"
+      - "status-success=ci/centos/mini-e2e/k8s-1.26"
+      - "status-success=ci/centos/mini-e2e/k8s-1.27"
       - "status-success=ci/centos/upgrade-tests-cephfs"
       - "status-success=ci/centos/upgrade-tests-rbd"
       - "status-success=DCO"
@@ -178,15 +182,6 @@ pull_request_rules:
         name: default
       delete_head_branch: {}

-  - name: backport patches to release-v3.6 branch
-    conditions:
-      - base=devel
-      - label=backport-to-release-v3.6
-    actions:
-      backport:
-        branches:
-          - release-v3.6
-
   - name: backport patches to release-v3.7 branch
     conditions:
       - base=devel
@@ -196,6 +191,15 @@ pull_request_rules:
         branches:
           - release-v3.7

+  - name: backport patches to release-v3.8 branch
+    conditions:
+      - base=devel
+      - label=backport-to-release-v3.8
+    actions:
+      backport:
+        branches:
+          - release-v3.8
+
   - name: remove outdated approvals on ci/centos
     conditions:
       - base=ci/centos
@@ -8,7 +8,7 @@ repos:
       - id: check-signoff

   # Catch gofmt issues, if any.
-  - repo: git://github.com/dnephin/pre-commit-golang
+  - repo: https://github.com/dnephin/pre-commit-golang
     rev: v0.3.5
     hooks:
       - id: go-fmt
README.md (68 lines changed)
@@ -56,11 +56,10 @@ environments.

 | Ceph CSI Version | Container Orchestrator Name | Version Tested|
 | -----------------| --------------------------- | --------------|
+| v3.8.0 | Kubernetes | v1.24, v1.25, v1.26, v1.27|
 | v3.7.2 | Kubernetes | v1.22, v1.23, v1.24|
 | v3.7.1 | Kubernetes | v1.22, v1.23, v1.24|
 | v3.7.0 | Kubernetes | v1.22, v1.23, v1.24|
-| v3.6.1 | Kubernetes | v1.21, v1.22, v1.23|
-| v3.6.0 | Kubernetes | v1.21, v1.22, v1.23|

 There is work in progress to make this CO-independent and thus
 support other orchestration environments (Nomad, Mesos..etc).
@@ -70,8 +69,8 @@ NOTE:
 The supported window of Ceph CSI versions is "N.(x-1)":
 (N (Latest major release) . (x (Latest minor release) - 1)).

-For example, if the Ceph CSI latest major version is `3.7.0` today, support is
-provided for the versions above `3.6.0`. If users are running an unsupported
+For example, if the Ceph CSI latest major version is `3.8.0` today, support is
+provided for the versions above `3.7.0`. If users are running an unsupported
 Ceph CSI version, they will be asked to upgrade when requesting support.

 ## Support Matrix
@@ -83,31 +82,31 @@ for its support details.

 | Plugin | Features | Feature Status | CSI Driver Version | CSI Spec Version | Ceph Cluster Version | Kubernetes Version |
 | ------ | --------------------------------------------------------- | -------------- | ------------------ | ---------------- | -------------------- | ------------------ |
-| RBD | Dynamically provision, de-provision Block mode RWO volume | GA | >= v1.0.0 | >= v1.0.0 | Nautilus (>=15.0.0) | >= v1.14.0 |
+| RBD | Dynamically provision, de-provision Block mode RWO volume | GA | >= v1.0.0 | >= v1.0.0 | Octopus (>=15.0.0) | >= v1.14.0 |
 [The remaining rows of the Support Matrix change in the same way: every "Nautilus (>=15.0.0)" /
 "Nautilus (>=v15.0.0)" entry in the Ceph Cluster Version column becomes "Octopus (>=15.0.0)" /
 "Octopus (>=v15.0.0)". The CephFS snapshot and clone rows, which already list Octopus (>=v15.2.4),
 and the NFS rows, which list Pacific (>=16.2.0), are unchanged.]
@@ -131,14 +130,15 @@ in the Kubernetes documentation.
 | Ceph CSI Release/Branch | Container image name | Image Tag |
 | ----------------------- | ---------------------------- | --------- |
 | devel (Branch) | quay.io/cephcsi/cephcsi | canary |
+| v3.8.0 (Release) | quay.io/cephcsi/cephcsi | v3.8.0 |
 | v3.7.2 (Release) | quay.io/cephcsi/cephcsi | v3.7.2 |
 | v3.7.1 (Release) | quay.io/cephcsi/cephcsi | v3.7.1 |
 | v3.7.0 (Release) | quay.io/cephcsi/cephcsi | v3.7.0 |
-| v3.6.1 (Release) | quay.io/cephcsi/cephcsi | v3.6.1 |
-| v3.6.0 (Release) | quay.io/cephcsi/cephcsi | v3.6.0 |

 | Deprecated Ceph CSI Release/Branch | Container image name | Image Tag |
 | ----------------------- | --------------------------------| --------- |
+| v3.6.1 (Release) | quay.io/cephcsi/cephcsi | v3.6.1 |
+| v3.6.0 (Release) | quay.io/cephcsi/cephcsi | v3.6.0 |
 | v3.5.1 (Release) | quay.io/cephcsi/cephcsi | v3.5.1 |
 | v3.5.0 (Release) | quay.io/cephcsi/cephcsi | v3.5.0 |
 | v3.4.0 (Release) | quay.io/cephcsi/cephcsi | v3.4.0 |
@@ -180,8 +180,8 @@ More details are available [here](https://github.com/ceph/ceph-csi/issues/463)

 ## Dev standup

-A regular dev standup takes place every [Monday,Tuesday and Thursday at
-12:00 PM UTC](https://meet.google.com/nnn-txfp-cge). Convert to your local
+A regular dev standup takes place every [Tuesday at
+12:00 PM UTC](https://meet.google.com/vit-qdhw-nyh). Convert to your local
 timezone by executing command `date -d "12:00 UTC"` on terminal

 Any changes to the meeting schedule will be added to the [agenda
@@ -191,15 +191,15 @@ Anyone who wants to discuss the direction of the project, design and
 implementation reviews, or general questions with the broader community is
 welcome and encouraged to join.

-- Meeting link: <https://meet.google.com/nnn-txfp-cge>
+- Meeting link: <https://meet.google.com/vit-qdhw-nyh>
 - [Current agenda](https://hackmd.io/6GL90WFGQL-L4DcIfIAKeQ)

 ## Contact

 Please use the following to reach members of the community:

-- Slack: Join our [slack channel](https://cephcsi.slack.com) to discuss
-  anything related to this project. You can join the slack by
-  this [invite link](https://bit.ly/2MeS4KY )
+- Slack: Join our [Slack channel](https://ceph-storage.slack.com) to discuss
+  anything related to this project. You can join the Slack by this [invite
+  link](https://bit.ly/40FQu7u)
 - Forums: [ceph-csi](https://groups.google.com/forum/#!forum/ceph-csi)
 - Twitter: [@CephCsi](https://twitter.com/CephCsi)
@@ -1,15 +1,15 @@
 # retest-action

 This is a github action built using the golang and the [github
-api](github.com/google/go-github). The main idea behind this one is to retest
+api](https://github.com/google/go-github). The main idea behind this one is to retest
 the failed tests on the approved PR's to avoid burden on the
 maintainer's/author's to retest all the failed tests.

 * List the pull requests from the github organization.
 * Check PR is open and have required approvals.
-* Check PR as the required label to continue to retest.
+* Check PR has the required label to continue to retest.
 * Pulls the failed test details.
 * Check failed test has reached the maximum limit.
-* If the limit has not reached the action will post the `retest` command on the
+* If the limit has not reached, the action will post the `retest` command on the
   PR with log location for further debugging.
-* If the limit has reached the Pull Request will be skipped.
+* If the limit has reached, the Pull Request will be skipped.
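The steps listed above can be sketched with the go-github client that the action vendors. This is only an illustrative, hypothetical sketch: the repository ("ceph/ceph-csi"), the "ok-to-test" label, the test name in the posted comment, and the use of a plain GITHUB_TOKEN are assumptions, and the approval and retry-limit checks from the list are omitted. It is not the action's actual implementation.

    package main

    import (
        "context"
        "os"

        "github.com/google/go-github/github"
        "golang.org/x/oauth2"
    )

    func main() {
        ctx := context.Background()
        ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")})
        client := github.NewClient(oauth2.NewClient(ctx, ts))

        // List the open pull requests of the repository (owner/repo are assumptions).
        prs, _, err := client.PullRequests.List(ctx, "ceph", "ceph-csi",
            &github.PullRequestListOptions{State: "open"})
        if err != nil {
            panic(err)
        }

        for _, pr := range prs {
            // Continue only when the PR carries the required label ("ok-to-test" is assumed).
            labels, _, err := client.Issues.ListLabelsByIssue(ctx, "ceph", "ceph-csi", pr.GetNumber(), nil)
            if err != nil {
                panic(err)
            }
            hasLabel := false
            for _, l := range labels {
                if l.GetName() == "ok-to-test" {
                    hasLabel = true
                }
            }
            if !hasLabel {
                continue
            }
            // Posting a "/retest ..." issue comment is what asks CI to re-run a failed job.
            _, _, err = client.Issues.CreateComment(ctx, "ceph", "ceph-csi", pr.GetNumber(),
                &github.IssueComment{Body: github.String("/retest ci/centos/mini-e2e/k8s-1.27")})
            if err != nil {
                panic(err)
            }
        }
    }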
@@ -4,13 +4,13 @@ go 1.18

 require (
 	github.com/google/go-github v17.0.0+incompatible
-	golang.org/x/oauth2 v0.5.0
+	golang.org/x/oauth2 v0.9.0
 )

 require (
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/go-querystring v1.1.0 // indirect
-	golang.org/x/net v0.7.0 // indirect
+	golang.org/x/net v0.11.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 )
@@ -11,10 +11,10 @@ github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD
 github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
-golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s=
-golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
+golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU=
+golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
+golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs=
+golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
actions/retest/vendor/golang.org/x/oauth2/README.md
generated
vendored
12
actions/retest/vendor/golang.org/x/oauth2/README.md
generated
vendored
@ -19,7 +19,7 @@ See pkg.go.dev for further documentation and examples.
|
|||||||
* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2)
|
* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2)
|
||||||
* [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google)
|
* [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google)
|
||||||
|
|
||||||
## Policy for new packages
|
## Policy for new endpoints
|
||||||
|
|
||||||
We no longer accept new provider-specific packages in this repo if all
|
We no longer accept new provider-specific packages in this repo if all
|
||||||
they do is add a single endpoint variable. If you just want to add a
|
they do is add a single endpoint variable. If you just want to add a
|
||||||
@ -29,8 +29,12 @@ package.
|
|||||||
|
|
||||||
## Report Issues / Send Patches
|
## Report Issues / Send Patches
|
||||||
|
|
||||||
This repository uses Gerrit for code changes. To learn how to submit changes to
|
|
||||||
this repository, see https://golang.org/doc/contribute.html.
|
|
||||||
|
|
||||||
The main issue tracker for the oauth2 repository is located at
|
The main issue tracker for the oauth2 repository is located at
|
||||||
https://github.com/golang/oauth2/issues.
|
https://github.com/golang/oauth2/issues.
|
||||||
|
|
||||||
|
This repository uses Gerrit for code changes. To learn how to submit changes to
|
||||||
|
this repository, see https://golang.org/doc/contribute.html. In particular:
|
||||||
|
|
||||||
|
* Excluding trivial changes, all contributions should be connected to an existing issue.
|
||||||
|
* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted.
|
||||||
|
* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2).
|
||||||
|
actions/retest/vendor/golang.org/x/oauth2/internal/oauth2.go (generated, vendored, 2 lines changed)
@@ -14,7 +14,7 @@ import (

 // ParseKey converts the binary contents of a private key file
 // to an *rsa.PrivateKey. It detects whether the private key is in a
-// PEM container or not. If so, it extracts the the private key
+// PEM container or not. If so, it extracts the private key
 // from PEM container before conversion. It only supports PEM
 // containers with no passphrase.
 func ParseKey(key []byte) (*rsa.PrivateKey, error) {
actions/retest/vendor/golang.org/x/oauth2/internal/token.go (generated, vendored, 52 lines changed)
@@ -55,12 +55,18 @@ type Token struct {
 }

 // tokenJSON is the struct representing the HTTP response from OAuth2
-// providers returning a token in JSON form.
+// providers returning a token or error in JSON form.
+// https://datatracker.ietf.org/doc/html/rfc6749#section-5.1
 type tokenJSON struct {
 	AccessToken  string         `json:"access_token"`
 	TokenType    string         `json:"token_type"`
 	RefreshToken string         `json:"refresh_token"`
 	ExpiresIn    expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
+	// error fields
+	// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2
+	ErrorCode        string `json:"error"`
+	ErrorDescription string `json:"error_description"`
+	ErrorURI         string `json:"error_uri"`
 }

 func (e *tokenJSON) expiry() (t time.Time) {
@@ -236,21 +242,29 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
 	if err != nil {
 		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
 	}
-	if code := r.StatusCode; code < 200 || code > 299 {
-		return nil, &RetrieveError{
-			Response: r,
-			Body:     body,
-		}
+
+	failureStatus := r.StatusCode < 200 || r.StatusCode > 299
+	retrieveError := &RetrieveError{
+		Response: r,
+		Body:     body,
+		// attempt to populate error detail below
 	}

 	var token *Token
 	content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
 	switch content {
 	case "application/x-www-form-urlencoded", "text/plain":
+		// some endpoints return a query string
 		vals, err := url.ParseQuery(string(body))
 		if err != nil {
-			return nil, err
+			if failureStatus {
+				return nil, retrieveError
+			}
+			return nil, fmt.Errorf("oauth2: cannot parse response: %v", err)
 		}
+		retrieveError.ErrorCode = vals.Get("error")
+		retrieveError.ErrorDescription = vals.Get("error_description")
+		retrieveError.ErrorURI = vals.Get("error_uri")
 		token = &Token{
 			AccessToken: vals.Get("access_token"),
 			TokenType:   vals.Get("token_type"),
@@ -265,8 +279,14 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
 	default:
 		var tj tokenJSON
 		if err = json.Unmarshal(body, &tj); err != nil {
-			return nil, err
+			if failureStatus {
+				return nil, retrieveError
+			}
+			return nil, fmt.Errorf("oauth2: cannot parse json: %v", err)
 		}
+		retrieveError.ErrorCode = tj.ErrorCode
+		retrieveError.ErrorDescription = tj.ErrorDescription
+		retrieveError.ErrorURI = tj.ErrorURI
 		token = &Token{
 			AccessToken: tj.AccessToken,
 			TokenType:   tj.TokenType,
@@ -276,17 +296,37 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
 		}
 		json.Unmarshal(body, &token.Raw) // no error checks for optional fields
 	}
+
+	// according to spec, servers should respond status 400 in error case
+	// https://www.rfc-editor.org/rfc/rfc6749#section-5.2
+	// but some unorthodox servers respond 200 in error case
+	if failureStatus || retrieveError.ErrorCode != "" {
+		return nil, retrieveError
+	}
+
 	if token.AccessToken == "" {
 		return nil, errors.New("oauth2: server response missing access_token")
 	}
 	return token, nil
 }

+// mirrors oauth2.RetrieveError
 type RetrieveError struct {
 	Response *http.Response
 	Body     []byte
+	ErrorCode        string
+	ErrorDescription string
+	ErrorURI         string
 }

 func (r *RetrieveError) Error() string {
+	if r.ErrorCode != "" {
+		s := fmt.Sprintf("oauth2: %q", r.ErrorCode)
+		if r.ErrorDescription != "" {
+			s += fmt.Sprintf(" %q", r.ErrorDescription)
+		}
+		if r.ErrorURI != "" {
+			s += fmt.Sprintf(" %q", r.ErrorURI)
+		}
+		return s
+	}
 	return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
 }
actions/retest/vendor/golang.org/x/oauth2/oauth2.go (generated, vendored, 33 lines changed)
@@ -16,6 +16,7 @@ import (
 	"net/url"
 	"strings"
 	"sync"
+	"time"

 	"golang.org/x/oauth2/internal"
 )
@@ -140,7 +141,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
 //
 // State is a token to protect the user from CSRF attacks. You must
 // always provide a non-empty string and validate that it matches the
-// the state query parameter on your redirect callback.
+// state query parameter on your redirect callback.
 // See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
 //
 // Opts may include AccessTypeOnline or AccessTypeOffline, as well
@@ -290,6 +291,8 @@ type reuseTokenSource struct {

 	mu sync.Mutex // guards t
 	t  *Token
+
+	expiryDelta time.Duration
 }

 // Token returns the current token if it's still valid, else will
@@ -305,6 +308,7 @@ func (s *reuseTokenSource) Token() (*Token, error) {
 	if err != nil {
 		return nil, err
 	}
+	t.expiryDelta = s.expiryDelta
 	s.t = t
 	return t, nil
 }
@@ -379,3 +383,30 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
 		new: src,
 	}
 }
+
+// ReuseTokenSource returns a TokenSource that acts in the same manner as the
+// TokenSource returned by ReuseTokenSource, except the expiry buffer is
+// configurable. The expiration time of a token is calculated as
+// t.Expiry.Add(-earlyExpiry).
+func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource {
+	// Don't wrap a reuseTokenSource in itself. That would work,
+	// but cause an unnecessary number of mutex operations.
+	// Just build the equivalent one.
+	if rt, ok := src.(*reuseTokenSource); ok {
+		if t == nil {
+			// Just use it directly, but set the expiryDelta to earlyExpiry,
+			// so the behavior matches what the user expects.
+			rt.expiryDelta = earlyExpiry
+			return rt
+		}
+		src = rt.new
+	}
+	if t != nil {
+		t.expiryDelta = earlyExpiry
+	}
+	return &reuseTokenSource{
+		t:           t,
+		new:         src,
+		expiryDelta: earlyExpiry,
+	}
+}
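The newly vendored ReuseTokenSourceWithExpiry helper only changes how early a cached token is treated as expired. A minimal usage sketch, assuming a client-credentials config with placeholder endpoint and credentials (none of this is part of the vendored change itself):

    package main

    import (
        "context"
        "fmt"
        "time"

        "golang.org/x/oauth2"
        "golang.org/x/oauth2/clientcredentials"
    )

    func main() {
        conf := &clientcredentials.Config{
            ClientID:     "example-id",     // placeholder
            ClientSecret: "example-secret", // placeholder
            TokenURL:     "https://example.com/oauth2/token",
        }

        // Treat cached tokens as expired 5 minutes early instead of the default
        // 10 seconds, so long-running requests do not race the real expiry.
        src := oauth2.ReuseTokenSourceWithExpiry(nil, conf.TokenSource(context.Background()), 5*time.Minute)

        tok, err := src.Token()
        if err != nil {
            fmt.Println("token error:", err)
            return
        }
        fmt.Println("token type:", tok.TokenType)
    }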
actions/retest/vendor/golang.org/x/oauth2/token.go (generated, vendored, 33 lines changed)
@@ -16,10 +16,10 @@ import (
 	"golang.org/x/oauth2/internal"
 )

-// expiryDelta determines how earlier a token should be considered
+// defaultExpiryDelta determines how earlier a token should be considered
 // expired than its actual expiration time. It is used to avoid late
 // expirations due to client-server time mismatches.
-const expiryDelta = 10 * time.Second
+const defaultExpiryDelta = 10 * time.Second

 // Token represents the credentials used to authorize
 // the requests to access protected resources on the OAuth 2.0
@@ -52,6 +52,11 @@ type Token struct {
 	// raw optionally contains extra metadata from the server
 	// when updating a token.
 	raw interface{}
+
+	// expiryDelta is used to calculate when a token is considered
+	// expired, by subtracting from Expiry. If zero, defaultExpiryDelta
+	// is used.
+	expiryDelta time.Duration
 }

 // Type returns t.TokenType if non-empty, else "Bearer".
@@ -127,6 +132,11 @@ func (t *Token) expired() bool {
 	if t.Expiry.IsZero() {
 		return false
 	}
+
+	expiryDelta := defaultExpiryDelta
+	if t.expiryDelta != 0 {
+		expiryDelta = t.expiryDelta
+	}
 	return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
 }
@@ -165,14 +175,31 @@ func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error)
 }

 // RetrieveError is the error returned when the token endpoint returns a
-// non-2XX HTTP status code.
+// non-2XX HTTP status code or populates RFC 6749's 'error' parameter.
+// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2
 type RetrieveError struct {
 	Response *http.Response
 	// Body is the body that was consumed by reading Response.Body.
 	// It may be truncated.
 	Body []byte
+	// ErrorCode is RFC 6749's 'error' parameter.
+	ErrorCode string
+	// ErrorDescription is RFC 6749's 'error_description' parameter.
+	ErrorDescription string
+	// ErrorURI is RFC 6749's 'error_uri' parameter.
+	ErrorURI string
 }

 func (r *RetrieveError) Error() string {
+	if r.ErrorCode != "" {
+		s := fmt.Sprintf("oauth2: %q", r.ErrorCode)
+		if r.ErrorDescription != "" {
+			s += fmt.Sprintf(" %q", r.ErrorDescription)
+		}
+		if r.ErrorURI != "" {
+			s += fmt.Sprintf(" %q", r.ErrorURI)
+		}
+		return s
+	}
 	return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
 }
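With the new RetrieveError fields, callers can report the RFC 6749 error details instead of only the raw HTTP body. A small hedged sketch of how a caller might inspect them, assuming an *oauth2.Config and an authorization code obtained elsewhere:

    package main

    import (
        "context"
        "errors"
        "log"

        "golang.org/x/oauth2"
    )

    // exchange is a sketch: cfg and code are assumed to come from the usual
    // authorization-code flow; only the error handling is the point here.
    func exchange(ctx context.Context, cfg *oauth2.Config, code string) (*oauth2.Token, error) {
        tok, err := cfg.Exchange(ctx, code)
        if err != nil {
            var rErr *oauth2.RetrieveError
            if errors.As(err, &rErr) {
                // ErrorCode/ErrorDescription/ErrorURI carry the RFC 6749
                // section 5.2 error response from the token endpoint.
                log.Printf("oauth2 error %q: %s (%s)", rErr.ErrorCode, rErr.ErrorDescription, rErr.ErrorURI)
            }
            return nil, err
        }
        return tok, nil
    }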
actions/retest/vendor/modules.txt (vendored, 4 lines changed)
@@ -7,10 +7,10 @@ github.com/google/go-github/github
 # github.com/google/go-querystring v1.1.0
 ## explicit; go 1.10
 github.com/google/go-querystring/query
-# golang.org/x/net v0.7.0
+# golang.org/x/net v0.11.0
 ## explicit; go 1.17
 golang.org/x/net/context
-# golang.org/x/oauth2 v0.5.0
+# golang.org/x/oauth2 v0.9.0
 ## explicit; go 1.17
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
api/deploy/kubernetes/cephfs/csi-config-map.go (new file, +74 lines)
@@ -0,0 +1,74 @@
+/*
+Copyright 2023 The Ceph-CSI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cephfs
+
+import (
+	"bytes"
+	_ "embed"
+	"fmt"
+	"text/template"
+
+	"github.com/ghodss/yaml"
+	v1 "k8s.io/api/core/v1"
+)
+
+//go:embed csi-config-map.yaml
+var csiConfigMap string
+
+type CSIConfigMapValues struct {
+	Name string
+}
+
+var CSIConfigMapDefaults = CSIConfigMapValues{
+	Name: "ceph-csi-config",
+}
+
+// NewCSIConfigMap takes a name from the CSIConfigMapValues struct and replaces
+// the value in the template. A ConfigMap object is returned which can be
+// created in the Kubernetes cluster.
+func NewCSIConfigMap(values CSIConfigMapValues) (*v1.ConfigMap, error) {
+	data, err := NewCSIConfigMapYAML(values)
+	if err != nil {
+		return nil, err
+	}
+
+	cm := &v1.ConfigMap{}
+	err = yaml.Unmarshal([]byte(data), cm)
+	if err != nil {
+		return nil, fmt.Errorf("failed convert YAML to %T: %w", cm, err)
+	}
+
+	return cm, nil
+}
+
+// NewCSIConfigMapYAML takes a name from the CSIConfigMapValues struct and
+// replaces the value in the template. A ConfigMap object in YAML is returned
+// which can be created in the Kubernetes cluster.
+func NewCSIConfigMapYAML(values CSIConfigMapValues) (string, error) {
+	var buf bytes.Buffer
+
+	tmpl, err := template.New("CSIConfigMap").Parse(csiConfigMap)
+	if err != nil {
+		return "", fmt.Errorf("failed to parse template: %w", err)
+	}
+	err = tmpl.Execute(&buf, values)
+	if err != nil {
+		return "", fmt.Errorf("failed to replace values in template: %w", err)
+	}
+
+	return buf.String(), nil
+}
8
api/deploy/kubernetes/cephfs/csi-config-map.yaml
Normal file
@@ -0,0 +1,8 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: "{{ .Name }}"
data:
  config.json: |-
    []
38
api/deploy/kubernetes/cephfs/csi-config-map_test.go
Normal file
@@ -0,0 +1,38 @@
/*
Copyright 2023 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cephfs

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestNewCSIConfigMap(t *testing.T) {
	cm, err := NewCSIConfigMap(CSIConfigMapDefaults)

	require.NoError(t, err)
	require.NotNil(t, cm)
	require.Equal(t, cm.Name, CSIConfigMapDefaults.Name)
}

func TestNewCSIConfigMapYAML(t *testing.T) {
	yaml, err := NewCSIConfigMapYAML(CSIConfigMapDefaults)

	require.NoError(t, err)
	require.NotEqual(t, "", yaml)
}
74
api/deploy/kubernetes/cephfs/csidriver.go
Normal file
@@ -0,0 +1,74 @@
/*
Copyright 2023 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cephfs

import (
	"bytes"
	_ "embed"
	"fmt"
	"text/template"

	"github.com/ghodss/yaml"
	storagev1 "k8s.io/api/storage/v1"
)

//go:embed csidriver.yaml
var csiDriver string

type CSIDriverValues struct {
	Name string
}

var CSIDriverDefaults = CSIDriverValues{
	Name: "cephfs.csi.ceph.com",
}

// NewCSIDriver takes a driver name from the CSIDriverValues struct and
// replaces the value in the template. A CSIDriver object is returned which can
// be created in the Kubernetes cluster.
func NewCSIDriver(values CSIDriverValues) (*storagev1.CSIDriver, error) {
	data, err := NewCSIDriverYAML(values)
	if err != nil {
		return nil, err
	}

	driver := &storagev1.CSIDriver{}
	err = yaml.Unmarshal([]byte(data), driver)
	if err != nil {
		return nil, fmt.Errorf("failed to convert YAML to %T: %w", driver, err)
	}

	return driver, nil
}

// NewCSIDriverYAML takes a driver name from the CSIDriverValues struct and
// replaces the value in the template. A CSIDriver object in YAML is returned
// which can be created in the Kubernetes cluster.
func NewCSIDriverYAML(values CSIDriverValues) (string, error) {
	var buf bytes.Buffer

	tmpl, err := template.New("CSIDriver").Parse(csiDriver)
	if err != nil {
		return "", fmt.Errorf("failed to parse template: %w", err)
	}
	err = tmpl.Execute(&buf, values)
	if err != nil {
		return "", fmt.Errorf("failed to replace values in template: %w", err)
	}

	return buf.String(), nil
}
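Editor's note: a similar hedged sketch (again not part of the commit) for the CSIDriver helper, overriding the default driver name; the import path and the example name are assumptions for illustration only.

package main

import (
	"fmt"
	"log"

	"github.com/ceph/ceph-csi/api/deploy/kubernetes/cephfs"
)

func main() {
	// Start from the defaults and override only the driver name.
	values := cephfs.CSIDriverDefaults
	values.Name = "cephfs.example.csi.ceph.com"

	driver, err := cephfs.NewCSIDriver(values)
	if err != nil {
		log.Fatalf("failed to build CSIDriver object: %v", err)
	}
	fmt.Println("CSIDriver name:", driver.Name)
}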
9
api/deploy/kubernetes/cephfs/csidriver.yaml
Normal file
@@ -0,0 +1,9 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: "{{ .Name }}"
spec:
  attachRequired: false
  podInfoOnMount: false
  fsGroupPolicy: File
38
api/deploy/kubernetes/cephfs/csidriver_test.go
Normal file
@@ -0,0 +1,38 @@
/*
Copyright 2023 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cephfs

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestNewCSIDriver(t *testing.T) {
	driver, err := NewCSIDriver(CSIDriverDefaults)

	require.NoError(t, err)
	require.NotNil(t, driver)
	require.Equal(t, driver.Name, CSIDriverDefaults.Name)
}

func TestNewCSIDriverYAML(t *testing.T) {
	yaml, err := NewCSIDriverYAML(CSIDriverDefaults)

	require.NoError(t, err)
	require.NotEqual(t, "", yaml)
}
@@ -10,7 +10,7 @@ allowHostNetwork: true
# This need to be set to true as we use HostPath
allowHostDirVolumePlugin: true
priority:
-# SYS_ADMIN is needed for rbd to execture rbd map command
+# SYS_ADMIN is needed for rbd to execute rbd map command
allowedCapabilities: ["SYS_ADMIN"]
# Needed as we run liveness container on daemonset pods
allowHostPorts: true
21
api/go.mod
@@ -4,9 +4,9 @@ go 1.18

require (
	github.com/ghodss/yaml v1.0.0
-	github.com/openshift/api v0.0.0-20210927171657-636513e97fda
+	github.com/openshift/api v0.0.0-20230320192226-1fc631efd341
-	github.com/stretchr/testify v1.8.1
+	github.com/stretchr/testify v1.8.4
-	k8s.io/api v0.26.1
+	k8s.io/api v0.27.2
)

require (
@@ -18,14 +18,17 @@ require (
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
-	golang.org/x/net v0.7.0 // indirect
+	golang.org/x/net v0.8.0 // indirect
-	golang.org/x/text v0.7.0 // indirect
+	golang.org/x/text v0.8.0 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/apimachinery v0.26.1 // indirect
+	k8s.io/apimachinery v0.27.2 // indirect
-	k8s.io/klog/v2 v2.80.1 // indirect
+	k8s.io/klog/v2 v2.90.1 // indirect
-	k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 // indirect
+	k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect
-	sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
+	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
)

+// version 3.9 is really old, don't use that!
+exclude github.com/openshift/api v3.9.0+incompatible
231
api/go.sum
231
api/go.sum
@ -1,277 +1,88 @@
|
|||||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
|
||||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
|
||||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
|
||||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
|
||||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
|
||||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
|
||||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
|
||||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
|
||||||
github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU=
|
|
||||||
github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ=
|
|
||||||
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
|
|
||||||
github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8=
|
|
||||||
github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA=
|
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
|
||||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
|
||||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
|
||||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
|
||||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
|
||||||
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
|
||||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
|
||||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
|
||||||
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
||||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
|
||||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
|
||||||
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
|
||||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||||
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
|
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
|
||||||
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
|
||||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
|
||||||
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
|
||||||
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
|
|
||||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
|
||||||
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
|
||||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
|
||||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
|
||||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
|
||||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
|
||||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
|
||||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
|
||||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
|
||||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
|
||||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
|
||||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
|
||||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
|
||||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
|
||||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
|
||||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
|
||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
|
||||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
|
||||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
|
||||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
|
||||||
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
|
|
||||||
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
|
|
||||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
|
||||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
|
||||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
|
||||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
|
||||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
|
||||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
|
||||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
|
||||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
|
||||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
|
||||||
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
|
||||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
|
||||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
|
||||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
|
||||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
|
||||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
github.com/openshift/api v0.0.0-20230320192226-1fc631efd341 h1:PhLdiIlVqgN4frwrG8lNlbQdJ4eJcGdjX/vhlN6xupk=
|
||||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
github.com/openshift/api v0.0.0-20230320192226-1fc631efd341/go.mod h1:ctXNyWanKEjGj8sss1KjjHQ3ENKFm33FFnS5BKaIPh4=
|
||||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
|
||||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
|
||||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
|
||||||
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
|
||||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
|
||||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
|
||||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
|
||||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
|
||||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
|
||||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
|
||||||
github.com/openshift/api v0.0.0-20210927171657-636513e97fda h1:VoJmrqbFDuqzjlByItbjx/HxmReK4LC+X3Jt2Wv2Ogs=
|
|
||||||
github.com/openshift/api v0.0.0-20210927171657-636513e97fda/go.mod h1:RsQCVJu4qhUawxxDP7pGlwU3IA4F01wYm3qKEu29Su8=
|
|
||||||
github.com/openshift/build-machinery-go v0.0.0-20210712174854-1bb7fd1518d3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
|
|
||||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
|
||||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
|
||||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
|
||||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
|
||||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
|
||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
|
||||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
|
||||||
golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8=
|
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
|
||||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
|
||||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
|
||||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
|
||||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
|
||||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
|
||||||
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||||
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
|
|
||||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
|
||||||
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
|
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
|
||||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
|
||||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
|
||||||
golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
|
||||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
|
||||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
|
||||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
|
||||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
|
||||||
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
|
||||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
|
||||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
|
||||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
|
||||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
|
||||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
|
||||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
|
||||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
|
||||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
|
||||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
|
||||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
|
||||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
|
||||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
|
||||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
|
||||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||||
gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
|
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
|
||||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
k8s.io/api v0.27.2 h1:+H17AJpUMvl+clT+BPnKf0E3ksMAzoBBg7CntpSuADo=
|
||||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
k8s.io/api v0.27.2/go.mod h1:ENmbocXfBT2ADujUXcBhHV55RIT31IIEvkntP6vZKS4=
|
||||||
k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
|
k8s.io/apimachinery v0.27.2 h1:vBjGaKKieaIreI+oQwELalVG4d8f3YAMNpWLzDXkxeg=
|
||||||
k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ=
|
k8s.io/apimachinery v0.27.2/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E=
|
||||||
k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg=
|
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
|
||||||
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
|
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||||
k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ=
|
k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY=
|
||||||
k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
|
k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||||
k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
|
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||||
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
|
||||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
|
||||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
|
||||||
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
|
|
||||||
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
|
|
||||||
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
|
||||||
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
|
|
||||||
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 h1:GfD9OzL11kvZN5iArC6oTS7RTj7oJOIfnislxYlqTj8=
|
|
||||||
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
|
||||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
|
|
||||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
|
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
|
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
|
||||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
|
||||||
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
||||||
|
21
build.env
@@ -16,29 +16,32 @@ BASE_IMAGE=quay.io/ceph/ceph:v17
CEPH_VERSION=quincy

# standard Golang options
-GOLANG_VERSION=1.19.5
+GOLANG_VERSION=1.20.4
GO111MODULE=on

# commitlint version
COMMITLINT_VERSION=latest

# static checks and linters
-GOLANGCI_VERSION=v1.47.3
+GOLANGCI_VERSION=v1.53.0

# external snapshotter version
# Refer: https://github.com/kubernetes-csi/external-snapshotter/releases
-SNAPSHOT_VERSION=v6.1.0
+SNAPSHOT_VERSION=v6.2.2

# "go test" configuration
# set to stdout or html to enable coverage reporting, disabled by default
#TEST_COVERAGE=html
#GO_COVER_DIR=_output/

+# URL for the script to install Helm
+HELM_SCRIPT=https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
+
# helm chart generation, testing and publishing
HELM_VERSION=v3.10.1

# minikube settings
-MINIKUBE_VERSION=v1.29.0
+MINIKUBE_VERSION=v1.30.1
VM_DRIVER=none
CHANGE_MINIKUBE_NONE_USER=true

@@ -48,11 +51,11 @@ ROOK_VERSION=v1.10.9
ROOK_CEPH_CLUSTER_IMAGE=quay.io/ceph/ceph:v17

# CSI sidecar version
-CSI_ATTACHER_VERSION=v4.0.0
+CSI_ATTACHER_VERSION=v4.3.0
-CSI_SNAPSHOTTER_VERSION=v6.1.0
+CSI_SNAPSHOTTER_VERSION=v6.2.2
-CSI_RESIZER_VERSION=v1.6.0
+CSI_RESIZER_VERSION=v1.8.0
-CSI_PROVISIONER_VERSION=v3.3.0
+CSI_PROVISIONER_VERSION=v3.5.0
-CSI_NODE_DRIVER_REGISTRAR_VERSION=v2.6.2
+CSI_NODE_DRIVER_REGISTRAR_VERSION=v2.8.0

# e2e settings
# - enable CEPH_CSI_RUN_ALL_TESTS when running tests with if it has root
@@ -121,9 +121,10 @@ charts and their default values.
| `nodeplugin.name` | Specifies the nodeplugin name | `nodeplugin` |
| `nodeplugin.updateStrategy` | Specifies the update Strategy. If you are using ceph-fuse client set this value to OnDelete | `RollingUpdate` |
| `nodeplugin.priorityClassName` | Set user created priorityclassName for csi plugin pods. default is system-node-critical which is highest priority | `system-node-critical` |
+| `nodeplugin.imagePullSecrets` | Specifies imagePullSecrets for containers | `[]` |
| `nodeplugin.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
| `nodeplugin.registrar.image.repository` | Node-Registrar image repository URL | `registry.k8s.io/sig-storage/csi-node-driver-registrar` |
-| `nodeplugin.registrar.image.tag` | Image tag | `v2.6.2` |
+| `nodeplugin.registrar.image.tag` | Image tag | `v2.8.0` |
| `nodeplugin.registrar.image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `nodeplugin.plugin.image.repository` | Nodeplugin image repository URL | `quay.io/cephcsi/cephcsi` |
| `nodeplugin.plugin.image.tag` | Image tag | `canary` |
@@ -141,19 +142,20 @@ charts and their default values.
| `provisioner.setmetadata` | Set metadata on volume | `true` |
| `provisioner.priorityClassName` | Set user created priorityclassName for csi provisioner pods. Default is `system-cluster-critical` which is less priority than `system-node-critical` | `system-cluster-critical` |
| `provisioner.enableHostNetwork` | Specifies whether hostNetwork is enabled for provisioner pod. | `false` |
+| `provisioner.imagePullSecrets` | Specifies imagePullSecrets for containers | `[]` |
| `provisioner.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
| `provisioner.provisioner.image.repository` | Specifies the csi-provisioner image repository URL | `registry.k8s.io/sig-storage/csi-provisioner` |
-| `provisioner.provisioner.image.tag` | Specifies image tag | `v3.3.0` |
+| `provisioner.provisioner.image.tag` | Specifies image tag | `v3.5.0` |
| `provisioner.provisioner.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.provisioner.image.extraArgs` | Specifies extra arguments for the provisioner sidecar | `[]` |
| `provisioner.resizer.image.repository` | Specifies the csi-resizer image repository URL | `registry.k8s.io/sig-storage/csi-resizer` |
-| `provisioner.resizer.image.tag` | Specifies image tag | `v1.6.0` |
+| `provisioner.resizer.image.tag` | Specifies image tag | `v1.8.0` |
| `provisioner.resizer.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.resizer.image.extraArgs` | Specifies extra arguments for the resizer sidecar | `[]` |
| `provisioner.resizer.name` | Specifies the name of csi-resizer sidecar | `resizer` |
| `provisioner.resizer.enabled` | Specifies whether resizer sidecar is enabled | `true` |
| `provisioner.snapshotter.image.repository` | Specifies the csi-snapshotter image repository URL | `registry.k8s.io/sig-storage/csi-snapshotter` |
-| `provisioner.snapshotter.image.tag` | Specifies image tag | `v6.1.0` |
+| `provisioner.snapshotter.image.tag` | Specifies image tag | `v6.2.2` |
| `provisioner.snapshotter.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.snapshotter.image.extraArgs` | Specifies extra arguments for the snapshotter sidecar | `[]` |
| `provisioner.nodeSelector` | Specifies the node selector for provisioner deployment | `{}` |
@@ -37,6 +37,10 @@ spec:
# to use e.g. Rook orchestrated cluster, and mons' FQDN is
# resolved through k8s service, set dns policy to cluster first
dnsPolicy: ClusterFirstWithHostNet
+{{- if .Values.nodeplugin.imagePullSecrets }}
+imagePullSecrets:
+{{ toYaml .Values.nodeplugin.imagePullSecrets | indent 8 -}}
+{{- end }}
containers:
- name: driver-registrar
# This is necessary only for systems with SELinux, where
@@ -48,11 +48,23 @@ spec:
 values:
 - {{ .Values.provisioner.name }}
 topologyKey: "kubernetes.io/hostname"
+{{- if .Values.provisioner.affinity }}
+{{ toYaml .Values.provisioner.affinity | indent 8 -}}
+{{- end -}}
+{{- else -}}
+{{- if .Values.provisioner.affinity }}
+affinity:
+{{ toYaml .Values.provisioner.affinity | indent 8 -}}
+{{- end -}}
 {{- end }}
 serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
 hostNetwork: {{ .Values.provisioner.enableHostNetwork }}
 {{- if .Values.provisioner.priorityClassName }}
 priorityClassName: {{ .Values.provisioner.priorityClassName }}
+{{- end }}
+{{- if .Values.provisioner.imagePullSecrets }}
+imagePullSecrets:
+{{ toYaml .Values.provisioner.imagePullSecrets | indent 8 -}}
 {{- end }}
 containers:
 - name: csi-provisioner
@@ -228,10 +240,6 @@ spec:
 emptyDir: {
 medium: "Memory"
 }
-{{- if .Values.provisioner.affinity }}
-affinity:
-{{ toYaml .Values.provisioner.affinity | indent 8 -}}
-{{- end -}}
 {{- if .Values.provisioner.nodeSelector }}
 nodeSelector:
 {{ toYaml .Values.provisioner.nodeSelector | indent 8 -}}
@@ -81,13 +81,18 @@ nodeplugin:
 loadBalancerIP: ""
 loadBalancerSourceRanges: []
 
+## Reference to one or more secrets to be used when pulling images
+##
+imagePullSecrets: []
+# - name: "image-pull-secret"
+
 profiling:
 enabled: false
 
 registrar:
 image:
 repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
-tag: v2.6.2
+tag: v2.8.0
 pullPolicy: IfNotPresent
 resources: {}
 
@@ -168,13 +173,18 @@ provisioner:
 loadBalancerIP: ""
 loadBalancerSourceRanges: []
 
+## Reference to one or more secrets to be used when pulling images
+##
+imagePullSecrets: []
+# - name: "image-pull-secret"
+
 profiling:
 enabled: false
 
 provisioner:
 image:
 repository: registry.k8s.io/sig-storage/csi-provisioner
-tag: v3.3.0
+tag: v3.5.0
 pullPolicy: IfNotPresent
 resources: {}
 ## For further options, check
@@ -189,7 +199,7 @@ provisioner:
 enabled: true
 image:
 repository: registry.k8s.io/sig-storage/csi-resizer
-tag: v1.6.0
+tag: v1.8.0
 pullPolicy: IfNotPresent
 resources: {}
 ## For further options, check
@@ -199,7 +209,7 @@ provisioner:
 snapshotter:
 image:
 repository: registry.k8s.io/sig-storage/csi-snapshotter
-tag: v6.1.0
+tag: v6.2.2
 pullPolicy: IfNotPresent
 resources: {}
 ## For further options, check
@ -123,9 +123,10 @@ charts and their default values.
|
|||||||
| `nodeplugin.name` | Specifies the nodeplugins name | `nodeplugin` |
|
| `nodeplugin.name` | Specifies the nodeplugins name | `nodeplugin` |
|
||||||
| `nodeplugin.updateStrategy` | Specifies the update Strategy. If you are using ceph-fuse client set this value to OnDelete | `RollingUpdate` |
|
| `nodeplugin.updateStrategy` | Specifies the update Strategy. If you are using ceph-fuse client set this value to OnDelete | `RollingUpdate` |
|
||||||
| `nodeplugin.priorityClassName` | Set user created priorityclassName for csi plugin pods. default is system-node-critical which is highest priority | `system-node-critical` |
|
| `nodeplugin.priorityClassName` | Set user created priorityclassName for csi plugin pods. default is system-node-critical which is highest priority | `system-node-critical` |
|
||||||
|
| `nodeplugin.imagePullSecrets` | Specifies imagePullSecrets for containers | `[]` |
|
||||||
| `nodeplugin.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
|
| `nodeplugin.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
|
||||||
| `nodeplugin.registrar.image.repository` | Node Registrar image repository URL | `registry.k8s.io/sig-storage/csi-node-driver-registrar` |
|
| `nodeplugin.registrar.image.repository` | Node Registrar image repository URL | `registry.k8s.io/sig-storage/csi-node-driver-registrar` |
|
||||||
| `nodeplugin.registrar.image.tag` | Image tag | `v2.6.2` |
|
| `nodeplugin.registrar.image.tag` | Image tag | `v2.8.0` |
|
||||||
| `nodeplugin.registrar.image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
| `nodeplugin.registrar.image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||||
| `nodeplugin.plugin.image.repository` | Nodeplugin image repository URL | `quay.io/cephcsi/cephcsi` |
|
| `nodeplugin.plugin.image.repository` | Nodeplugin image repository URL | `quay.io/cephcsi/cephcsi` |
|
||||||
| `nodeplugin.plugin.image.tag` | Image tag | `canary` |
|
| `nodeplugin.plugin.image.tag` | Image tag | `canary` |
|
||||||
@ -147,25 +148,26 @@ charts and their default values.
|
|||||||
| `provisioner.setmetadata` | Set metadata on volume | `true` |
|
| `provisioner.setmetadata` | Set metadata on volume | `true` |
|
||||||
| `provisioner.priorityClassName` | Set user created priorityclassName for csi provisioner pods. Default is `system-cluster-critical` which is less priority than `system-node-critical` | `system-cluster-critical` |
|
| `provisioner.priorityClassName` | Set user created priorityclassName for csi provisioner pods. Default is `system-cluster-critical` which is less priority than `system-node-critical` | `system-cluster-critical` |
|
||||||
| `provisioner.enableHostNetwork` | Specifies whether hostNetwork is enabled for provisioner pod. | `false` |
|
| `provisioner.enableHostNetwork` | Specifies whether hostNetwork is enabled for provisioner pod. | `false` |
|
||||||
|
| `provisioner.imagePullSecrets` | Specifies imagePullSecrets for containers | `[]` |
|
||||||
| `provisioner.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
|
| `provisioner.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
|
||||||
| `provisioner.provisioner.image.repository` | Specifies the csi-provisioner image repository URL | `registry.k8s.io/sig-storage/csi-provisioner` |
|
| `provisioner.provisioner.image.repository` | Specifies the csi-provisioner image repository URL | `registry.k8s.io/sig-storage/csi-provisioner` |
|
||||||
| `provisioner.provisioner.image.tag` | Specifies image tag | `v3.3.0` |
|
| `provisioner.provisioner.image.tag` | Specifies image tag | `v3.5.0` |
|
||||||
| `provisioner.provisioner.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
|
| `provisioner.provisioner.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
|
||||||
| `provisioner.provisioner.image.extraArgs` | Specifies extra arguments for the provisioner sidecar | `[]` |
|
| `provisioner.provisioner.image.extraArgs` | Specifies extra arguments for the provisioner sidecar | `[]` |
|
||||||
| `provisioner.attacher.image.repository` | Specifies the csi-attacher image repository URL | `registry.k8s.io/sig-storage/csi-attacher` |
|
| `provisioner.attacher.image.repository` | Specifies the csi-attacher image repository URL | `registry.k8s.io/sig-storage/csi-attacher` |
|
||||||
| `provisioner.attacher.image.tag` | Specifies image tag | `v4.0.0` |
|
| `provisioner.attacher.image.tag` | Specifies image tag | `v4.3.0` |
|
||||||
| `provisioner.attacher.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
|
| `provisioner.attacher.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
|
||||||
| `provisioner.attacher.image.extraArgs` | Specifies extra arguments for the attacher sidecar | `[]` |
|
| `provisioner.attacher.image.extraArgs` | Specifies extra arguments for the attacher sidecar | `[]` |
|
||||||
| `provisioner.attacher.name` | Specifies the name of csi-attacher sidecar | `attacher` |
|
| `provisioner.attacher.name` | Specifies the name of csi-attacher sidecar | `attacher` |
|
||||||
| `provisioner.attacher.enabled` | Specifies whether attacher sidecar is enabled | `true` |
|
| `provisioner.attacher.enabled` | Specifies whether attacher sidecar is enabled | `true` |
|
||||||
| `provisioner.resizer.image.repository` | Specifies the csi-resizer image repository URL | `registry.k8s.io/sig-storage/csi-resizer` |
|
| `provisioner.resizer.image.repository` | Specifies the csi-resizer image repository URL | `registry.k8s.io/sig-storage/csi-resizer` |
|
||||||
| `provisioner.resizer.image.tag` | Specifies image tag | `v1.6.0` |
|
| `provisioner.resizer.image.tag` | Specifies image tag | `v1.8.0` |
|
||||||
| `provisioner.resizer.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
|
| `provisioner.resizer.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
|
||||||
| `provisioner.resizer.image.extraArgs` | Specifies extra arguments for the resizer sidecar | `[]` |
|
| `provisioner.resizer.image.extraArgs` | Specifies extra arguments for the resizer sidecar | `[]` |
|
||||||
| `provisioner.resizer.name` | Specifies the name of csi-resizer sidecar | `resizer` |
|
| `provisioner.resizer.name` | Specifies the name of csi-resizer sidecar | `resizer` |
|
||||||
| `provisioner.resizer.enabled` | Specifies whether resizer sidecar is enabled | `true` |
|
| `provisioner.resizer.enabled` | Specifies whether resizer sidecar is enabled | `true` |
|
||||||
| `provisioner.snapshotter.image.repository` | Specifies the csi-snapshotter image repository URL | `registry.k8s.io/sig-storage/csi-snapshotter` |
|
| `provisioner.snapshotter.image.repository` | Specifies the csi-snapshotter image repository URL | `registry.k8s.io/sig-storage/csi-snapshotter` |
|
||||||
| `provisioner.snapshotter.image.tag` | Specifies image tag | `v6.1.0` |
|
| `provisioner.snapshotter.image.tag` | Specifies image tag | `v6.2.2` |
|
||||||
| `provisioner.snapshotter.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
|
| `provisioner.snapshotter.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
|
||||||
| `provisioner.snapshotter.image.extraArgs` | Specifies extra arguments for the snapshotter sidecar | `[]` |
|
| `provisioner.snapshotter.image.extraArgs` | Specifies extra arguments for the snapshotter sidecar | `[]` |
|
||||||
| `provisioner.nodeSelector` | Specifies the node selector for provisioner deployment | `{}` |
|
| `provisioner.nodeSelector` | Specifies the node selector for provisioner deployment | `{}` |
|
||||||
|
@ -37,6 +37,10 @@ spec:
|
|||||||
# to use e.g. Rook orchestrated cluster, and mons' FQDN is
|
# to use e.g. Rook orchestrated cluster, and mons' FQDN is
|
||||||
# resolved through k8s service, set dns policy to cluster first
|
# resolved through k8s service, set dns policy to cluster first
|
||||||
dnsPolicy: ClusterFirstWithHostNet
|
dnsPolicy: ClusterFirstWithHostNet
|
||||||
|
{{- if .Values.nodeplugin.imagePullSecrets }}
|
||||||
|
imagePullSecrets:
|
||||||
|
{{ toYaml .Values.nodeplugin.imagePullSecrets | indent 8 -}}
|
||||||
|
{{- end }}
|
||||||
containers:
|
containers:
|
||||||
- name: driver-registrar
|
- name: driver-registrar
|
||||||
# This is necessary only for systems with SELinux, where
|
# This is necessary only for systems with SELinux, where
|
||||||
|
@ -48,11 +48,23 @@ spec:
|
|||||||
values:
|
values:
|
||||||
- {{ .Values.provisioner.name }}
|
- {{ .Values.provisioner.name }}
|
||||||
topologyKey: "kubernetes.io/hostname"
|
topologyKey: "kubernetes.io/hostname"
|
||||||
|
{{- if .Values.provisioner.affinity }}
|
||||||
|
{{ toYaml .Values.provisioner.affinity | indent 8 -}}
|
||||||
|
{{- end -}}
|
||||||
|
{{- else -}}
|
||||||
|
{{- if .Values.provisioner.affinity }}
|
||||||
|
affinity:
|
||||||
|
{{ toYaml .Values.provisioner.affinity | indent 8 -}}
|
||||||
|
{{- end -}}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
serviceAccountName: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
|
serviceAccountName: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
|
||||||
hostNetwork: {{ .Values.provisioner.enableHostNetwork }}
|
hostNetwork: {{ .Values.provisioner.enableHostNetwork }}
|
||||||
{{- if .Values.provisioner.priorityClassName }}
|
{{- if .Values.provisioner.priorityClassName }}
|
||||||
priorityClassName: {{ .Values.provisioner.priorityClassName }}
|
priorityClassName: {{ .Values.provisioner.priorityClassName }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.provisioner.imagePullSecrets }}
|
||||||
|
imagePullSecrets:
|
||||||
|
{{ toYaml .Values.provisioner.imagePullSecrets | indent 8 -}}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
containers:
|
containers:
|
||||||
- name: csi-provisioner
|
- name: csi-provisioner
|
||||||
@ -309,10 +321,6 @@ spec:
|
|||||||
path: oidc-token
|
path: oidc-token
|
||||||
expirationSeconds: 3600
|
expirationSeconds: 3600
|
||||||
audience: ceph-csi-kms
|
audience: ceph-csi-kms
|
||||||
{{- if .Values.provisioner.affinity }}
|
|
||||||
affinity:
|
|
||||||
{{ toYaml .Values.provisioner.affinity | indent 8 -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- if .Values.provisioner.nodeSelector }}
|
{{- if .Values.provisioner.nodeSelector }}
|
||||||
nodeSelector:
|
nodeSelector:
|
||||||
{{ toYaml .Values.provisioner.nodeSelector | indent 8 -}}
|
{{ toYaml .Values.provisioner.nodeSelector | indent 8 -}}
|
||||||
|
@ -103,6 +103,11 @@ nodeplugin:
|
|||||||
loadBalancerIP: ""
|
loadBalancerIP: ""
|
||||||
loadBalancerSourceRanges: []
|
loadBalancerSourceRanges: []
|
||||||
|
|
||||||
|
## Reference to one or more secrets to be used when pulling images
|
||||||
|
##
|
||||||
|
imagePullSecrets: []
|
||||||
|
# - name: "image-pull-secret"
|
||||||
|
|
||||||
profiling:
|
profiling:
|
||||||
# enable profiling to check for memory leaks
|
# enable profiling to check for memory leaks
|
||||||
enabled: false
|
enabled: false
|
||||||
@ -110,7 +115,7 @@ nodeplugin:
|
|||||||
registrar:
|
registrar:
|
||||||
image:
|
image:
|
||||||
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
|
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
|
||||||
tag: v2.6.2
|
tag: v2.8.0
|
||||||
pullPolicy: IfNotPresent
|
pullPolicy: IfNotPresent
|
||||||
resources: {}
|
resources: {}
|
||||||
|
|
||||||
@ -200,6 +205,11 @@ provisioner:
|
|||||||
loadBalancerIP: ""
|
loadBalancerIP: ""
|
||||||
loadBalancerSourceRanges: []
|
loadBalancerSourceRanges: []
|
||||||
|
|
||||||
|
## Reference to one or more secrets to be used when pulling images
|
||||||
|
##
|
||||||
|
imagePullSecrets: []
|
||||||
|
# - name: "image-pull-secret"
|
||||||
|
|
||||||
profiling:
|
profiling:
|
||||||
# enable profiling to check for memory leaks
|
# enable profiling to check for memory leaks
|
||||||
enabled: false
|
enabled: false
|
||||||
@ -207,7 +217,7 @@ provisioner:
|
|||||||
provisioner:
|
provisioner:
|
||||||
image:
|
image:
|
||||||
repository: gcr.io/k8s-staging-sig-storage/csi-provisioner
|
repository: gcr.io/k8s-staging-sig-storage/csi-provisioner
|
||||||
tag: v3.3.0
|
tag: v3.5.0
|
||||||
pullPolicy: IfNotPresent
|
pullPolicy: IfNotPresent
|
||||||
resources: {}
|
resources: {}
|
||||||
## For further options, check
|
## For further options, check
|
||||||
@ -222,7 +232,7 @@ provisioner:
|
|||||||
enabled: true
|
enabled: true
|
||||||
image:
|
image:
|
||||||
repository: registry.k8s.io/sig-storage/csi-attacher
|
repository: registry.k8s.io/sig-storage/csi-attacher
|
||||||
tag: v4.0.0
|
tag: v4.3.0
|
||||||
pullPolicy: IfNotPresent
|
pullPolicy: IfNotPresent
|
||||||
resources: {}
|
resources: {}
|
||||||
## For further options, check
|
## For further options, check
|
||||||
@ -234,7 +244,7 @@ provisioner:
|
|||||||
enabled: true
|
enabled: true
|
||||||
image:
|
image:
|
||||||
repository: registry.k8s.io/sig-storage/csi-resizer
|
repository: registry.k8s.io/sig-storage/csi-resizer
|
||||||
tag: v1.6.0
|
tag: v1.8.0
|
||||||
pullPolicy: IfNotPresent
|
pullPolicy: IfNotPresent
|
||||||
resources: {}
|
resources: {}
|
||||||
## For further options, check
|
## For further options, check
|
||||||
@ -244,7 +254,7 @@ provisioner:
|
|||||||
snapshotter:
|
snapshotter:
|
||||||
image:
|
image:
|
||||||
repository: registry.k8s.io/sig-storage/csi-snapshotter
|
repository: registry.k8s.io/sig-storage/csi-snapshotter
|
||||||
tag: v6.1.0
|
tag: v6.2.2
|
||||||
pullPolicy: IfNotPresent
|
pullPolicy: IfNotPresent
|
||||||
resources: {}
|
resources: {}
|
||||||
## For further options, check
|
## For further options, check
|
||||||
|
@@ -208,24 +208,7 @@ func main() {
 		logAndExit(err.Error())
 	}
 
-	// the driver may need a higher PID limit for handling all concurrent requests
-	if conf.PidLimit != 0 {
-		currentLimit, pidErr := util.GetPIDLimit()
-		if pidErr != nil {
-			klog.Errorf("Failed to get the PID limit, can not reconfigure: %v", pidErr)
-		} else {
-			log.DefaultLog("Initial PID limit is set to %d", currentLimit)
-			err = util.SetPIDLimit(conf.PidLimit)
-			switch {
-			case err != nil:
-				klog.Errorf("Failed to set new PID limit to %d: %v", conf.PidLimit, err)
-			case conf.PidLimit == -1:
-				log.DefaultLog("Reconfigured PID limit to %d (max)", conf.PidLimit)
-			default:
-				log.DefaultLog("Reconfigured PID limit to %d", conf.PidLimit)
-			}
-		}
-	}
-
+	setPIDLimit(&conf)
 	if conf.EnableGRPCMetrics || conf.Vtype == livenessType {
 		// validate metrics endpoint
@@ -282,6 +265,28 @@ func main() {
 	os.Exit(0)
 }
 
+func setPIDLimit(conf *util.Config) {
+	// set pidLimit only for NodeServer
+	// the driver may need a higher PID limit for handling all concurrent requests
+	if conf.IsNodeServer && conf.PidLimit != 0 {
+		currentLimit, pidErr := util.GetPIDLimit()
+		if pidErr != nil {
+			klog.Errorf("Failed to get the PID limit, can not reconfigure: %v", pidErr)
+		} else {
+			log.DefaultLog("Initial PID limit is set to %d", currentLimit)
+			err := util.SetPIDLimit(conf.PidLimit)
+			switch {
+			case err != nil:
+				klog.Errorf("Failed to set new PID limit to %d: %v", conf.PidLimit, err)
+			case conf.PidLimit == -1:
+				log.DefaultLog("Reconfigured PID limit to %d (max)", conf.PidLimit)
+			default:
+				log.DefaultLog("Reconfigured PID limit to %d", conf.PidLimit)
+			}
+		}
+	}
+}
+
 // initControllers will initialize all the controllers.
 func initControllers() {
 	// Add list of controller here.
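The extracted `setPIDLimit` helper only reconfigures the limit on node-server pods, using the repository's `util.GetPIDLimit`/`util.SetPIDLimit` wrappers. As a rough, hedged sketch of what such a PID-limit helper can look like under the hood (assuming a cgroup v1 `pids` controller and a `pids.max` file at an illustrative path; this is not the project's actual implementation), the mechanism boils down to reading and writing that file, with `-1` mapped to the literal value `max`:

```go
// pidlimit_sketch.go — illustrative sketch only; paths and names are assumptions.
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// pidsMaxFile is an assumed location of the pids controller limit file.
const pidsMaxFile = "/sys/fs/cgroup/pids/pids.max"

// getPIDLimit returns the current pids.max value; -1 means "max" (unlimited).
func getPIDLimit() (int, error) {
	data, err := os.ReadFile(pidsMaxFile)
	if err != nil {
		return 0, err
	}
	val := strings.TrimSpace(string(data))
	if val == "max" {
		return -1, nil
	}
	return strconv.Atoi(val)
}

// setPIDLimit writes a new pids.max value; pass -1 to set "max".
func setPIDLimit(limit int) error {
	val := strconv.Itoa(limit)
	if limit == -1 {
		val = "max"
	}
	return os.WriteFile(pidsMaxFile, []byte(val+"\n"), 0o644)
}

func main() {
	if cur, err := getPIDLimit(); err == nil {
		fmt.Println("current pids.max:", cur)
	}
}
```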
@@ -104,7 +104,7 @@ CSI_CHARTS_DIR=$(mktemp -d)
 
 pushd "${CSI_CHARTS_DIR}" >/dev/null
 
-curl -L https://git.io/get_helm.sh | bash -s -- --version "${HELM_VERSION}"
+curl -L "${HELM_SCRIPT}" | bash -s -- --version "${HELM_VERSION}"
 
 build_step "cloning ceph/csi-charts repository"
 git clone https://github.com/ceph/csi-charts
@@ -15,6 +15,8 @@
 .PHONY: all
 all: \
 	scc.yaml \
+	cephfs/kubernetes/csidriver.yaml \
+	cephfs/kubernetes/csi-config-map.yaml \
 	nfs/kubernetes/csidriver.yaml \
 	nfs/kubernetes/csi-config-map.yaml \
 	rbd/kubernetes/csidriver.yaml \
@@ -23,6 +25,12 @@ all: \
 scc.yaml: ../api/deploy/ocp/scc.yaml ../api/deploy/ocp/scc.go
 	$(MAKE) -C ../tools generate-deploy
 
+cephfs/kubernetes/csidriver.yaml: ../api/deploy/kubernetes/cephfs/csidriver.yaml ../api/deploy/kubernetes/cephfs/csidriver.go
+	$(MAKE) -C ../tools generate-deploy
+
+cephfs/kubernetes/csi-config-map.yaml: ../api/deploy/kubernetes/cephfs/csi-config-map.*
+	$(MAKE) -C ../tools generate-deploy
+
 nfs/kubernetes/csidriver.yaml: ../api/deploy/kubernetes/nfs/csidriver.yaml ../api/deploy/kubernetes/nfs/csidriver.go
 	$(MAKE) -C ../tools generate-deploy
 
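These Makefile targets regenerate the CephFS deploy manifests from sources under `api/deploy` via `$(MAKE) -C ../tools generate-deploy`, which is why the generated files later in this change carry a "DO NOT MODIFY" yamlgen header. As a hedged illustration of the general pattern such a generator can follow (this is not the project's actual yamlgen code; the template string and struct are invented for the example), a manifest can be kept as a Go `text/template` and rendered with a small values struct:

```go
// yamlgen_sketch.go — illustrative sketch only; names and template are assumptions.
package main

import (
	"os"
	"text/template"
)

// csiDriverTemplate stands in for a manifest template kept next to the generator.
const csiDriverTemplate = `---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: "{{ .Name }}"
spec:
  attachRequired: {{ .AttachRequired }}
  seLinuxMount: true
`

// DriverValues holds the values substituted into the template.
type DriverValues struct {
	Name           string
	AttachRequired bool
}

func main() {
	tmpl := template.Must(template.New("csidriver").Parse(csiDriverTemplate))

	out, err := os.Create("csidriver.yaml")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// Render the manifest with example values.
	if err := tmpl.Execute(out, DriverValues{
		Name:           "cephfs.csi.ceph.com",
		AttachRequired: false,
	}); err != nil {
		panic(err)
	}
}
```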
@ -43,7 +43,7 @@ spec:
|
|||||||
priorityClassName: system-cluster-critical
|
priorityClassName: system-cluster-critical
|
||||||
containers:
|
containers:
|
||||||
- name: csi-provisioner
|
- name: csi-provisioner
|
||||||
image: registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
|
image: registry.k8s.io/sig-storage/csi-provisioner:v3.5.0
|
||||||
args:
|
args:
|
||||||
- "--csi-address=$(ADDRESS)"
|
- "--csi-address=$(ADDRESS)"
|
||||||
- "--v=1"
|
- "--v=1"
|
||||||
@ -62,7 +62,7 @@ spec:
|
|||||||
- name: socket-dir
|
- name: socket-dir
|
||||||
mountPath: /csi
|
mountPath: /csi
|
||||||
- name: csi-resizer
|
- name: csi-resizer
|
||||||
image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0
|
image: registry.k8s.io/sig-storage/csi-resizer:v1.8.0
|
||||||
args:
|
args:
|
||||||
- "--csi-address=$(ADDRESS)"
|
- "--csi-address=$(ADDRESS)"
|
||||||
- "--v=1"
|
- "--v=1"
|
||||||
@ -79,7 +79,7 @@ spec:
|
|||||||
- name: socket-dir
|
- name: socket-dir
|
||||||
mountPath: /csi
|
mountPath: /csi
|
||||||
- name: csi-snapshotter
|
- name: csi-snapshotter
|
||||||
image: registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
|
image: registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2
|
||||||
args:
|
args:
|
||||||
- "--csi-address=$(ADDRESS)"
|
- "--csi-address=$(ADDRESS)"
|
||||||
- "--v=1"
|
- "--v=1"
|
||||||
|
@ -27,7 +27,7 @@ spec:
|
|||||||
securityContext:
|
securityContext:
|
||||||
privileged: true
|
privileged: true
|
||||||
allowPrivilegeEscalation: true
|
allowPrivilegeEscalation: true
|
||||||
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.2
|
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0
|
||||||
args:
|
args:
|
||||||
- "--v=1"
|
- "--v=1"
|
||||||
- "--csi-address=/csi/csi.sock"
|
- "--csi-address=/csi/csi.sock"
|
||||||
|
@ -1,8 +1,15 @@
|
|||||||
|
#
|
||||||
|
# /!\ DO NOT MODIFY THIS FILE
|
||||||
|
#
|
||||||
|
# This file has been automatically generated by Ceph-CSI yamlgen.
|
||||||
|
# The source for the contents can be found in the api/deploy directory, make
|
||||||
|
# your modifications there.
|
||||||
|
#
|
||||||
---
|
---
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: ConfigMap
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
name: "ceph-csi-config"
|
||||||
data:
|
data:
|
||||||
config.json: |-
|
config.json: |-
|
||||||
[]
|
[]
|
||||||
metadata:
|
|
||||||
name: ceph-csi-config
|
|
||||||
|
@ -1,9 +1,17 @@
|
|||||||
|
#
|
||||||
|
# /!\ DO NOT MODIFY THIS FILE
|
||||||
|
#
|
||||||
|
# This file has been automatically generated by Ceph-CSI yamlgen.
|
||||||
|
# The source for the contents can be found in the api/deploy directory, make
|
||||||
|
# your modifications there.
|
||||||
|
#
|
||||||
---
|
---
|
||||||
apiVersion: storage.k8s.io/v1
|
apiVersion: storage.k8s.io/v1
|
||||||
kind: CSIDriver
|
kind: CSIDriver
|
||||||
metadata:
|
metadata:
|
||||||
name: cephfs.csi.ceph.com
|
name: "cephfs.csi.ceph.com"
|
||||||
spec:
|
spec:
|
||||||
attachRequired: false
|
attachRequired: false
|
||||||
podInfoOnMount: false
|
podInfoOnMount: false
|
||||||
fsGroupPolicy: File
|
fsGroupPolicy: File
|
||||||
|
seLinuxMount: true
|
||||||
|
@ -40,7 +40,7 @@ spec:
|
|||||||
topologyKey: "kubernetes.io/hostname"
|
topologyKey: "kubernetes.io/hostname"
|
||||||
containers:
|
containers:
|
||||||
- name: csi-provisioner
|
- name: csi-provisioner
|
||||||
image: registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
|
image: registry.k8s.io/sig-storage/csi-provisioner:v3.5.0
|
||||||
args:
|
args:
|
||||||
- "--csi-address=$(ADDRESS)"
|
- "--csi-address=$(ADDRESS)"
|
||||||
- "--v=1"
|
- "--v=1"
|
||||||
@ -57,7 +57,7 @@ spec:
|
|||||||
- name: socket-dir
|
- name: socket-dir
|
||||||
mountPath: /csi
|
mountPath: /csi
|
||||||
- name: csi-resizer
|
- name: csi-resizer
|
||||||
image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0
|
image: registry.k8s.io/sig-storage/csi-resizer:v1.8.0
|
||||||
args:
|
args:
|
||||||
- "--csi-address=$(ADDRESS)"
|
- "--csi-address=$(ADDRESS)"
|
||||||
- "--v=1"
|
- "--v=1"
|
||||||
@ -73,7 +73,7 @@ spec:
|
|||||||
- name: socket-dir
|
- name: socket-dir
|
||||||
mountPath: /csi
|
mountPath: /csi
|
||||||
- name: csi-snapshotter
|
- name: csi-snapshotter
|
||||||
image: registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
|
image: registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2
|
||||||
args:
|
args:
|
||||||
- "--csi-address=$(ADDRESS)"
|
- "--csi-address=$(ADDRESS)"
|
||||||
- "--v=1"
|
- "--v=1"
|
||||||
|
@ -27,7 +27,7 @@ spec:
|
|||||||
securityContext:
|
securityContext:
|
||||||
privileged: true
|
privileged: true
|
||||||
allowPrivilegeEscalation: true
|
allowPrivilegeEscalation: true
|
||||||
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.2
|
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0
|
||||||
args:
|
args:
|
||||||
- "--v=1"
|
- "--v=1"
|
||||||
- "--csi-address=/csi/csi.sock"
|
- "--csi-address=/csi/csi.sock"
|
||||||
|
@ -13,5 +13,6 @@ metadata:
|
|||||||
spec:
|
spec:
|
||||||
attachRequired: false
|
attachRequired: false
|
||||||
fsGroupPolicy: File
|
fsGroupPolicy: File
|
||||||
|
seLinuxMount: true
|
||||||
volumeLifecycleModes:
|
volumeLifecycleModes:
|
||||||
- Persistent
|
- Persistent
|
||||||
|
@ -47,7 +47,7 @@ spec:
|
|||||||
priorityClassName: system-cluster-critical
|
priorityClassName: system-cluster-critical
|
||||||
containers:
|
containers:
|
||||||
- name: csi-provisioner
|
- name: csi-provisioner
|
||||||
image: registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
|
image: registry.k8s.io/sig-storage/csi-provisioner:v3.5.0
|
||||||
args:
|
args:
|
||||||
- "--csi-address=$(ADDRESS)"
|
- "--csi-address=$(ADDRESS)"
|
||||||
- "--v=1"
|
- "--v=1"
|
||||||
@ -69,7 +69,7 @@ spec:
|
|||||||
- name: socket-dir
|
- name: socket-dir
|
||||||
mountPath: /csi
|
mountPath: /csi
|
||||||
- name: csi-snapshotter
|
- name: csi-snapshotter
|
||||||
image: registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
|
image: registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2
|
||||||
args:
|
args:
|
||||||
- "--csi-address=$(ADDRESS)"
|
- "--csi-address=$(ADDRESS)"
|
||||||
- "--v=1"
|
- "--v=1"
|
||||||
@ -84,7 +84,7 @@ spec:
|
|||||||
- name: socket-dir
|
- name: socket-dir
|
||||||
mountPath: /csi
|
mountPath: /csi
|
||||||
- name: csi-attacher
|
- name: csi-attacher
|
||||||
image: registry.k8s.io/sig-storage/csi-attacher:v4.0.0
|
image: registry.k8s.io/sig-storage/csi-attacher:v4.3.0
|
||||||
args:
|
args:
|
||||||
- "--v=1"
|
- "--v=1"
|
||||||
- "--csi-address=$(ADDRESS)"
|
- "--csi-address=$(ADDRESS)"
|
||||||
@ -99,7 +99,7 @@ spec:
|
|||||||
- name: socket-dir
|
- name: socket-dir
|
||||||
mountPath: /csi
|
mountPath: /csi
|
||||||
- name: csi-resizer
|
- name: csi-resizer
|
||||||
image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0
|
image: registry.k8s.io/sig-storage/csi-resizer:v1.8.0
|
||||||
args:
|
args:
|
||||||
- "--csi-address=$(ADDRESS)"
|
- "--csi-address=$(ADDRESS)"
|
||||||
- "--v=1"
|
- "--v=1"
|
||||||
|
@ -29,7 +29,7 @@ spec:
|
|||||||
securityContext:
|
securityContext:
|
||||||
privileged: true
|
privileged: true
|
||||||
allowPrivilegeEscalation: true
|
allowPrivilegeEscalation: true
|
||||||
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.2
|
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0
|
||||||
args:
|
args:
|
||||||
- "--v=1"
|
- "--v=1"
|
||||||
- "--csi-address=/csi/csi.sock"
|
- "--csi-address=/csi/csi.sock"
|
||||||
|
@ -13,4 +13,5 @@ metadata:
|
|||||||
spec:
|
spec:
|
||||||
attachRequired: true
|
attachRequired: true
|
||||||
podInfoOnMount: false
|
podInfoOnMount: false
|
||||||
|
seLinuxMount: true
|
||||||
fsGroupPolicy: File
|
fsGroupPolicy: File
|
||||||
|
@@ -17,7 +17,7 @@ allowHostNetwork: true
 # This need to be set to true as we use HostPath
 allowHostDirVolumePlugin: true
 priority:
-# SYS_ADMIN is needed for rbd to execture rbd map command
+# SYS_ADMIN is needed for rbd to execute rbd map command
 allowedCapabilities: ["SYS_ADMIN"]
 # Needed as we run liveness container on daemonset pods
 allowHostPorts: true
@@ -9,6 +9,7 @@
 - [Upgrading from v3.4 to v3.5](#upgrading-from-v34-to-v35)
 - [Upgrading from v3.5 to v3.6](#upgrading-from-v35-to-v36)
 - [Upgrading from v3.6 to v3.7](#upgrading-from-v36-to-v37)
+- [Upgrading from v3.7 to v3.8](#upgrading-from-v37-to-v38)
 - [Upgrading CephFS](#upgrading-cephfs)
 - [1. Upgrade CephFS Provisioner resources](#1-upgrade-cephfs-provisioner-resources)
 - [1.1 Update the CephFS Provisioner RBAC](#11-update-the-cephfs-provisioner-rbac)
@@ -55,7 +56,7 @@ To avoid this issue in future upgrades, we recommend that you do not use the
 fuse client as of now.
 
 This guide will walk you through the steps to upgrade the software in a cluster
-from v3.4 to v3.5
+from v3.7 to v3.8
 
 ### Snapshot-controller and snapshot crd
 
@@ -94,6 +95,11 @@ to upgrade from cephcsi v3.5 to v3.6
 
 ## Upgrading from v3.6 to v3.7
 
+Refer [upgrade-from-v3.6-v3.7](https://github.com/ceph/ceph-csi/blob/v3.7.2/docs/ceph-csi-upgrade.md)
+to upgrade from cephcsi v3.6 to v3.7
+
+## Upgrading from v3.7 to v3.8
+
 **Ceph-csi releases from devel are expressly unsupported.** It is strongly
 recommended that you use [official
 releases](https://github.com/ceph/ceph-csi/releases) of Ceph-csi. Unreleased
@@ -102,15 +108,15 @@ that will not be supported in the official releases. Builds from the devel
 branch can have functionality changed and even removed at any time without
 compatibility support and without prior notice.
 
-**Also, we do not recommend any direct upgrades to 3.7 except from 3.6 to 3.7.**
-For example, upgrading from 3.5 to 3.7 is not recommended.
+**Also, we do not recommend any direct upgrades to 3.8 except from 3.7 to 3.8.**
+For example, upgrading from 3.6 to 3.8 is not recommended.
 
-git checkout v3.7.2 tag
+git checkout v3.8.0 tag
 
 ```bash
 git clone https://github.com/ceph/ceph-csi.git
 cd ./ceph-csi
-git checkout v3.7.2
+git checkout v3.8.0
 ```
 
 ```console
@@ -246,7 +252,7 @@ kubectl delete role cephfs-csi-nodeplugin-psp --ignore-not-found
 kubectl delete rolebinding cephfs-csi-nodeplugin-psp --ignore-not-found
 ```
 
-we have successfully upgraded cephfs csi from v3.6 to v3.7
+we have successfully upgraded cephfs csi from v3.7 to v3.8
 
 ### Upgrading RBD
 
@@ -329,7 +335,7 @@ kubectl delete role rbd-csi-vault-token-review-psp --ignore-not-found
 kubectl delete rolebinding rbd-csi-vault-token-review-psp --ignore-not-found
 ```
 
-we have successfully upgraded RBD csi from v3.6 to v3.7
+we have successfully upgraded RBD csi from v3.7 to v3.8
 
 ### Upgrading NFS
 
@@ -391,7 +397,7 @@ daemonset.apps/csi-nfsplugin configured
 service/csi-metrics-nfsplugin configured
 ```
 
-we have successfully upgraded nfs csi from v3.6 to v3.7
+we have successfully upgraded nfs csi from v3.7 to v3.8
 
 ### CSI Sidecar containers consideration
 
@@ -148,7 +148,7 @@ for more information.
 **Deploy Ceph configuration ConfigMap for CSI pods:**
 
 ```bash
-kubectl create -f ../../../examples/ceph-conf.yaml
+kubectl create -f ../../ceph-conf.yaml
 ```
 
 **Deploy CSI sidecar containers:**
@@ -204,7 +204,7 @@ test the deployment further.
 
 ### Notes on volume deletion
 
-Dynamically povisioned volumes are deleted by the driver, when requested to
+Dynamically provisioned volumes are deleted by the driver, when requested to
 do so. Statically provisioned volumes, from plugin versions less than or
 equal to 1.0.0, are a no-op when a delete operation is performed against the
 same, and are expected to be deleted on the Ceph cluster by the user.
@@ -235,4 +235,4 @@ can even be shared.
 However, not all KMS are supported in order to be compatible with
 [fscrypt](https://github.com/google/fscrypt). In general KMS that
 either store secrets to use directly (Vault), or allow access to the
-plain password (Kubernets Secrets) work.
+plain password (Kubernetes Secrets) work.
@@ -61,6 +61,7 @@ make image-cephcsi
 | `volumeNamePrefix` | no | Prefix to use for naming RBD images (defaults to `csi-vol-`). |
 | `snapshotNamePrefix` | no | Prefix to use for naming RBD snapshot images (defaults to `csi-snap-`). |
 | `imageFeatures` | no | RBD image features. CSI RBD currently supports `layering`, `journaling`, `exclusive-lock`, `object-map`, `fast-diff`, `deep-flatten` features. deep-flatten is added for cloned images. Refer <https://docs.ceph.com/en/latest/rbd/rbd-config-ref/#image-features> for image feature dependencies. |
+| `mkfsOptions` | no | Options to pass to the `mkfs` command while creating the filesystem on the RBD device. Check the man-page for the `mkfs` command for the filesystem for more details. When `mkfsOptions` is set here, the defaults will not be used, consider including them in this parameter. |
 | `tryOtherMounters` | no | Specifies whether to try other mounters in case if the current mounter fails to mount the rbd image for any reason |
 | `mapOptions` | no | Map options to use when mapping rbd image. See [krbd](https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options) and [nbd](https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options) options. |
 | `unmapOptions` | no | Unmap options to use when unmapping rbd image. See [krbd](https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options) and [nbd](https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options) options. |
@@ -133,7 +134,7 @@ for more information.
 **Deploy Ceph configuration ConfigMap for CSI pods:**
 
 ```bash
-kubectl create -f ../example/ceph-config.yaml
+kubectl create -f ../../ceph-conf.yaml
 ```
 
 **Deploy CSI sidecar containers:**
@@ -47,7 +47,6 @@ csi:
 volumeAttributes:
 clusterID: rook-ceph
 imageFeatures: layering
-imageFormat: "2"
 imageName: csi-vol-0c23de1c-18fb-11eb-a903-0242ac110005
 journalPool: replicapool
 pool: replicapool
@@ -31,7 +31,7 @@ it is **highly** encouraged to:
 * Run
 
 ```console
-go get -d github.com/ceph/ceph-csi`
+go get -d github.com/ceph/ceph-csi
 ```
 
 This will just download the source and not build it. The downloaded source
@@ -40,7 +40,7 @@ it is **highly** encouraged to:
 * Add your fork as a git remote:
 
 ```console
-git remote add fork https://github.com/<your-github-username>/ceph-csi`
+git remote add fork https://github.com/<your-github-username>/ceph-csi
 ```
 
 * Set up a pre-commit hook to catch issues locally.
@@ -327,7 +327,7 @@ are certain about the flaky test failure behavior, then comment on the PR
 indicating the logs about a particular test that went flaky and use the
 appropriate command to retrigger the job[s].
 If you are uncertain about the CI failure, we prefer that you ping us on
-[Slack channel #ci](https://cephcsi.slack.com) with more details on
+[Slack channel #ceph-csi](https://ceph-storage.slack.com) with more details on
 failures before retriggering the jobs, we will be happy to help.
 
 ### Retesting failed Jobs
@@ -23,7 +23,7 @@ csi_liveness 1
 ```
 
 Promethues can be deployed through the promethues operator described [here](https://coreos.com/operators/prometheus/docs/latest/user-guides/getting-started.html).
-The [service-monitor](../examples/service-monitor.yaml) will tell promethues how
+The [service-monitor](../deploy/service-monitor.yaml) will tell promethues how
 to pull metrics out of CSI.
 
 Each CSI pod has a service to expose the endpoint to prometheus. By default, rbd
@@ -23,7 +23,7 @@ import (
 	"sync"
 
 	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
-	. "github.com/onsi/ginkgo/v2" // nolint
+	. "github.com/onsi/ginkgo/v2" //nolint:golint // e2e uses By() and other Ginkgo functions
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
@@ -67,6 +67,7 @@ func deleteCephfsPlugin() {
 }
 
 func createORDeleteCephfsResources(action kubectlAction) {
+	cephConfigFile := getConfigFile(cephConfconfigMap, deployPath, examplePath)
 	resources := []ResourceDeployer{
 		// shared resources
 		&yamlResource{
@@ -74,7 +75,7 @@ func createORDeleteCephfsResources(action kubectlAction) {
 			allowMissing: true,
 		},
 		&yamlResource{
-			filename: examplePath + cephConfconfigMap,
+			filename: cephConfigFile,
 			allowMissing: true,
 		},
 		// dependencies for provisioner
@@ -227,7 +228,7 @@ var _ = Describe(cephfsType, func() {
 			logsCSIPods("app=csi-cephfsplugin", c)
 
 			// log all details from the namespace where Ceph-CSI is deployed
-			e2edebug.DumpAllNamespaceInfo(c, cephCSINamespace)
+			e2edebug.DumpAllNamespaceInfo(context.TODO(), c, cephCSINamespace)
 		}
 		err := deleteConfigMap(cephFSDirPath)
 		if err != nil {
@@ -317,6 +318,25 @@ var _ = Describe(cephfsType, func() {
 				}
 			})
 		}
 
+		By("verify mountOptions support", func() {
+			err := createCephfsStorageClass(f.ClientSet, f, true, nil)
+			if err != nil {
+				framework.Failf("failed to create CephFS storageclass: %v", err)
+			}
+
+			err = verifySeLinuxMountOption(f, pvcPath, appPath,
+				cephFSDeamonSetName, cephFSContainerName, cephCSINamespace)
+			if err != nil {
+				framework.Failf("failed to verify mount options: %v", err)
+			}
+
+			err = deleteResource(cephFSExamplePath + "storageclass.yaml")
+			if err != nil {
+				framework.Failf("failed to delete CephFS storageclass: %v", err)
+			}
+		})
+
 		By("verify generic ephemeral volume support", func() {
 			err := createCephfsStorageClass(f.ClientSet, f, true, nil)
 			if err != nil {
@@ -1944,8 +1964,6 @@ var _ = Describe(cephfsType, func() {
 				framework.Failf("failed to delete PVC or application: %v", err)
 			}
 
-			validateCephFSSnapshotCount(f, 0, subvolumegroup, pv)
-
 			err = deletePVCAndApp("", f, pvc, app)
 			if err != nil {
 				framework.Failf("failed to delete PVC or application: %v", err)
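The new `cephConfigFile` lookup above prefers the generated manifest under the deploy path and falls back to the examples path when it is missing. The helper below is a hedged sketch of that lookup pattern; the real `getConfigFile` in the e2e suite may differ, and the signature and paths here are assumptions for illustration:

```go
// configfile_sketch.go — illustrative only; not the project's actual helper.
package main

import (
	"fmt"
	"os"
)

// getConfigFileSketch returns primaryPath+filename when that file exists,
// otherwise it falls back to fallbackPath+filename.
func getConfigFileSketch(filename, primaryPath, fallbackPath string) string {
	if _, err := os.Stat(primaryPath + filename); err == nil {
		return primaryPath + filename
	}
	return fallbackPath + filename
}

func main() {
	// Example call with assumed relative paths.
	fmt.Println(getConfigFileSketch("ceph-conf.yaml", "../deploy/", "../examples/"))
}
```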
@@ -66,6 +66,12 @@ func createCephfsStorageClass(
 	if err != nil {
 		return err
 	}
+	// TODO: remove this once the ceph-csi driver release-v3.9 is completed
+	// and upgrade tests are done from v3.9 to devel.
+	// The mountOptions from previous are not compatible with NodeStageVolume
+	// request.
+	sc.MountOptions = []string{}
+
 	sc.Parameters["fsName"] = fileSystemName
 	sc.Parameters["csi.storage.k8s.io/provisioner-secret-namespace"] = cephCSINamespace
 	sc.Parameters["csi.storage.k8s.io/provisioner-secret-name"] = cephFSProvisionerSecretName
@@ -102,8 +108,8 @@ func createCephfsStorageClass(
 
 	timeout := time.Duration(deployTimeout) * time.Minute
 
-	return wait.PollImmediate(poll, timeout, func() (bool, error) {
-		_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
+	return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(ctx context.Context) (bool, error) {
+		_, err = c.StorageV1().StorageClasses().Create(ctx, &sc, metav1.CreateOptions{})
 		if err != nil {
 			framework.Logf("error creating StorageClass %q: %v", sc.Name, err)
 			if isRetryableAPIError(err) {
@@ -21,7 +21,7 @@ import (
 	"fmt"
 	"strings"
 
-	. "github.com/onsi/gomega" // nolint
+	. "github.com/onsi/gomega" //nolint:golint // e2e uses Expect() and other Gomega functions
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -47,7 +47,7 @@ func deployVault(c kubernetes.Interface, deployTimeout int) {
 		"cm",
 		"ceph-csi-encryption-kms-config",
 		"--ignore-not-found=true")
-	Expect(err).Should(BeNil())
+	Expect(err).ShouldNot(HaveOccurred())
 
 	createORDeleteVault(kubectlCreate)
 	opt := metav1.ListOptions{
@@ -55,11 +55,11 @@ func deployVault(c kubernetes.Interface, deployTimeout int) {
 	}
 
 	pods, err := c.CoreV1().Pods(cephCSINamespace).List(context.TODO(), opt)
-	Expect(err).Should(BeNil())
-	Expect(len(pods.Items)).Should(Equal(1))
+	Expect(err).ShouldNot(HaveOccurred())
+	Expect(pods.Items).Should(HaveLen(1))
 	name := pods.Items[0].Name
 	err = waitForPodInRunningState(name, cephCSINamespace, c, deployTimeout, noError)
-	Expect(err).Should(BeNil())
+	Expect(err).ShouldNot(HaveOccurred())
 }
 
 func deleteVault() {
@@ -123,7 +123,7 @@ func createTenantServiceAccount(c kubernetes.Interface, ns string) error {
 // were created with createTenantServiceAccount.
 func deleteTenantServiceAccount(ns string) {
 	err := createORDeleteTenantServiceAccount(kubectlDelete, ns)
-	Expect(err).Should(BeNil())
+	Expect(err).ShouldNot(HaveOccurred())
 }
 
 // createORDeleteTenantServiceAccount is a helper that reads the tenant-sa.yaml
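The assertion changes above move from `Expect(err).Should(BeNil())` to `Expect(err).ShouldNot(HaveOccurred())` and from length comparisons to `HaveLen`, which produce clearer failure messages. A small, self-contained sketch of the preferred Gomega style (standalone usage with `NewGomega`, not tied to the e2e framework) looks like this:

```go
// gomega_sketch.go — minimal standalone example of the assertion style.
package main

import (
	"errors"
	"fmt"

	"github.com/onsi/gomega"
)

func main() {
	// NewGomega takes a fail handler; tests normally wire this to the framework.
	g := gomega.NewGomega(func(msg string, _ ...int) { fmt.Println("FAIL:", msg) })

	var err error
	g.Expect(err).ShouldNot(gomega.HaveOccurred()) // preferred over Should(BeNil())

	pods := []string{"vault-0"}
	g.Expect(pods).Should(gomega.HaveLen(1)) // preferred over Expect(len(pods)).Should(Equal(1))

	err = errors.New("boom")
	g.Expect(err).Should(gomega.HaveOccurred())
}
```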
@@ -18,7 +18,6 @@ package e2e
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"os"
 	"time"
@@ -87,15 +86,16 @@ func createDeploymentApp(clientSet kubernetes.Interface, app *appsv1.Deployment,
 // deleteDeploymentApp deletes the deployment object.
 func deleteDeploymentApp(clientSet kubernetes.Interface, name, ns string, deployTimeout int) error {
 	timeout := time.Duration(deployTimeout) * time.Minute
-	err := clientSet.AppsV1().Deployments(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
+	ctx := context.TODO()
+	err := clientSet.AppsV1().Deployments(ns).Delete(ctx, name, metav1.DeleteOptions{})
 	if err != nil {
 		return fmt.Errorf("failed to delete deployment: %w", err)
 	}
 	start := time.Now()
 	framework.Logf("Waiting for deployment %q to be deleted", name)
 
-	return wait.PollImmediate(poll, timeout, func() (bool, error) {
-		_, err := clientSet.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
+	return wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) {
+		_, err := clientSet.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			if isRetryableAPIError(err) {
 				return false, nil
@ -118,8 +118,8 @@ func waitForDeploymentInAvailableState(clientSet kubernetes.Interface, name, ns
|
|||||||
start := time.Now()
|
start := time.Now()
|
||||||
framework.Logf("Waiting up to %q to be in Available state", name)
|
framework.Logf("Waiting up to %q to be in Available state", name)
|
||||||
|
|
||||||
return wait.PollImmediate(poll, timeout, func() (bool, error) {
|
return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(ctx context.Context) (bool, error) {
|
||||||
d, err := clientSet.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
|
d, err := clientSet.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if isRetryableAPIError(err) {
|
if isRetryableAPIError(err) {
|
||||||
return false, nil
|
return false, nil
|
||||||
@ -145,8 +145,8 @@ func waitForDeploymentComplete(clientSet kubernetes.Interface, name, ns string,
|
|||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
timeout := time.Duration(deployTimeout) * time.Minute
|
timeout := time.Duration(deployTimeout) * time.Minute
|
||||||
err = wait.PollImmediate(poll, timeout, func() (bool, error) {
|
err = wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(ctx context.Context) (bool, error) {
|
||||||
deployment, err = clientSet.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
|
deployment, err = clientSet.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if isRetryableAPIError(err) {
|
if isRetryableAPIError(err) {
|
||||||
return false, nil
|
return false, nil
|
||||||
@@ -175,7 +175,7 @@ func waitForDeploymentComplete(clientSet kubernetes.Interface, name, ns string,
 		return false, nil
 	})
 
-	if errors.Is(err, wait.ErrWaitTimeout) {
+	if wait.Interrupted(err) {
 		err = fmt.Errorf("%s", reason)
 	}
 	if err != nil {
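These e2e helpers migrate from the deprecated `wait.PollImmediate` to `wait.PollUntilContextTimeout`, pass the context into the polled condition, and test for timeouts with `wait.Interrupted` instead of comparing against `wait.ErrWaitTimeout`. A minimal sketch of the new polling pattern, assuming `k8s.io/apimachinery` v0.27 or later where these functions exist:

```go
// poll_sketch.go — minimal example of the newer apimachinery polling API.
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.TODO()
	attempts := 0

	// immediate=true runs the condition once before the first interval elapses,
	// matching the old PollImmediate behaviour.
	err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, time.Second, true,
		func(ctx context.Context) (bool, error) {
			attempts++
			// Return true once the condition is met; false keeps polling.
			return attempts >= 3, nil
		})

	if wait.Interrupted(err) {
		fmt.Println("timed out waiting for the condition")
		return
	}
	fmt.Println("condition met after", attempts, "attempts")
}
```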
@ -311,8 +311,8 @@ func waitForDeploymentUpdateScale(
|
|||||||
) error {
|
) error {
|
||||||
t := time.Duration(timeout) * time.Minute
|
t := time.Duration(timeout) * time.Minute
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
err := wait.PollImmediate(poll, t, func() (bool, error) {
|
err := wait.PollUntilContextTimeout(context.TODO(), poll, t, true, func(ctx context.Context) (bool, error) {
|
||||||
scaleResult, upsErr := c.AppsV1().Deployments(ns).UpdateScale(context.TODO(),
|
scaleResult, upsErr := c.AppsV1().Deployments(ns).UpdateScale(ctx,
|
||||||
deploymentName, scale, metav1.UpdateOptions{})
|
deploymentName, scale, metav1.UpdateOptions{})
|
||||||
if upsErr != nil {
|
if upsErr != nil {
|
||||||
if isRetryableAPIError(upsErr) {
|
if isRetryableAPIError(upsErr) {
|
||||||
@ -347,9 +347,9 @@ func waitForDeploymentUpdate(
|
|||||||
) error {
|
) error {
|
||||||
t := time.Duration(timeout) * time.Minute
|
t := time.Duration(timeout) * time.Minute
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
err := wait.PollImmediate(poll, t, func() (bool, error) {
|
err := wait.PollUntilContextTimeout(context.TODO(), poll, t, true, func(ctx context.Context) (bool, error) {
|
||||||
_, upErr := c.AppsV1().Deployments(deployment.Namespace).Update(
|
_, upErr := c.AppsV1().Deployments(deployment.Namespace).Update(
|
||||||
context.TODO(), deployment, metav1.UpdateOptions{})
|
ctx, deployment, metav1.UpdateOptions{})
|
||||||
if upErr != nil {
|
if upErr != nil {
|
||||||
if isRetryableAPIError(upErr) {
|
if isRetryableAPIError(upErr) {
|
||||||
return false, nil
|
return false, nil
|
||||||
@ -392,6 +392,7 @@ func waitForContainersArgsUpdate(
|
|||||||
timeout int,
|
timeout int,
|
||||||
) error {
|
) error {
|
||||||
framework.Logf("waiting for deployment updates %s/%s", ns, deploymentName)
|
framework.Logf("waiting for deployment updates %s/%s", ns, deploymentName)
|
||||||
|
ctx := context.TODO()
|
||||||
|
|
||||||
// wait for the deployment to be available
|
// wait for the deployment to be available
|
||||||
err := waitForDeploymentInAvailableState(c, deploymentName, ns, deployTimeout)
|
err := waitForDeploymentInAvailableState(c, deploymentName, ns, deployTimeout)
|
||||||
@ -400,7 +401,7 @@ func waitForContainersArgsUpdate(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Scale down to 0.
|
// Scale down to 0.
|
||||||
scale, err := c.AppsV1().Deployments(ns).GetScale(context.TODO(), deploymentName, metav1.GetOptions{})
|
scale, err := c.AppsV1().Deployments(ns).GetScale(ctx, deploymentName, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error get scale deployment %s/%s: %w", ns, deploymentName, err)
|
return fmt.Errorf("error get scale deployment %s/%s: %w", ns, deploymentName, err)
|
||||||
}
|
}
|
||||||
@ -413,7 +414,7 @@ func waitForContainersArgsUpdate(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Update deployment.
|
// Update deployment.
|
||||||
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
|
deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error get deployment %s/%s: %w", ns, deploymentName, err)
|
return fmt.Errorf("error get deployment %s/%s: %w", ns, deploymentName, err)
|
||||||
}
|
}
|
||||||
@ -457,8 +458,8 @@ func waitForContainersArgsUpdate(
|
|||||||
// wait for scale to become count
|
// wait for scale to become count
|
||||||
t := time.Duration(timeout) * time.Minute
|
t := time.Duration(timeout) * time.Minute
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
err = wait.PollImmediate(poll, t, func() (bool, error) {
|
err = wait.PollUntilContextTimeout(ctx, poll, t, true, func(ctx context.Context) (bool, error) {
|
||||||
deploy, getErr := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
|
deploy, getErr := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
|
||||||
if getErr != nil {
|
if getErr != nil {
|
||||||
if isRetryableAPIError(getErr) {
|
if isRetryableAPIError(getErr) {
|
||||||
return false, nil
|
return false, nil
|
||||||
|
@ -21,8 +21,9 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
// nolint:lll // error string cannot be split into multiple lines as is a
|
|
||||||
// output from kubectl.
|
// output from kubectl.
|
||||||
|
//
|
||||||
|
//nolint:lll // error string cannot be split into multiple lines as is a
|
||||||
func TestGetStdErr(t *testing.T) {
|
func TestGetStdErr(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
|
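Several hunks only touch `nolint` comments: golangci-lint treats `//nolint:<linter>` with no space after the slashes as a machine-readable directive, while `// nolint:...` is just an ordinary comment, so the diff moves the directive onto its own, correctly formatted line. For example:

```go
package e2e

// Machine-readable directive: no space between "//" and "nolint", the linter
// named explicitly, and the justification after a second "//".
//nolint:gomnd // numbers specify Kernel versions.
var minKernelMajorVersion = 5

// With a space after "//" this is an ordinary comment and suppresses nothing:
// nolint:gomnd
var sampleNodeCount = 3
```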
@ -122,8 +122,9 @@ func (vc *vaultConfig) canGetPassphrase() bool {
|
|||||||
|
|
||||||
// getPassphrase method will execute few commands to try read the secret for
|
// getPassphrase method will execute few commands to try read the secret for
|
||||||
// specified key from inside the vault container:
|
// specified key from inside the vault container:
|
||||||
// * authenticate with vault and ignore any stdout (we do not need output)
|
// - authenticate with vault and ignore any stdout (we do not need output)
|
||||||
// * issue get request for particular key
|
// - issue get request for particular key
|
||||||
|
//
|
||||||
// resulting in stdOut (first entry in tuple) - output that contains the key
|
// resulting in stdOut (first entry in tuple) - output that contains the key
|
||||||
// or stdErr (second entry in tuple) - error getting the key.
|
// or stdErr (second entry in tuple) - error getting the key.
|
||||||
func (vc *vaultConfig) getPassphrase(f *framework.Framework, key string) (string, string) {
|
func (vc *vaultConfig) getPassphrase(f *framework.Framework, key string) (string, string) {
|
||||||
|
@ -46,7 +46,7 @@ func logsCSIPods(label string, c clientset.Interface) {
|
|||||||
func kubectlLogPod(c clientset.Interface, pod *v1.Pod) {
|
func kubectlLogPod(c clientset.Interface, pod *v1.Pod) {
|
||||||
container := pod.Spec.Containers
|
container := pod.Spec.Containers
|
||||||
for i := range container {
|
for i := range container {
|
||||||
logs, err := frameworkPod.GetPodLogs(c, pod.Namespace, pod.Name, container[i].Name)
|
logs, err := frameworkPod.GetPodLogs(context.TODO(), c, pod.Namespace, pod.Name, container[i].Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container[i].Name)
|
logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container[i].Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -30,7 +30,8 @@ import (
|
|||||||
// composeIntreeMigVolID create a volID similar to intree migration volID
|
// composeIntreeMigVolID create a volID similar to intree migration volID
|
||||||
// the migration volID format looks like below
|
// the migration volID format looks like below
|
||||||
// mig-mons-<hash>-image-<UUID_<poolhash>
|
// mig-mons-<hash>-image-<UUID_<poolhash>
|
||||||
// nolint:lll // ex: "mig_mons-b7f67366bb43f32e07d8a261a7840da9_image-e0b45b52-7e09-47d3-8f1b-806995fa4412_706f6f6c5f7265706c6963615f706f6f6c
|
//
|
||||||
|
//nolint:lll // ex: "mig_mons-b7f67366bb43f32e07d8a261a7840da9_image-e0b45b52-7e09-47d3-8f1b-806995fa4412_706f6f6c5f7265706c6963615f706f6f6c
|
||||||
func composeIntreeMigVolID(mons, rbdImageName string) string {
|
func composeIntreeMigVolID(mons, rbdImageName string) string {
|
||||||
poolField := hex.EncodeToString([]byte(defaultRBDPool))
|
poolField := hex.EncodeToString([]byte(defaultRBDPool))
|
||||||
monsField := monsPrefix + getMonsHash(mons)
|
monsField := monsPrefix + getMonsHash(mons)
|
||||||
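The comment above documents the in-tree migration volume handle layout. As a rough, self-contained illustration (assuming the mons hash is an MD5 digest, which matches the 32-character hash in the example string; the sample inputs below are made up):

```go
package e2e

import (
	"crypto/md5" //nolint:gosec // hash is used as an identifier, not for security
	"encoding/hex"
	"fmt"
)

// sampleMigVolID rebuilds the documented layout
// "mig_mons-<hash>_image-<UUID>_<hex(pool)>" from illustrative inputs.
func sampleMigVolID(mons, imageUUID, pool string) string {
	monsHash := fmt.Sprintf("%x", md5.Sum([]byte(mons)))
	poolHex := hex.EncodeToString([]byte(pool))

	return fmt.Sprintf("mig_mons-%s_image-%s_%s", monsHash, imageUUID, poolHex)
}
```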
|
@ -38,13 +38,14 @@ func createNamespace(c kubernetes.Interface, name string) error {
|
|||||||
Name: name,
|
Name: name,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
_, err := c.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})
|
ctx := context.TODO()
|
||||||
|
_, err := c.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
|
||||||
if err != nil && !apierrs.IsAlreadyExists(err) {
|
if err != nil && !apierrs.IsAlreadyExists(err) {
|
||||||
return fmt.Errorf("failed to create namespace: %w", err)
|
return fmt.Errorf("failed to create namespace: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return wait.PollImmediate(poll, timeout, func() (bool, error) {
|
return wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) {
|
||||||
_, err := c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
|
_, err := c.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
framework.Logf("Error getting namespace: '%s': %v", name, err)
|
framework.Logf("Error getting namespace: '%s': %v", name, err)
|
||||||
if apierrs.IsNotFound(err) {
|
if apierrs.IsNotFound(err) {
|
||||||
@ -63,13 +64,14 @@ func createNamespace(c kubernetes.Interface, name string) error {
|
|||||||
|
|
||||||
func deleteNamespace(c kubernetes.Interface, name string) error {
|
func deleteNamespace(c kubernetes.Interface, name string) error {
|
||||||
timeout := time.Duration(deployTimeout) * time.Minute
|
timeout := time.Duration(deployTimeout) * time.Minute
|
||||||
err := c.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{})
|
ctx := context.TODO()
|
||||||
|
err := c.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{})
|
||||||
if err != nil && !apierrs.IsNotFound(err) {
|
if err != nil && !apierrs.IsNotFound(err) {
|
||||||
return fmt.Errorf("failed to delete namespace: %w", err)
|
return fmt.Errorf("failed to delete namespace: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return wait.PollImmediate(poll, timeout, func() (bool, error) {
|
return wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) {
|
||||||
_, err = c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
|
_, err = c.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if apierrs.IsNotFound(err) {
|
if apierrs.IsNotFound(err) {
|
||||||
return true, nil
|
return true, nil
|
||||||
|
e2e/nfs.go
@ -24,7 +24,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
snapapi "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
|
snapapi "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
|
||||||
. "github.com/onsi/ginkgo/v2" // nolint
|
. "github.com/onsi/ginkgo/v2" //nolint:golint // e2e uses By() and other Ginkgo functions
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
@ -43,6 +43,7 @@ var (
|
|||||||
nfsRookCephNFS = "rook-nfs.yaml"
|
nfsRookCephNFS = "rook-nfs.yaml"
|
||||||
nfsDeploymentName = "csi-nfsplugin-provisioner"
|
nfsDeploymentName = "csi-nfsplugin-provisioner"
|
||||||
nfsDeamonSetName = "csi-nfsplugin"
|
nfsDeamonSetName = "csi-nfsplugin"
|
||||||
|
nfsContainerName = "csi-nfsplugin"
|
||||||
nfsDirPath = "../deploy/nfs/kubernetes/"
|
nfsDirPath = "../deploy/nfs/kubernetes/"
|
||||||
nfsExamplePath = examplePath + "nfs/"
|
nfsExamplePath = examplePath + "nfs/"
|
||||||
nfsPoolName = ".nfs"
|
nfsPoolName = ".nfs"
|
||||||
@ -79,6 +80,7 @@ func deleteNFSPlugin() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func createORDeleteNFSResources(f *framework.Framework, action kubectlAction) {
|
func createORDeleteNFSResources(f *framework.Framework, action kubectlAction) {
|
||||||
|
cephConfigFile := getConfigFile(cephConfconfigMap, deployPath, examplePath)
|
||||||
resources := []ResourceDeployer{
|
resources := []ResourceDeployer{
|
||||||
// shared resources
|
// shared resources
|
||||||
&yamlResource{
|
&yamlResource{
|
||||||
@ -86,7 +88,7 @@ func createORDeleteNFSResources(f *framework.Framework, action kubectlAction) {
|
|||||||
allowMissing: true,
|
allowMissing: true,
|
||||||
},
|
},
|
||||||
&yamlResource{
|
&yamlResource{
|
||||||
filename: examplePath + cephConfconfigMap,
|
filename: cephConfigFile,
|
||||||
allowMissing: true,
|
allowMissing: true,
|
||||||
},
|
},
|
||||||
// dependencies for provisioner
|
// dependencies for provisioner
|
||||||
@ -183,8 +185,8 @@ func createNFSStorageClass(
|
|||||||
|
|
||||||
timeout := time.Duration(deployTimeout) * time.Minute
|
timeout := time.Duration(deployTimeout) * time.Minute
|
||||||
|
|
||||||
return wait.PollImmediate(poll, timeout, func() (bool, error) {
|
return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(ctx context.Context) (bool, error) {
|
||||||
_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
|
_, err = c.StorageV1().StorageClasses().Create(ctx, &sc, metav1.CreateOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
framework.Logf("error creating StorageClass %q: %v", sc.Name, err)
|
framework.Logf("error creating StorageClass %q: %v", sc.Name, err)
|
||||||
if apierrs.IsAlreadyExists(err) {
|
if apierrs.IsAlreadyExists(err) {
|
||||||
@ -294,7 +296,7 @@ var _ = Describe("nfs", func() {
|
|||||||
logsCSIPods("app=csi-nfsplugin", c)
|
logsCSIPods("app=csi-nfsplugin", c)
|
||||||
|
|
||||||
// log all details from the namespace where Ceph-CSI is deployed
|
// log all details from the namespace where Ceph-CSI is deployed
|
||||||
e2edebug.DumpAllNamespaceInfo(c, cephCSINamespace)
|
e2edebug.DumpAllNamespaceInfo(context.TODO(), c, cephCSINamespace)
|
||||||
}
|
}
|
||||||
err := deleteConfigMap(nfsDirPath)
|
err := deleteConfigMap(nfsDirPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -362,6 +364,24 @@ var _ = Describe("nfs", func() {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
By("verify mountOptions support", func() {
|
||||||
|
err := createNFSStorageClass(f.ClientSet, f, false, nil)
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to create NFS storageclass: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = verifySeLinuxMountOption(f, pvcPath, appPath,
|
||||||
|
nfsDeamonSetName, nfsContainerName, cephCSINamespace)
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to verify mount options: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = deleteResource(nfsExamplePath + "storageclass.yaml")
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to delete NFS storageclass: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
By("verify RWOP volume support", func() {
|
By("verify RWOP volume support", func() {
|
||||||
err := createNFSStorageClass(f.ClientSet, f, false, nil)
|
err := createNFSStorageClass(f.ClientSet, f, false, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -408,7 +428,24 @@ var _ = Describe("nfs", func() {
|
|||||||
})
|
})
|
||||||
|
|
||||||
By("create a storageclass with pool and a PVC then bind it to an app", func() {
|
By("create a storageclass with pool and a PVC then bind it to an app", func() {
|
||||||
err := createNFSStorageClass(f.ClientSet, f, false, nil)
|
err := createNFSStorageClass(f.ClientSet, f, true, nil)
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to create NFS storageclass: %v", err)
|
||||||
|
}
|
||||||
|
err = validatePVCAndAppBinding(pvcPath, appPath, f)
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to validate NFS pvc and application binding: %v", err)
|
||||||
|
}
|
||||||
|
err = deleteResource(nfsExamplePath + "storageclass.yaml")
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to delete NFS storageclass: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
By("create a storageclass with sys,krb5i security and a PVC then bind it to an app", func() {
|
||||||
|
err := createNFSStorageClass(f.ClientSet, f, false, map[string]string{
|
||||||
|
"secTypes": "sys,krb5i",
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
framework.Failf("failed to create NFS storageclass: %v", err)
|
framework.Failf("failed to create NFS storageclass: %v", err)
|
||||||
}
|
}
|
||||||
|
@ -60,7 +60,7 @@ func checkNodeHasLabel(c kubernetes.Interface, labelKey, labelValue string) erro
|
|||||||
return fmt.Errorf("failed to list node: %w", err)
|
return fmt.Errorf("failed to list node: %w", err)
|
||||||
}
|
}
|
||||||
for i := range nodes.Items {
|
for i := range nodes.Items {
|
||||||
e2enode.ExpectNodeHasLabel(c, nodes.Items[i].Name, labelKey, labelValue)
|
e2enode.ExpectNodeHasLabel(context.TODO(), c, nodes.Items[i].Name, labelKey, labelValue)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
e2e/pod.go
@ -31,6 +31,7 @@ import (
|
|||||||
"k8s.io/kubernetes/pkg/client/conditions"
|
"k8s.io/kubernetes/pkg/client/conditions"
|
||||||
"k8s.io/kubernetes/test/e2e/framework"
|
"k8s.io/kubernetes/test/e2e/framework"
|
||||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||||
|
frameworkPod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||||
)
|
)
|
||||||
|
|
||||||
const errRWOPConflict = "node has pod using PersistentVolumeClaim with the same name and ReadWriteOncePod access mode."
|
const errRWOPConflict = "node has pod using PersistentVolumeClaim with the same name and ReadWriteOncePod access mode."
|
||||||
@ -60,8 +61,8 @@ func waitForDaemonSets(name, ns string, c kubernetes.Interface, t int) error {
|
|||||||
start := time.Now()
|
start := time.Now()
|
||||||
framework.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", timeout, ns)
|
framework.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", timeout, ns)
|
||||||
|
|
||||||
return wait.PollImmediate(poll, timeout, func() (bool, error) {
|
return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(ctx context.Context) (bool, error) {
|
||||||
ds, err := c.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
|
ds, err := c.AppsV1().DaemonSets(ns).Get(ctx, name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
|
framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
|
||||||
if strings.Contains(err.Error(), "not found") {
|
if strings.Contains(err.Error(), "not found") {
|
||||||
@ -97,8 +98,8 @@ func findPodAndContainerName(f *framework.Framework, ns, cn string, opt *metav1.
|
|||||||
podList *v1.PodList
|
podList *v1.PodList
|
||||||
listErr error
|
listErr error
|
||||||
)
|
)
|
||||||
err := wait.PollImmediate(poll, timeout, func() (bool, error) {
|
err := wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(ctx context.Context) (bool, error) {
|
||||||
podList, listErr = e2epod.PodClientNS(f, ns).List(context.TODO(), *opt)
|
podList, listErr = e2epod.PodClientNS(f, ns).List(ctx, *opt)
|
||||||
if listErr != nil {
|
if listErr != nil {
|
||||||
if isRetryableAPIError(listErr) {
|
if isRetryableAPIError(listErr) {
|
||||||
return false, nil
|
return false, nil
|
||||||
@ -164,6 +165,28 @@ func execCommandInDaemonsetPod(
|
|||||||
f *framework.Framework,
|
f *framework.Framework,
|
||||||
c, daemonsetName, nodeName, containerName, ns string,
|
c, daemonsetName, nodeName, containerName, ns string,
|
||||||
) (string, error) {
|
) (string, error) {
|
||||||
|
podName, err := getDaemonsetPodOnNode(f, daemonsetName, nodeName, ns)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd := []string{"/bin/sh", "-c", c}
|
||||||
|
podOpt := e2epod.ExecOptions{
|
||||||
|
Command: cmd,
|
||||||
|
Namespace: ns,
|
||||||
|
PodName: podName,
|
||||||
|
ContainerName: containerName,
|
||||||
|
CaptureStdout: true,
|
||||||
|
CaptureStderr: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
_ /* stdout */, stderr, err := execWithRetry(f, &podOpt)
|
||||||
|
|
||||||
|
return stderr, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// getDaemonsetPodOnNode returns the name of a daemonset pod on a particular node.
|
||||||
|
func getDaemonsetPodOnNode(f *framework.Framework, daemonsetName, nodeName, ns string) (string, error) {
|
||||||
selector, err := getDaemonSetLabelSelector(f, ns, daemonsetName)
|
selector, err := getDaemonSetLabelSelector(f, ns, daemonsetName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
@ -187,19 +210,7 @@ func execCommandInDaemonsetPod(
|
|||||||
return "", fmt.Errorf("%s daemonset pod on node %s in namespace %s not found", daemonsetName, nodeName, ns)
|
return "", fmt.Errorf("%s daemonset pod on node %s in namespace %s not found", daemonsetName, nodeName, ns)
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd := []string{"/bin/sh", "-c", c}
|
return podName, nil
|
||||||
podOpt := e2epod.ExecOptions{
|
|
||||||
Command: cmd,
|
|
||||||
Namespace: ns,
|
|
||||||
PodName: podName,
|
|
||||||
ContainerName: containerName,
|
|
||||||
CaptureStdout: true,
|
|
||||||
CaptureStderr: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
_ /* stdout */, stderr, err := execWithRetry(f, &podOpt)
|
|
||||||
|
|
||||||
return stderr, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
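This hunk factors the pod lookup out of `execCommandInDaemonsetPod` into `getDaemonsetPodOnNode`, so other checks (the SELinux mount-option verification added below, for instance) can resolve the nodeplugin pod without exec'ing into it. A framework-free sketch of what such a lookup boils down to, with the selector and node name supplied by the caller:

```go
package e2e

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// daemonSetPodOnNode lists pods matching a daemonset's label selector,
// restricted to a single node, and returns the first match. This is a
// simplified sketch; the e2e helper derives the selector from the
// daemonset object itself.
func daemonSetPodOnNode(ctx context.Context, c kubernetes.Interface, ns, labelSelector, nodeName string) (string, error) {
	pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{
		LabelSelector: labelSelector,
		FieldSelector: "spec.nodeName=" + nodeName,
	})
	if err != nil {
		return "", err
	}
	if len(pods.Items) == 0 {
		return "", fmt.Errorf("no daemonset pod on node %s in namespace %s", nodeName, ns)
	}

	return pods.Items[0].Name, nil
}
```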
// listPods returns slice of pods matching given ListOptions and namespace.
|
// listPods returns slice of pods matching given ListOptions and namespace.
|
||||||
@ -215,7 +226,7 @@ func listPods(f *framework.Framework, ns string, opt *metav1.ListOptions) ([]v1.
|
|||||||
func execWithRetry(f *framework.Framework, opts *e2epod.ExecOptions) (string, string, error) {
|
func execWithRetry(f *framework.Framework, opts *e2epod.ExecOptions) (string, string, error) {
|
||||||
timeout := time.Duration(deployTimeout) * time.Minute
|
timeout := time.Duration(deployTimeout) * time.Minute
|
||||||
var stdOut, stdErr string
|
var stdOut, stdErr string
|
||||||
err := wait.PollImmediate(poll, timeout, func() (bool, error) {
|
err := wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
|
||||||
var execErr error
|
var execErr error
|
||||||
stdOut, stdErr, execErr = e2epod.ExecWithOptions(f, *opts)
|
stdOut, stdErr, execErr = e2epod.ExecWithOptions(f, *opts)
|
||||||
if execErr != nil {
|
if execErr != nil {
|
||||||
@ -353,8 +364,8 @@ func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int, ex
|
|||||||
start := time.Now()
|
start := time.Now()
|
||||||
framework.Logf("Waiting up to %v to be in Running state", name)
|
framework.Logf("Waiting up to %v to be in Running state", name)
|
||||||
|
|
||||||
return wait.PollImmediate(poll, timeout, func() (bool, error) {
|
return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(ctx context.Context) (bool, error) {
|
||||||
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
|
pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if isRetryableAPIError(err) {
|
if isRetryableAPIError(err) {
|
||||||
return false, nil
|
return false, nil
|
||||||
@ -369,7 +380,7 @@ func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int, ex
|
|||||||
return false, conditions.ErrPodCompleted
|
return false, conditions.ErrPodCompleted
|
||||||
case v1.PodPending:
|
case v1.PodPending:
|
||||||
if expectedError != "" {
|
if expectedError != "" {
|
||||||
events, err := c.CoreV1().Events(ns).List(context.TODO(), metav1.ListOptions{
|
events, err := c.CoreV1().Events(ns).List(ctx, metav1.ListOptions{
|
||||||
FieldSelector: fmt.Sprintf("involvedObject.name=%s", name),
|
FieldSelector: fmt.Sprintf("involvedObject.name=%s", name),
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -395,15 +406,16 @@ func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int, ex
|
|||||||
|
|
||||||
func deletePod(name, ns string, c kubernetes.Interface, t int) error {
|
func deletePod(name, ns string, c kubernetes.Interface, t int) error {
|
||||||
timeout := time.Duration(t) * time.Minute
|
timeout := time.Duration(t) * time.Minute
|
||||||
err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
|
ctx := context.TODO()
|
||||||
|
err := c.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to delete app: %w", err)
|
return fmt.Errorf("failed to delete app: %w", err)
|
||||||
}
|
}
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
framework.Logf("Waiting for pod %v to be deleted", name)
|
framework.Logf("Waiting for pod %v to be deleted", name)
|
||||||
|
|
||||||
return wait.PollImmediate(poll, timeout, func() (bool, error) {
|
return wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) {
|
||||||
_, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
|
_, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if isRetryableAPIError(err) {
|
if isRetryableAPIError(err) {
|
||||||
return false, nil
|
return false, nil
|
||||||
@ -420,7 +432,7 @@ func deletePod(name, ns string, c kubernetes.Interface, t int) error {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// nolint:unparam // currently skipNotFound is always false, this can change in the future
|
//nolint:unparam // currently skipNotFound is always false, this can change in the future
|
||||||
func deletePodWithLabel(label, ns string, skipNotFound bool) error {
|
func deletePodWithLabel(label, ns string, skipNotFound bool) error {
|
||||||
err := retryKubectlArgs(
|
err := retryKubectlArgs(
|
||||||
ns,
|
ns,
|
||||||
@ -541,3 +553,73 @@ func validateRWOPPodCreation(
|
|||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// verifySeLinuxMountOption verifies the SeLinux context MountOption added to PV.Spec.MountOption
|
||||||
|
// is successfully used by nodeplugin during mounting by checking for its presence in the
|
||||||
|
// nodeplugin container logs.
|
||||||
|
func verifySeLinuxMountOption(
|
||||||
|
f *framework.Framework,
|
||||||
|
pvcPath, appPath, daemonSetName, cn, ns string,
|
||||||
|
) error {
|
||||||
|
mountOption := "context=\"system_u:object_r:container_file_t:s0:c0,c1\""
|
||||||
|
|
||||||
|
// create PVC
|
||||||
|
pvc, err := loadPVC(pvcPath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to load pvc: %w", err)
|
||||||
|
}
|
||||||
|
pvc.Namespace = f.UniqueName
|
||||||
|
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create PVC: %w", err)
|
||||||
|
}
|
||||||
|
// modify PV spec.MountOptions
|
||||||
|
pv, err := getBoundPV(f.ClientSet, pvc)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get PV: %w", err)
|
||||||
|
}
|
||||||
|
pv.Spec.MountOptions = []string{mountOption}
|
||||||
|
|
||||||
|
// update PV
|
||||||
|
_, err = f.ClientSet.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to update pv: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
app, err := loadApp(appPath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to load application: %w", err)
|
||||||
|
}
|
||||||
|
app.Namespace = f.UniqueName
|
||||||
|
err = createApp(f.ClientSet, app, deployTimeout)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create application: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pod, err := f.ClientSet.CoreV1().Pods(f.UniqueName).Get(context.TODO(), app.Name, metav1.GetOptions{})
|
||||||
|
if err != nil {
|
||||||
|
framework.Logf("Error occurred getting pod %s in namespace %s", app.Name, f.UniqueName)
|
||||||
|
|
||||||
|
return fmt.Errorf("failed to get pod: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
nodepluginPodName, err := getDaemonsetPodOnNode(f, daemonSetName, pod.Spec.NodeName, ns)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get daemonset pod on node: %w", err)
|
||||||
|
}
|
||||||
|
logs, err := frameworkPod.GetPodLogs(context.TODO(), f.ClientSet, ns, nodepluginPodName, cn)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get pod logs from container %s/%s/%s : %w", ns, nodepluginPodName, cn, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.Contains(logs, mountOption) {
|
||||||
|
return fmt.Errorf("mount option %s not found in logs: %s", mountOption, logs)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = deletePVCAndApp("", f, pvc, app)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to delete PVC and application: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
e2e/pvc.go
@ -44,9 +44,10 @@ func loadPVC(path string) (*v1.PersistentVolumeClaim, error) {
|
|||||||
|
|
||||||
func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, t int) error {
|
func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, t int) error {
|
||||||
timeout := time.Duration(t) * time.Minute
|
timeout := time.Duration(t) * time.Minute
|
||||||
|
ctx := context.TODO()
|
||||||
pv := &v1.PersistentVolume{}
|
pv := &v1.PersistentVolume{}
|
||||||
var err error
|
var err error
|
||||||
_, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
|
_, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to create pvc: %w", err)
|
return fmt.Errorf("failed to create pvc: %w", err)
|
||||||
}
|
}
|
||||||
@ -58,9 +59,9 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
|
|||||||
start := time.Now()
|
start := time.Now()
|
||||||
framework.Logf("Waiting up to %v to be in Bound state", pvc)
|
framework.Logf("Waiting up to %v to be in Bound state", pvc)
|
||||||
|
|
||||||
return wait.PollImmediate(poll, timeout, func() (bool, error) {
|
return wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) {
|
||||||
framework.Logf("waiting for PVC %s (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
|
framework.Logf("waiting for PVC %s (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
|
||||||
pvc, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
pvc, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
framework.Logf("Error getting pvc %q in namespace %q: %v", name, namespace, err)
|
framework.Logf("Error getting pvc %q in namespace %q: %v", name, namespace, err)
|
||||||
if isRetryableAPIError(err) {
|
if isRetryableAPIError(err) {
|
||||||
@ -77,7 +78,7 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
|
|||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
|
pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if isRetryableAPIError(err) {
|
if isRetryableAPIError(err) {
|
||||||
return false, nil
|
return false, nil
|
||||||
@ -89,6 +90,7 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
|
|||||||
return false, fmt.Errorf("failed to get pv: %w", err)
|
return false, fmt.Errorf("failed to get pv: %w", err)
|
||||||
}
|
}
|
||||||
err = e2epv.WaitOnPVandPVC(
|
err = e2epv.WaitOnPVandPVC(
|
||||||
|
ctx,
|
||||||
c,
|
c,
|
||||||
&framework.TimeoutContext{ClaimBound: timeout, PVBound: timeout},
|
&framework.TimeoutContext{ClaimBound: timeout, PVBound: timeout},
|
||||||
namespace,
|
namespace,
|
||||||
@ -116,11 +118,12 @@ func createPVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v
|
|||||||
}
|
}
|
||||||
|
|
||||||
func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, t int) error {
|
func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, t int) error {
|
||||||
err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{})
|
ctx := context.TODO()
|
||||||
|
err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, metav1.DeleteOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to delete pvc: %w", err)
|
return fmt.Errorf("failed to delete pvc: %w", err)
|
||||||
}
|
}
|
||||||
err = c.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{})
|
err = c.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, metav1.DeleteOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to delete pv: %w", err)
|
return fmt.Errorf("failed to delete pv: %w", err)
|
||||||
}
|
}
|
||||||
@ -129,7 +132,7 @@ func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v
|
|||||||
start := time.Now()
|
start := time.Now()
|
||||||
|
|
||||||
pvcToDelete := pvc
|
pvcToDelete := pvc
|
||||||
err = wait.PollImmediate(poll, timeout, func() (bool, error) {
|
err = wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) {
|
||||||
// Check that the PVC is deleted.
|
// Check that the PVC is deleted.
|
||||||
framework.Logf(
|
framework.Logf(
|
||||||
"waiting for PVC %s in state %s to be deleted (%d seconds elapsed)",
|
"waiting for PVC %s in state %s to be deleted (%d seconds elapsed)",
|
||||||
@ -138,7 +141,7 @@ func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v
|
|||||||
int(time.Since(start).Seconds()))
|
int(time.Since(start).Seconds()))
|
||||||
pvcToDelete, err = c.CoreV1().
|
pvcToDelete, err = c.CoreV1().
|
||||||
PersistentVolumeClaims(pvc.Namespace).
|
PersistentVolumeClaims(pvc.Namespace).
|
||||||
Get(context.TODO(), pvc.Name, metav1.GetOptions{})
|
Get(ctx, pvc.Name, metav1.GetOptions{})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if pvcToDelete.Status.Phase == "" {
|
if pvcToDelete.Status.Phase == "" {
|
||||||
// this is unexpected, an empty Phase is not defined
|
// this is unexpected, an empty Phase is not defined
|
||||||
@ -167,7 +170,7 @@ func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v
|
|||||||
start = time.Now()
|
start = time.Now()
|
||||||
pvToDelete := pv
|
pvToDelete := pv
|
||||||
|
|
||||||
return wait.PollImmediate(poll, timeout, func() (bool, error) {
|
return wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) {
|
||||||
// Check that the PV is deleted.
|
// Check that the PV is deleted.
|
||||||
framework.Logf(
|
framework.Logf(
|
||||||
"waiting for PV %s in state %s to be deleted (%d seconds elapsed)",
|
"waiting for PV %s in state %s to be deleted (%d seconds elapsed)",
|
||||||
@ -175,7 +178,7 @@ func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v
|
|||||||
pvToDelete.Status.String(),
|
pvToDelete.Status.String(),
|
||||||
int(time.Since(start).Seconds()))
|
int(time.Since(start).Seconds()))
|
||||||
|
|
||||||
pvToDelete, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
|
pvToDelete, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
@ -196,8 +199,13 @@ func getPersistentVolumeClaim(c kubernetes.Interface, namespace, name string) (*
|
|||||||
var pvc *v1.PersistentVolumeClaim
|
var pvc *v1.PersistentVolumeClaim
|
||||||
var err error
|
var err error
|
||||||
timeout := time.Duration(deployTimeout) * time.Minute
|
timeout := time.Duration(deployTimeout) * time.Minute
|
||||||
err = wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {
|
err = wait.PollUntilContextTimeout(
|
||||||
pvc, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
context.TODO(),
|
||||||
|
1*time.Second,
|
||||||
|
timeout,
|
||||||
|
true,
|
||||||
|
func(ctx context.Context) (bool, error) {
|
||||||
|
pvc, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
framework.Logf("Error getting pvc %q in namespace %q: %v", name, namespace, err)
|
framework.Logf("Error getting pvc %q in namespace %q: %v", name, namespace, err)
|
||||||
if isRetryableAPIError(err) {
|
if isRetryableAPIError(err) {
|
||||||
@ -219,8 +227,13 @@ func getPersistentVolume(c kubernetes.Interface, name string) (*v1.PersistentVol
|
|||||||
var pv *v1.PersistentVolume
|
var pv *v1.PersistentVolume
|
||||||
var err error
|
var err error
|
||||||
timeout := time.Duration(deployTimeout) * time.Minute
|
timeout := time.Duration(deployTimeout) * time.Minute
|
||||||
err = wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {
|
err = wait.PollUntilContextTimeout(
|
||||||
pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), name, metav1.GetOptions{})
|
context.TODO(),
|
||||||
|
1*time.Second,
|
||||||
|
timeout,
|
||||||
|
true,
|
||||||
|
func(ctx context.Context) (bool, error) {
|
||||||
|
pv, err = c.CoreV1().PersistentVolumes().Get(ctx, name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
framework.Logf("Error getting pv %q: %v", name, err)
|
framework.Logf("Error getting pv %q: %v", name, err)
|
||||||
if isRetryableAPIError(err) {
|
if isRetryableAPIError(err) {
|
||||||
@ -256,6 +269,7 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
|
|||||||
timeout := time.Duration(t) * time.Minute
|
timeout := time.Duration(t) * time.Minute
|
||||||
nameSpace := pvc.Namespace
|
nameSpace := pvc.Namespace
|
||||||
name := pvc.Name
|
name := pvc.Name
|
||||||
|
ctx := context.TODO()
|
||||||
var err error
|
var err error
|
||||||
framework.Logf("Deleting PersistentVolumeClaim %v on namespace %v", name, nameSpace)
|
framework.Logf("Deleting PersistentVolumeClaim %v on namespace %v", name, nameSpace)
|
||||||
|
|
||||||
@ -268,20 +282,20 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
|
|||||||
return fmt.Errorf("failed to get pv: %w", err)
|
return fmt.Errorf("failed to get pv: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = c.CoreV1().PersistentVolumeClaims(nameSpace).Delete(context.TODO(), name, metav1.DeleteOptions{})
|
err = c.CoreV1().PersistentVolumeClaims(nameSpace).Delete(ctx, name, metav1.DeleteOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("delete of PVC %v failed: %w", name, err)
|
return fmt.Errorf("delete of PVC %v failed: %w", name, err)
|
||||||
}
|
}
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
|
|
||||||
return wait.PollImmediate(poll, timeout, func() (bool, error) {
|
return wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) {
|
||||||
// Check that the PVC is really deleted.
|
// Check that the PVC is really deleted.
|
||||||
framework.Logf(
|
framework.Logf(
|
||||||
"waiting for PVC %s in state %s to be deleted (%d seconds elapsed)",
|
"waiting for PVC %s in state %s to be deleted (%d seconds elapsed)",
|
||||||
name,
|
name,
|
||||||
pvc.Status.String(),
|
pvc.Status.String(),
|
||||||
int(time.Since(start).Seconds()))
|
int(time.Since(start).Seconds()))
|
||||||
pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{})
|
pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(ctx, name, metav1.GetOptions{})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
framework.Logf("PVC %s (status: %s) has not been deleted yet, rechecking...", name, pvc.Status)
|
framework.Logf("PVC %s (status: %s) has not been deleted yet, rechecking...", name, pvc.Status)
|
||||||
|
|
||||||
@ -297,7 +311,7 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Examine the pv.ClaimRef and UID. Expect nil values.
|
// Examine the pv.ClaimRef and UID. Expect nil values.
|
||||||
oldPV, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
|
oldPV, err := c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
framework.Logf("PV %s (status: %s) has not been deleted yet, rechecking...", pv.Name, oldPV.Status)
|
framework.Logf("PV %s (status: %s) has not been deleted yet, rechecking...", pv.Name, oldPV.Status)
|
||||||
|
|
||||||
@ -383,7 +397,7 @@ func getMetricsForPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, t i
|
|||||||
// retry as kubelet does not immediately have the metrics available
|
// retry as kubelet does not immediately have the metrics available
|
||||||
timeout := time.Duration(t) * time.Minute
|
timeout := time.Duration(t) * time.Minute
|
||||||
|
|
||||||
return wait.PollImmediate(poll, timeout, func() (bool, error) {
|
return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
|
||||||
stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
|
stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
framework.Logf("failed to get metrics for pvc %q (%v): %v", pvc.Name, err, stdErr)
|
framework.Logf("failed to get metrics for pvc %q (%v): %v", pvc.Name, err, stdErr)
|
||||||
|
e2e/rbd.go
@ -18,14 +18,13 @@ package e2e
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ceph/ceph-csi/internal/util"
|
"github.com/ceph/ceph-csi/internal/util"
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo/v2" // nolint
|
. "github.com/onsi/ginkgo/v2" //nolint:golint // e2e uses By() and other Ginkgo functions
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
@ -45,12 +44,14 @@ var (
|
|||||||
configMap = "csi-config-map.yaml"
|
configMap = "csi-config-map.yaml"
|
||||||
cephConfconfigMap = "ceph-conf.yaml"
|
cephConfconfigMap = "ceph-conf.yaml"
|
||||||
csiDriverObject = "csidriver.yaml"
|
csiDriverObject = "csidriver.yaml"
|
||||||
rbdDirPath = "../deploy/rbd/kubernetes/"
|
deployPath = "../deploy/"
|
||||||
|
rbdDirPath = deployPath + "/rbd/kubernetes/"
|
||||||
examplePath = "../examples/"
|
examplePath = "../examples/"
|
||||||
rbdExamplePath = examplePath + "/rbd/"
|
rbdExamplePath = examplePath + "/rbd/"
|
||||||
e2eTemplatesPath = "../e2e/templates/"
|
e2eTemplatesPath = "../e2e/templates/"
|
||||||
rbdDeploymentName = "csi-rbdplugin-provisioner"
|
rbdDeploymentName = "csi-rbdplugin-provisioner"
|
||||||
rbdDaemonsetName = "csi-rbdplugin"
|
rbdDaemonsetName = "csi-rbdplugin"
|
||||||
|
rbdContainerName = "csi-rbdplugin"
|
||||||
defaultRBDPool = "replicapool"
|
defaultRBDPool = "replicapool"
|
||||||
erasureCodedPool = "ec-pool"
|
erasureCodedPool = "ec-pool"
|
||||||
noDataPool = ""
|
noDataPool = ""
|
||||||
@ -130,6 +131,7 @@ func deleteRBDPlugin() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func createORDeleteRbdResources(action kubectlAction) {
|
func createORDeleteRbdResources(action kubectlAction) {
|
||||||
|
cephConfigFile := getConfigFile(cephConfconfigMap, deployPath, examplePath)
|
||||||
resources := []ResourceDeployer{
|
resources := []ResourceDeployer{
|
||||||
// shared resources
|
// shared resources
|
||||||
&yamlResource{
|
&yamlResource{
|
||||||
@ -137,7 +139,7 @@ func createORDeleteRbdResources(action kubectlAction) {
|
|||||||
allowMissing: true,
|
allowMissing: true,
|
||||||
},
|
},
|
||||||
&yamlResource{
|
&yamlResource{
|
||||||
filename: examplePath + cephConfconfigMap,
|
filename: cephConfigFile,
|
||||||
allowMissing: true,
|
allowMissing: true,
|
||||||
},
|
},
|
||||||
// dependencies for provisioner
|
// dependencies for provisioner
|
||||||
@ -204,7 +206,8 @@ func checkGetKeyError(err error, stdErr string) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// checkClusternameInMetadata check for cluster name metadata on RBD image.
|
// checkClusternameInMetadata check for cluster name metadata on RBD image.
|
||||||
// nolint:nilerr // intentionally returning nil on error in the retry loop.
|
//
|
||||||
|
//nolint:nilerr // intentionally returning nil on error in the retry loop.
|
||||||
func checkClusternameInMetadata(f *framework.Framework, ns, pool, image string) {
|
func checkClusternameInMetadata(f *framework.Framework, ns, pool, image string) {
|
||||||
t := time.Duration(deployTimeout) * time.Minute
|
t := time.Duration(deployTimeout) * time.Minute
|
||||||
var (
|
var (
|
||||||
@ -212,7 +215,7 @@ func checkClusternameInMetadata(f *framework.Framework, ns, pool, image string)
|
|||||||
stdErr string
|
stdErr string
|
||||||
execErr error
|
execErr error
|
||||||
)
|
)
|
||||||
err := wait.PollImmediate(poll, t, func() (bool, error) {
|
err := wait.PollUntilContextTimeout(context.TODO(), poll, t, true, func(_ context.Context) (bool, error) {
|
||||||
coName, stdErr, execErr = execCommandInToolBoxPod(f,
|
coName, stdErr, execErr = execCommandInToolBoxPod(f,
|
||||||
fmt.Sprintf("rbd image-meta get %s --image=%s %s", rbdOptions(pool), image, clusterNameKey),
|
fmt.Sprintf("rbd image-meta get %s --image=%s %s", rbdOptions(pool), image, clusterNameKey),
|
||||||
ns)
|
ns)
|
||||||
@ -355,7 +358,7 @@ var _ = Describe("RBD", func() {
|
|||||||
logsCSIPods("app=csi-rbdplugin", c)
|
logsCSIPods("app=csi-rbdplugin", c)
|
||||||
|
|
||||||
// log all details from the namespace where Ceph-CSI is deployed
|
// log all details from the namespace where Ceph-CSI is deployed
|
||||||
e2edebug.DumpAllNamespaceInfo(c, cephCSINamespace)
|
e2edebug.DumpAllNamespaceInfo(context.TODO(), c, cephCSINamespace)
|
||||||
}
|
}
|
||||||
|
|
||||||
err := deleteConfigMap(rbdDirPath)
|
err := deleteConfigMap(rbdDirPath)
|
||||||
@ -419,7 +422,7 @@ var _ = Describe("RBD", func() {
|
|||||||
By("verify PVC and app binding on helm installation", func() {
|
By("verify PVC and app binding on helm installation", func() {
|
||||||
err := validatePVCAndAppBinding(pvcPath, appPath, f)
|
err := validatePVCAndAppBinding(pvcPath, appPath, f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
framework.Failf("failed to validate CephFS pvc and application binding: %v", err)
|
framework.Failf("failed to validate RBD pvc and application binding: %v", err)
|
||||||
}
|
}
|
||||||
// validate created backend rbd images
|
// validate created backend rbd images
|
||||||
validateRBDImageCount(f, 0, defaultRBDPool)
|
validateRBDImageCount(f, 0, defaultRBDPool)
|
||||||
@ -441,6 +444,14 @@ var _ = Describe("RBD", func() {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
By("verify mountOptions support", func() {
|
||||||
|
err := verifySeLinuxMountOption(f, pvcPath, appPath,
|
||||||
|
rbdDaemonsetName, rbdContainerName, cephCSINamespace)
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to verify mount options: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
By("create a PVC and check PVC/PV metadata on RBD image", func() {
|
By("create a PVC and check PVC/PV metadata on RBD image", func() {
|
||||||
pvc, err := loadPVC(pvcPath)
|
pvc, err := loadPVC(pvcPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -596,7 +607,7 @@ var _ = Describe("RBD", func() {
|
|||||||
validateRBDImageCount(f, 1, defaultRBDPool)
|
validateRBDImageCount(f, 1, defaultRBDPool)
|
||||||
validateOmapCount(f, 1, rbdType, defaultRBDPool, volumesType)
|
validateOmapCount(f, 1, rbdType, defaultRBDPool, volumesType)
|
||||||
// create namespace for reattach PVC, deletion will be taken care by framework
|
// create namespace for reattach PVC, deletion will be taken care by framework
|
||||||
ns, err := f.CreateNamespace(reattachPVCNamespace, nil)
|
ns, err := f.CreateNamespace(context.TODO(), reattachPVCNamespace, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
framework.Failf("failed to create namespace: %v", err)
|
framework.Failf("failed to create namespace: %v", err)
|
||||||
}
|
}
|
||||||
@ -1089,6 +1100,45 @@ var _ = Describe("RBD", func() {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
By("create a PVC and bind it to an app with ext4 as the FS and 1024 inodes ", func() {
|
||||||
|
err := deleteResource(rbdExamplePath + "storageclass.yaml")
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to delete storageclass: %v", err)
|
||||||
|
}
|
||||||
|
err = createRBDStorageClass(
|
||||||
|
f.ClientSet,
|
||||||
|
f,
|
||||||
|
defaultSCName,
|
||||||
|
nil,
|
||||||
|
map[string]string{
|
||||||
|
"csi.storage.k8s.io/fstype": "ext4",
|
||||||
|
"mkfsOptions": "-N1024", // 1024 inodes
|
||||||
|
},
|
||||||
|
deletePolicy)
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to create storageclass: %v", err)
|
||||||
|
}
|
||||||
|
err = validatePVCAndAppBinding(pvcPath, appPath, f)
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to validate pvc and application binding: %v", err)
|
||||||
|
}
|
||||||
|
err = validateInodeCount(pvcPath, f, 1024)
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to validate pvc and application binding: %v", err)
|
||||||
|
}
|
||||||
|
// validate created backend rbd images
|
||||||
|
validateRBDImageCount(f, 0, defaultRBDPool)
|
||||||
|
validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType)
|
||||||
|
err = deleteResource(rbdExamplePath + "storageclass.yaml")
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to delete storageclass: %v", err)
|
||||||
|
}
|
||||||
|
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy)
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to create storageclass: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
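The new test above wires `mkfsOptions` through the StorageClass so the provisioned ext4 filesystem is created with only 1024 inodes, and `validateInodeCount` then checks the figure from inside the application pod. The parameters involved, plus an illustrative shell command such a check could run (the mount path is an assumption, not taken from the diff):

```go
package e2e

// Parameters exercised by the "-N1024" test: fstype selects ext4 and
// mkfsOptions is passed through to mkfs.ext4 at volume creation time.
var ext4SmallInodeParameters = map[string]string{
	"csi.storage.k8s.io/fstype": "ext4",
	"mkfsOptions":               "-N1024", // limit the filesystem to 1024 inodes
}

// Illustrative command for reading the inode total inside the app pod;
// the mount path is a placeholder.
const inodeTotalCmd = "df --output=itotal /var/lib/www/html | tail -n1"
```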
|
|
||||||
By("create a PVC and bind it to an app using rbd-nbd mounter", func() {
|
By("create a PVC and bind it to an app using rbd-nbd mounter", func() {
|
||||||
if !testNBD {
|
if !testNBD {
|
||||||
framework.Logf("skipping NBD test")
|
framework.Logf("skipping NBD test")
|
||||||
@ -1207,7 +1257,7 @@ var _ = Describe("RBD", func() {
|
|||||||
}
|
}
|
||||||
err = validatePVCAndAppBinding(pvcPath, appPath, f)
|
err = validatePVCAndAppBinding(pvcPath, appPath, f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
framework.Failf("failed to validate CephFS pvc and application binding: %v", err)
|
framework.Failf("failed to validate RBD pvc and application binding: %v", err)
|
||||||
}
|
}
|
||||||
// validate created backend rbd images
|
// validate created backend rbd images
|
||||||
validateRBDImageCount(f, 0, defaultRBDPool)
|
validateRBDImageCount(f, 0, defaultRBDPool)
|
||||||
@ -1372,7 +1422,7 @@ var _ = Describe("RBD", func() {
|
|||||||
}
|
}
|
||||||
err = validatePVCAndAppBinding(pvcPath, appPath, f)
|
err = validatePVCAndAppBinding(pvcPath, appPath, f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
framework.Failf("failed to validate CephFS pvc and application binding: %v", err)
|
framework.Failf("failed to validate RBD pvc and application binding: %v", err)
|
||||||
}
|
}
|
||||||
// validate created backend rbd images
|
// validate created backend rbd images
|
||||||
validateRBDImageCount(f, 0, defaultRBDPool)
|
validateRBDImageCount(f, 0, defaultRBDPool)
|
||||||
@ -1848,7 +1898,7 @@ var _ = Describe("RBD", func() {
|
|||||||
|
|
||||||
timeout := time.Duration(deployTimeout) * time.Minute
|
timeout := time.Duration(deployTimeout) * time.Minute
|
||||||
var reason string
|
var reason string
|
||||||
err = wait.PollImmediate(poll, timeout, func() (bool, error) {
|
err = wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
|
||||||
var runningAttachCmd string
|
var runningAttachCmd string
|
||||||
runningAttachCmd, stdErr, err = execCommandInContainer(
|
runningAttachCmd, stdErr, err = execCommandInContainer(
|
||||||
f,
|
f,
|
||||||
@ -1873,7 +1923,7 @@ var _ = Describe("RBD", func() {
|
|||||||
return true, nil
|
return true, nil
|
||||||
})
|
})
|
||||||
|
|
||||||
if errors.Is(err, wait.ErrWaitTimeout) {
|
if wait.Interrupted(err) {
|
||||||
framework.Failf("timed out waiting for the rbd-nbd process: %s", reason)
|
framework.Failf("timed out waiting for the rbd-nbd process: %s", reason)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -2833,6 +2883,55 @@ var _ = Describe("RBD", func() {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
By("create storageClass with encrypted as false", func() {
|
||||||
|
err := deleteResource(rbdExamplePath + "storageclass.yaml")
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to delete storageclass: %v", err)
|
||||||
|
}
|
||||||
|
err = createRBDStorageClass(
|
||||||
|
f.ClientSet,
|
||||||
|
f,
|
||||||
|
defaultSCName,
|
||||||
|
nil,
|
||||||
|
map[string]string{"encrypted": "false"},
|
||||||
|
deletePolicy)
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to create storageclass: %v", err)
|
||||||
|
}
|
||||||
|
// set up PVC
|
||||||
|
pvc, err := loadPVC(pvcPath)
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to load PVC: %v", err)
|
||||||
|
}
|
||||||
|
pvc.Namespace = f.UniqueName
|
||||||
|
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to create PVC: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// validate created backend rbd images
|
||||||
|
validateRBDImageCount(f, 1, defaultRBDPool)
|
||||||
|
validateOmapCount(f, 1, rbdType, defaultRBDPool, volumesType)
|
||||||
|
|
||||||
|
// clean up after ourselves
|
||||||
|
err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to delete PVC: %v", err)
|
||||||
|
}
|
||||||
|
// validate created backend rbd images
|
||||||
|
validateRBDImageCount(f, 0, defaultRBDPool)
|
||||||
|
validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType)
|
||||||
|
|
||||||
|
err = deleteResource(rbdExamplePath + "storageclass.yaml")
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to delete storageclass: %v", err)
|
||||||
|
}
|
||||||
|
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy)
|
||||||
|
if err != nil {
|
||||||
|
framework.Failf("failed to create storageclass: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
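The `encrypted: "false"` test confirms that an explicit negative value provisions a plain image rather than being rejected. A made-up helper showing the kind of boolean parsing such a StorageClass parameter typically goes through:

```go
package e2e

import "strconv"

// encryptionRequested is illustrative only: absence of the parameter means
// no encryption, and an explicit "false" must parse cleanly to false
// instead of failing volume creation.
func encryptionRequested(parameters map[string]string) (bool, error) {
	value, ok := parameters["encrypted"]
	if !ok {
		return false, nil
	}

	return strconv.ParseBool(value)
}
```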
|
|
||||||
By("validate RBD static FileSystem PVC", func() {
|
By("validate RBD static FileSystem PVC", func() {
|
||||||
err := validateRBDStaticPV(f, appPath, false, false)
|
err := validateRBDStaticPV(f, appPath, false, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -3697,7 +3796,7 @@ var _ = Describe("RBD", func() {
|
|||||||
validateRBDImageCount(f, 0, defaultRBDPool)
|
validateRBDImageCount(f, 0, defaultRBDPool)
|
||||||
validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType)
|
validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType)
|
||||||
|
|
||||||
// Create a PVC and bind it to an app within the namesapce
|
// Create a PVC and bind it to an app within the namespace
|
||||||
err = validatePVCAndAppBinding(pvcPath, appPath, f)
|
err = validatePVCAndAppBinding(pvcPath, appPath, f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
framework.Failf("failed to validate pvc and application binding: %v", err)
|
framework.Failf("failed to validate pvc and application binding: %v", err)
|
||||||
|
@@ -19,7 +19,6 @@ package e2e
 import (
 "context"
 "encoding/json"
-"errors"
 "fmt"
 "regexp"
 "strings"
@@ -37,7 +36,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 )

-// nolint:gomnd // numbers specify Kernel versions.
+//nolint:gomnd // numbers specify Kernel versions.
 var nbdResizeSupport = []util.KernelVersion{
 {
 Version: 5,
@@ -49,7 +48,7 @@ var nbdResizeSupport = []util.KernelVersion{
 }, // standard 5.3+ versions
 }

-// nolint:gomnd // numbers specify Kernel versions.
+//nolint:gomnd // numbers specify Kernel versions.
 var fastDiffSupport = []util.KernelVersion{
 {
 Version: 5,
@@ -61,7 +60,7 @@ var fastDiffSupport = []util.KernelVersion{
 }, // standard 5.3+ versions
 }

-// nolint:gomnd // numbers specify Kernel versions.
+//nolint:gomnd // numbers specify Kernel versions.
 var deepFlattenSupport = []util.KernelVersion{
 {
 Version: 5,
@@ -75,7 +74,8 @@ var deepFlattenSupport = []util.KernelVersion{

 // To use `io-timeout=0` we need
 // www.mail-archive.com/linux-block@vger.kernel.org/msg38060.html
-// nolint:gomnd // numbers specify Kernel versions.
+//
+//nolint:gomnd // numbers specify Kernel versions.
 var nbdZeroIOtimeoutSupport = []util.KernelVersion{
 {
 Version: 5,
@@ -164,8 +164,8 @@ func createRBDStorageClass(

 timeout := time.Duration(deployTimeout) * time.Minute

-return wait.PollImmediate(poll, timeout, func() (bool, error) {
-_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
+return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(ctx context.Context) (bool, error) {
+_, err = c.StorageV1().StorageClasses().Create(ctx, &sc, metav1.CreateOptions{})
 if err != nil {
 framework.Logf("error creating StorageClass %q: %v", sc.Name, err)
 if isRetryableAPIError(err) {
@@ -791,6 +791,8 @@ func sparsifyBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeCla
 func deletePool(name string, cephFS bool, f *framework.Framework) error {
 cmds := []string{}
 if cephFS {
+//nolint:dupword // "ceph osd pool delete" requires the pool 2x
+//
 // ceph fs fail
 // ceph fs rm myfs --yes-i-really-mean-it
 // ceph osd pool delete myfs-metadata myfs-metadata
@@ -802,6 +804,8 @@ func deletePool(name string, cephFS bool, f *framework.Framework) error {
 fmt.Sprintf("ceph osd pool delete %s-metadata %s-metadata --yes-i-really-really-mean-it", name, name),
 fmt.Sprintf("ceph osd pool delete %s-replicated %s-replicated --yes-i-really-really-mean-it", name, name))
 } else {
+//nolint:dupword // "ceph osd pool delete" requires the pool 2x
+//
 // ceph osd pool delete replicapool replicapool
 // --yes-i-really-mean-it
 cmds = append(cmds, fmt.Sprintf("ceph osd pool delete %s %s --yes-i-really-really-mean-it", name, name))
@@ -1033,7 +1037,7 @@ func listRBDImagesInTrash(f *framework.Framework, poolName string) ([]trashInfo,
 func waitToRemoveImagesFromTrash(f *framework.Framework, poolName string, t int) error {
 var errReason error
 timeout := time.Duration(t) * time.Minute
-err := wait.PollImmediate(poll, timeout, func() (bool, error) {
+err := wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
 imagesInTrash, err := listRBDImagesInTrash(f, poolName)
 if err != nil {
 return false, err
@@ -1047,7 +1051,7 @@ func waitToRemoveImagesFromTrash(f *framework.Framework, poolName string, t int)
 return false, nil
 })

-if errors.Is(err, wait.ErrWaitTimeout) {
+if wait.Interrupted(err) {
 err = errReason
 }
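The recurring change in these hunks is the move from the deprecated `wait.PollImmediate` to `wait.PollUntilContextTimeout` from `k8s.io/apimachinery/pkg/util/wait`, which threads a `context.Context` into the condition function and reports timeouts via `wait.Interrupted` instead of `wait.ErrWaitTimeout`. A minimal sketch of the pattern, with a hypothetical `checkReady` condition standing in for the real e2e checks:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// checkReady is a hypothetical condition function; the real e2e helpers query
// the Kubernetes API here and return (false, nil) to keep polling.
func checkReady(ctx context.Context) (bool, error) {
	return true, nil
}

func waitForReady(poll, timeout time.Duration) error {
	// Deprecated form: wait.PollImmediate(poll, timeout, func() (bool, error) { ... })
	// Replacement: the context is passed to every poll iteration, and the
	// fourth argument (immediate=true) preserves the "poll immediately" behaviour.
	err := wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, checkReady)
	if wait.Interrupted(err) {
		// wait.Interrupted replaces the old errors.Is(err, wait.ErrWaitTimeout) check.
		return fmt.Errorf("timed out waiting for condition: %w", err)
	}

	return err
}

func main() {
	if err := waitForReady(time.Second, time.Minute); err != nil {
		fmt.Println("wait failed:", err)
	}
}
```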
@@ -22,7 +22,7 @@ import (
 "strings"
 "time"

-. "github.com/onsi/gomega" // nolint
+. "github.com/onsi/gomega" //nolint:golint // e2e uses Expect() and other Gomega functions
 v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -33,6 +33,7 @@ import (
 )

 func expandPVCSize(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size string, t int) error {
+ctx := context.TODO()
 pvcName := pvc.Name
 pvcNamespace := pvc.Namespace
 updatedPVC, err := getPersistentVolumeClaim(c, pvcNamespace, pvcName)
@@ -44,17 +45,17 @@ func expandPVCSize(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size s
 updatedPVC.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(size)
 _, err = c.CoreV1().
 PersistentVolumeClaims(updatedPVC.Namespace).
-Update(context.TODO(), updatedPVC, metav1.UpdateOptions{})
-Expect(err).Should(BeNil())
+Update(ctx, updatedPVC, metav1.UpdateOptions{})
+Expect(err).ShouldNot(HaveOccurred())

 start := time.Now()
 framework.Logf("Waiting up to %v to be in Resized state", pvc)

-return wait.PollImmediate(poll, timeout, func() (bool, error) {
+return wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) {
 framework.Logf("waiting for PVC %s (%d seconds elapsed)", pvcName, int(time.Since(start).Seconds()))
 updatedPVC, err = c.CoreV1().
 PersistentVolumeClaims(pvcNamespace).
-Get(context.TODO(), pvcName, metav1.GetOptions{})
+Get(ctx, pvcName, metav1.GetOptions{})
 if err != nil {
 framework.Logf("Error getting pvc in namespace: '%s': %v", pvcNamespace, err)
 if isRetryableAPIError(err) {
@@ -185,7 +186,7 @@ func checkAppMntSize(f *framework.Framework, opt *metav1.ListOptions, size, cmd,
 timeout := time.Duration(t) * time.Minute
 start := time.Now()

-return wait.PollImmediate(poll, timeout, func() (bool, error) {
+return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
 framework.Logf("executing cmd %s (%d seconds elapsed)", cmd, int(time.Since(start).Seconds()))
 output, stdErr, err := execCommandInPod(f, cmd, ns, opt)
 if err != nil {
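The other assertion-level change that repeats through these files is replacing `Expect(err).Should(BeNil())` with `Expect(err).ShouldNot(HaveOccurred())`, the idiomatic Gomega matcher for errors, which also prints the error text on failure. A small illustrative sketch (the `loadFixture` helper is hypothetical, standing in for the suite's `unmarshal`/`loadPVC` style helpers):

```go
package e2e_test

import (
	"errors"
	"testing"

	. "github.com/onsi/gomega"
)

// loadFixture is a hypothetical helper standing in for the real e2e loaders.
func loadFixture(path string) error {
	if path == "" {
		return errors.New("empty path")
	}

	return nil
}

func TestFixtureLoads(t *testing.T) {
	g := NewWithT(t)

	err := loadFixture("testdata/pvc.yaml")
	// Preferred over Should(BeNil()): HaveOccurred() treats any non-nil error
	// as a failure and reports the wrapped error message on mismatch.
	g.Expect(err).ShouldNot(HaveOccurred())
}
```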
@@ -23,7 +23,7 @@ import (

 snapapi "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
 snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1"
-. "github.com/onsi/gomega" // nolint
+. "github.com/onsi/gomega" //nolint:golint // e2e uses Expect() and other Gomega functions
 v1 "k8s.io/api/core/v1"
 apierrs "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/resource"
@@ -35,7 +35,7 @@ import (
 func getSnapshotClass(path string) snapapi.VolumeSnapshotClass {
 sc := snapapi.VolumeSnapshotClass{}
 err := unmarshal(path, &sc)
-Expect(err).Should(BeNil())
+Expect(err).ShouldNot(HaveOccurred())

 return sc
 }
@@ -43,7 +43,7 @@ func getSnapshotClass(path string) snapapi.VolumeSnapshotClass {
 func getSnapshot(path string) snapapi.VolumeSnapshot {
 sc := snapapi.VolumeSnapshot{}
 err := unmarshal(path, &sc)
-Expect(err).Should(BeNil())
+Expect(err).ShouldNot(HaveOccurred())

 return sc
 }
@@ -67,9 +67,10 @@ func createSnapshot(snap *snapapi.VolumeSnapshot, t int) error {
 return err
 }

+ctx := context.TODO()
 _, err = sclient.
 VolumeSnapshots(snap.Namespace).
-Create(context.TODO(), snap, metav1.CreateOptions{})
+Create(ctx, snap, metav1.CreateOptions{})
 if err != nil {
 return fmt.Errorf("failed to create volumesnapshot: %w", err)
 }
@@ -80,11 +81,11 @@ func createSnapshot(snap *snapapi.VolumeSnapshot, t int) error {
 start := time.Now()
 framework.Logf("waiting for %v to be in ready state", snap)

-return wait.PollImmediate(poll, timeout, func() (bool, error) {
+return wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) {
 framework.Logf("waiting for snapshot %s (%d seconds elapsed)", snap.Name, int(time.Since(start).Seconds()))
 snaps, err := sclient.
 VolumeSnapshots(snap.Namespace).
-Get(context.TODO(), name, metav1.GetOptions{})
+Get(ctx, name, metav1.GetOptions{})
 if err != nil {
 framework.Logf("Error getting snapshot in namespace: '%s': %v", snap.Namespace, err)
 if isRetryableAPIError(err) {
@@ -114,9 +115,10 @@ func deleteSnapshot(snap *snapapi.VolumeSnapshot, t int) error {
 return err
 }

+ctx := context.TODO()
 err = sclient.
 VolumeSnapshots(snap.Namespace).
-Delete(context.TODO(), snap.Name, metav1.DeleteOptions{})
+Delete(ctx, snap.Name, metav1.DeleteOptions{})
 if err != nil {
 return fmt.Errorf("failed to delete volumesnapshot: %w", err)
 }
@@ -126,11 +128,11 @@ func deleteSnapshot(snap *snapapi.VolumeSnapshot, t int) error {
 start := time.Now()
 framework.Logf("Waiting up to %v to be deleted", snap)

-return wait.PollImmediate(poll, timeout, func() (bool, error) {
+return wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) {
 framework.Logf("deleting snapshot %s (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
 _, err := sclient.
 VolumeSnapshots(snap.Namespace).
-Get(context.TODO(), name, metav1.GetOptions{})
+Get(ctx, name, metav1.GetOptions{})
 if err == nil {
 return false, nil
 }
@@ -223,8 +225,8 @@ func createNFSSnapshotClass(f *framework.Framework) error {

 timeout := time.Duration(deployTimeout) * time.Minute

-return wait.PollImmediate(poll, timeout, func() (bool, error) {
-_, err = sclient.VolumeSnapshotClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
+return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(ctx context.Context) (bool, error) {
+_, err = sclient.VolumeSnapshotClasses().Create(ctx, &sc, metav1.CreateOptions{})
 if err != nil {
 framework.Logf("error creating SnapshotClass %q: %v", sc.Name, err)
 if apierrs.IsAlreadyExists(err) {
@@ -252,8 +254,8 @@ func deleteNFSSnapshotClass() error {

 timeout := time.Duration(deployTimeout) * time.Minute

-return wait.PollImmediate(poll, timeout, func() (bool, error) {
-err = sclient.VolumeSnapshotClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{})
+return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(ctx context.Context) (bool, error) {
+err = sclient.VolumeSnapshotClasses().Delete(ctx, sc.Name, metav1.DeleteOptions{})
 if err != nil {
 framework.Logf("error deleting SnapshotClass %q: %v", sc.Name, err)
 if apierrs.IsNotFound(err) {
@@ -276,16 +278,17 @@ func getVolumeSnapshotContent(namespace, snapshotName string) (*snapapi.VolumeSn
 return nil, err
 }

+ctx := context.TODO()
 snapshot, err := sclient.
 VolumeSnapshots(namespace).
-Get(context.TODO(), snapshotName, metav1.GetOptions{})
+Get(ctx, snapshotName, metav1.GetOptions{})
 if err != nil {
 return nil, fmt.Errorf("failed to get volumesnapshot: %w", err)
 }

 volumeSnapshotContent, err := sclient.
 VolumeSnapshotContents().
-Get(context.TODO(), *snapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{})
+Get(ctx, *snapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{})
 if err != nil {
 return nil, fmt.Errorf("failed to get volumesnapshotcontent: %w", err)
 }
@@ -293,7 +296,7 @@ func getVolumeSnapshotContent(namespace, snapshotName string) (*snapapi.VolumeSn
 return volumeSnapshotContent, nil
 }

-// nolint:gocyclo,cyclop // reduce complexity
+//nolint:gocyclo,cyclop // reduce complexity
 func validateBiggerPVCFromSnapshot(f *framework.Framework,
 pvcPath,
 appPath,

@@ -322,7 +322,7 @@ func validateRBDStaticMigrationPVC(f *framework.Framework, appPath, scName strin
 return err
 }

-// nolint:gocyclo,cyclop // reduce complexity
+//nolint:gocyclo,cyclop // reduce complexity
 func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) error {
 opt := make(map[string]string)
 var (

@@ -23,7 +23,7 @@ import (
 "path/filepath"
 "strings"

-. "github.com/onsi/ginkgo/v2" // nolint
+. "github.com/onsi/ginkgo/v2" //nolint:golint // e2e uses By() and other Ginkgo functions
 v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -125,7 +125,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
 logsCSIPods("app=csi-cephfsplugin", c)

 // log all details from the namespace where Ceph-CSI is deployed
-e2edebug.DumpAllNamespaceInfo(c, cephCSINamespace)
+e2edebug.DumpAllNamespaceInfo(context.TODO(), c, cephCSINamespace)
 }
 err = deleteConfigMap(cephFSDirPath)
 if err != nil {

@@ -23,7 +23,7 @@ import (
 "path/filepath"
 "strings"

-. "github.com/onsi/ginkgo/v2" // nolint
+. "github.com/onsi/ginkgo/v2" //nolint:golint // e2e uses By() and other Ginkgo functions
 v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -130,7 +130,7 @@ var _ = Describe("RBD Upgrade Testing", func() {
 logsCSIPods("app=csi-rbdplugin", c)

 // log all details from the namespace where Ceph-CSI is deployed
-e2edebug.DumpAllNamespaceInfo(c, cephCSINamespace)
+e2edebug.DumpAllNamespaceInfo(context.TODO(), c, cephCSINamespace)
 }

 err := deleteConfigMap(rbdDirPath)

88  e2e/utils.go
@@ -248,9 +248,9 @@ func getMons(ns string, c kubernetes.Interface) ([]string, error) {

 var svcList *v1.ServiceList
 t := time.Duration(deployTimeout) * time.Minute
-err := wait.PollImmediate(poll, t, func() (bool, error) {
+err := wait.PollUntilContextTimeout(context.TODO(), poll, t, true, func(ctx context.Context) (bool, error) {
 var svcErr error
-svcList, svcErr = c.CoreV1().Services(ns).List(context.TODO(), opt)
+svcList, svcErr = c.CoreV1().Services(ns).List(ctx, opt)
 if svcErr != nil {
 if isRetryableAPIError(svcErr) {
 return false, nil
@@ -486,6 +486,7 @@ func validatePVCAndAppBinding(pvcPath, appPath string, f *framework.Framework) e
 if err != nil {
 return err
 }
+
 err = deletePVCAndApp("", f, pvc, app)

 return err
@@ -508,6 +509,50 @@ func getMountType(selector, mountPath string, f *framework.Framework) (string, e
 }

 func validateNormalUserPVCAccess(pvcPath string, f *framework.Framework) error {
+writeTest := func(ns string, opts *metav1.ListOptions) error {
+_, stdErr, err := execCommandInPod(f, "echo testing > /target/testing", ns, opts)
+if err != nil {
+return fmt.Errorf("failed to exec command in pod: %w", err)
+}
+if stdErr != "" {
+return fmt.Errorf("failed to touch a file as non-root user %v", stdErr)
+}
+
+return nil
+}
+
+return validateNormalUserPVCAccessFunc(pvcPath, f, writeTest)
+}
+
+func validateInodeCount(pvcPath string, f *framework.Framework, inodes int) error {
+countInodes := func(ns string, opts *metav1.ListOptions) error {
+stdOut, stdErr, err := execCommandInPod(f, "df --output=itotal /target | tail -n1", ns, opts)
+if err != nil {
+return fmt.Errorf("failed to exec command in pod: %w", err)
+}
+if stdErr != "" {
+return fmt.Errorf("failed to list inodes in pod: %v", stdErr)
+}
+
+itotal, err := strconv.Atoi(strings.TrimSpace(stdOut))
+if err != nil {
+return fmt.Errorf("failed to parse itotal %q to int: %w", strings.TrimSpace(stdOut), err)
+}
+if inodes != itotal {
+return fmt.Errorf("expected inodes (%d) do not match itotal on volume (%d)", inodes, itotal)
+}
+
+return nil
+}
+
+return validateNormalUserPVCAccessFunc(pvcPath, f, countInodes)
+}
+
+func validateNormalUserPVCAccessFunc(
+pvcPath string,
+f *framework.Framework,
+validate func(ns string, opts *metav1.ListOptions) error,
+) error {
 pvc, err := loadPVC(pvcPath)
 if err != nil {
 return err
@@ -571,12 +616,10 @@ func validateNormalUserPVCAccess(pvcPath string, f *framework.Framework) error {
 opt := metav1.ListOptions{
 LabelSelector: "app=pod-run-as-non-root",
 }
-_, stdErr, err := execCommandInPod(f, "echo testing > /target/testing", app.Namespace, &opt)
+
+err = validate(app.Namespace, &opt)
 if err != nil {
-return fmt.Errorf("failed to exec command in pod: %w", err)
-}
-if stdErr != "" {
-return fmt.Errorf("failed to touch a file as non-root user %v", stdErr)
+return fmt.Errorf("failed to run validation function: %w", err)
 }

 // metrics for BlockMode was added in Kubernetes 1.22
@@ -809,7 +852,7 @@ func writeDataAndCalChecksum(app *v1.Pod, opt *metav1.ListOptions, f *framework.
 return checkSum, nil
 }

-// nolint:gocyclo,gocognit,nestif,cyclop // reduce complexity
+//nolint:gocyclo,gocognit,nestif,cyclop // reduce complexity
 func validatePVCClone(
 totalCount int,
 sourcePvcPath, sourceAppPath, clonePvcPath, clonePvcAppPath,
@@ -1024,7 +1067,7 @@ func validatePVCClone(
 validateRBDImageCount(f, 0, defaultRBDPool)
 }

-// nolint:gocyclo,gocognit,nestif,cyclop // reduce complexity
+//nolint:gocyclo,gocognit,nestif,cyclop // reduce complexity
 func validatePVCSnapshot(
 totalCount int,
 pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath string,
@@ -1486,13 +1529,14 @@ func validateController(
 return deleteResource(rbdExamplePath + "storageclass.yaml")
 }

-// nolint:deadcode,unused // Unused code will be used in future.
 // k8sVersionGreaterEquals checks the ServerVersion of the Kubernetes cluster
 // and compares it to the major.minor version passed. In case the version of
 // the cluster is equal or higher to major.minor, `true` is returned, `false`
 // otherwise.
 // If fetching the ServerVersion of the Kubernetes cluster fails, the calling
 // test case is marked as `FAILED` and gets aborted.
+//
+//nolint:deadcode,unused // Unused code will be used in future.
 func k8sVersionGreaterEquals(c kubernetes.Interface, major, minor int) bool {
 v, err := c.Discovery().ServerVersion()
 if err != nil {
@@ -1516,8 +1560,8 @@ func waitForJobCompletion(c kubernetes.Interface, ns, job string, timeout int) e

 framework.Logf("waiting for Job %s/%s to be in state %q", ns, job, batch.JobComplete)

-return wait.PollImmediate(poll, t, func() (bool, error) {
-j, err := c.BatchV1().Jobs(ns).Get(context.TODO(), job, metav1.GetOptions{})
+return wait.PollUntilContextTimeout(context.TODO(), poll, t, true, func(ctx context.Context) (bool, error) {
+j, err := c.BatchV1().Jobs(ns).Get(ctx, job, metav1.GetOptions{})
 if err != nil {
 if isRetryableAPIError(err) {
 return false, nil
@@ -1564,7 +1608,7 @@ func retryKubectlInput(namespace string, action kubectlAction, data string, t in
 framework.Logf("waiting for kubectl (%s -f args %s) to finish", action, args)
 start := time.Now()

-return wait.PollImmediate(poll, timeout, func() (bool, error) {
+return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
 cmd := []string{}
 if len(args) != 0 {
 cmd = append(cmd, strings.Join(args, ""))
@@ -1603,7 +1647,7 @@ func retryKubectlFile(namespace string, action kubectlAction, filename string, t
 framework.Logf("waiting for kubectl (%s -f %q args %s) to finish", action, filename, args)
 start := time.Now()

-return wait.PollImmediate(poll, timeout, func() (bool, error) {
+return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
 cmd := []string{}
 if len(args) != 0 {
 cmd = append(cmd, strings.Join(args, ""))
@@ -1638,14 +1682,15 @@ func retryKubectlFile(namespace string, action kubectlAction, filename string, t
 // retryKubectlArgs takes a namespace and action telling kubectl what to do
 // with the passed arguments. This function retries until no error occurred, or
 // the timeout passed.
-// nolint:unparam // retryKubectlArgs will be used with kubectlDelete arg later on.
+//
+//nolint:unparam // retryKubectlArgs will be used with kubectlDelete arg later on.
 func retryKubectlArgs(namespace string, action kubectlAction, t int, args ...string) error {
 timeout := time.Duration(t) * time.Minute
 args = append([]string{string(action)}, args...)
 framework.Logf("waiting for kubectl (%s args) to finish", args)
 start := time.Now()

-return wait.PollImmediate(poll, timeout, func() (bool, error) {
+return wait.PollUntilContextTimeout(context.TODO(), poll, timeout, true, func(_ context.Context) (bool, error) {
 _, err := e2ekubectl.RunKubectl(namespace, args...)
 if err != nil {
 if isRetryableAPIError(err) {
@@ -1687,3 +1732,14 @@ func rwopMayFail(err error) bool {

 return !rwopSupported
 }
+
+// getConfigFile returns the config file path at the preferred location if it
+// exists there. Returns the fallback location otherwise.
+func getConfigFile(filename, preferred, fallback string) string {
+configFile := preferred + filename
+if _, err := os.Stat(configFile); os.IsNotExist(err) {
+configFile = fallback + filename
+}
+
+return configFile
+}
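The refactor above splits the non-root PVC access test into a generic `validateNormalUserPVCAccessFunc` that owns the PVC/pod setup and teardown, plus pluggable validation callbacks (`writeTest`, `countInodes`). A rough sketch of that shape, using simplified hypothetical helpers (`execInPod`, `runPVCValidation`) in place of the framework functions:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// execInPod is a hypothetical stand-in for the e2e execCommandInPod helper.
func execInPod(cmd string) (stdout, stderr string, err error) {
	return "65536", "", nil
}

// runPVCValidation mirrors the validateNormalUserPVCAccessFunc idea: the
// setup/teardown stays in one place and each test supplies only its check.
func runPVCValidation(validate func() error) error {
	// ... create the PVC and the non-root pod here (omitted in this sketch) ...
	if err := validate(); err != nil {
		return fmt.Errorf("failed to run validation function: %w", err)
	}
	// ... delete the pod and PVC here ...
	return nil
}

func main() {
	// An inode-count check comparable to validateInodeCount.
	countInodes := func() error {
		out, _, err := execInPod("df --output=itotal /target | tail -n1")
		if err != nil {
			return err
		}
		itotal, err := strconv.Atoi(strings.TrimSpace(out))
		if err != nil {
			return err
		}
		fmt.Println("inodes on volume:", itotal)

		return nil
	}

	if err := runPVCValidation(countInodes); err != nil {
		fmt.Println("validation failed:", err)
	}
}
```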
@@ -2,17 +2,17 @@

 ## Deploying Ceph-CSI services

-Create [ceph-config](./ceph-conf.yaml) configmap using the following command.
+Create [ceph-config](../deploy/ceph-conf.yaml) configmap using the following command.

 ```bash
-kubectl apply -f ./ceph-conf.yaml
+kubectl apply -f ../deploy/ceph-conf.yaml
 ```

 Both `rbd` and `cephfs` directories contain `plugin-deploy.sh` and
 `plugin-teardown.sh` helper scripts. You can use those to help you
 deploy/teardown RBACs, sidecar containers and the plugin in one go.
 By default, they look for the YAML manifests in
-`../../deploy/{rbd,cephfs}/kubernetes`.
+`../deploy/{rbd,cephfs}/kubernetes`.
 You can override this path by running

 ```bash
@@ -25,7 +25,7 @@ The CSI plugin requires configuration information regarding the Ceph cluster(s),
 that would host the dynamically or statically provisioned volumes. This
 is provided by adding a per-cluster identifier (referred to as clusterID), and
 the required monitor details for the same, as in the provided [sample config
-map](./csi-config-map-sample.yaml).
+map](../deploy/csi-config-map-sample.yaml).

 Gather the following information from the Ceph cluster(s) of choice,

@@ -38,13 +38,13 @@ Gather the following information from the Ceph cluster(s) of choice,
 * Alternatively, choose a `<cluster-id>` value that is distinct per Ceph
   cluster in use by this kubernetes cluster

-Update the [sample configmap](./csi-config-map-sample.yaml) with values
+Update the [sample configmap](../deploy/csi-config-map-sample.yaml) with values
 from a Ceph cluster and replace `<cluster-id>` with the chosen clusterID, to
 create the manifest for the configmap which can be updated in the cluster
 using the following command,

 ```bash
-kubectl replace -f ./csi-config-map-sample.yaml
+kubectl replace -f ../deploy/csi-config-map-sample.yaml
 ```

 Storage class and snapshot class, using `<cluster-id>` as the value for the

@@ -296,3 +296,102 @@ Units: sectors of 1 * 512 = 512 bytes
 Sector size (logical/physical): 512 bytes / 512 bytes
 I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
 ```
+
+### How to create CephFS Snapshot and Restore
+
+In the `examples/cephfs` directory you will find two files related to snapshots:
+[snapshotclass.yaml](./cephfs/snapshotclass.yaml) and
+[snapshot.yaml](./cephfs/snapshot.yaml).
+
+Once you created your CephFS volume, you'll need to customize at least
+`snapshotclass.yaml` and make sure the `clusterID` parameter matches
+your Ceph cluster setup.
+
+Note that it is recommended to create a volume snapshot or a PVC clone
+only when the PVC is not in use.
+
+After configuring everything you needed, create the snapshot class:
+
+```bash
+kubectl create -f ../examples/cephfs/snapshotclass.yaml
+```
+
+Verify that the snapshot class was created:
+
+```console
+$ kubectl get volumesnapshotclass
+NAME                         DRIVER                DELETIONPOLICY   AGE
+csi-cephfsplugin-snapclass   cephfs.csi.ceph.com   Delete           24m
+```
+
+Create a snapshot from the existing PVC:
+
+```bash
+kubectl create -f ../examples/cephfs/snapshot.yaml
+```
+
+To verify if your volume snapshot has successfully been created and to
+get the details about snapshot, run the following:
+
+```console
+$ kubectl get volumesnapshot
+NAME                  READYTOUSE   SOURCEPVC        SOURCESNAPSHOTCONTENT   RESTORESIZE   SNAPSHOTCLASS                SNAPSHOTCONTENT                                    CREATIONTIME   AGE
+cephfs-pvc-snapshot   true         csi-cephfs-pvc                           1Gi           csi-cephfsplugin-snapclass   snapcontent-34476204-a14a-4d59-bfbc-2bbba695652c   3s             6s
+```
+
+To be sure everything is OK you can run
+`ceph fs subvolume snapshot ls <vol_name> <sub_name> [<group_name>]`
+inside one of your Ceph pod.
+
+To restore the snapshot to a new PVC, deploy
+[pvc-restore.yaml](./cephfs/pvc-restore.yaml) and a testing pod
+[pod-restore.yaml](./cephfs/pod-restore.yaml):
+
+```bash
+kubectl create -f pvc-restore.yaml
+kubectl create -f pod-restore.yaml
+```
+
+### Cleanup for CephFS Snapshot and Restore
+
+Delete the testing pod and restored pvc.
+
+```bash
+kubectl delete pod <pod-restore name>
+kubectl delete pvc <pvc-restore name>
+```
+
+Now, the snapshot is no longer in use, Delete the volume snapshot
+and volume snapshot class.
+
+```bash
+kubectl delete volumesnapshot <snapshot name>
+kubectl delete volumesnapshotclass <snapshotclass name>
+```
+
+### How to Clone CephFS Volumes
+
+Create the clone from cephFS PVC:
+
+```bash
+kubectl create -f ../examples/cephfs/pvc-clone.yaml
+kubectl create -f ../examples/cephfs/pod-clone.yaml
+```
+
+To verify if your clone has successfully been created, run the following:
+
+```console
+$ kubectl get pvc
+NAME               STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS    AGE
+csi-cephfs-pvc     Bound    pvc-1ea51547-a88b-4ab0-8b4a-812caeaf025d   1Gi        RWX            csi-cephfs-sc   20h
+cephfs-pvc-clone   Bound    pvc-b575bc35-d521-4c41-b4f9-1d733cd28fdf   1Gi        RWX            csi-cephfs-sc   39s
+```
+
+### Cleanup
+
+Delete the cloned pod and pvc:
+
+```bash
+kubectl delete pod <pod-clone name>
+kubectl delete pvc <pvc-clone name>
+```
|
|||||||
|
|
||||||
reclaimPolicy: Delete
|
reclaimPolicy: Delete
|
||||||
allowVolumeExpansion: true
|
allowVolumeExpansion: true
|
||||||
mountOptions:
|
# mountOptions:
|
||||||
- debug
|
# - context="system_u:object_r:container_file_t:s0:c0,c1"
|
||||||
|
@ -45,5 +45,11 @@ parameters:
|
|||||||
# If omitted, defaults to "csi-vol-".
|
# If omitted, defaults to "csi-vol-".
|
||||||
volumeNamePrefix: nfs-export-
|
volumeNamePrefix: nfs-export-
|
||||||
|
|
||||||
|
# (optional) Security requirements for the NFS-export. Valid flavours
|
||||||
|
# include: none, sys, krb5, krb5i and krb5p. The <sectype-list> is a comma
|
||||||
|
# delimited string, for example "sys,krb5".
|
||||||
|
# This option is available with Ceph v17.2.6 and newer.
|
||||||
|
# secTypes: <sectype-list>
|
||||||
|
|
||||||
reclaimPolicy: Delete
|
reclaimPolicy: Delete
|
||||||
allowVolumeExpansion: true
|
allowVolumeExpansion: true
|
||||||
|
@ -37,6 +37,17 @@ parameters:
|
|||||||
# imageFeatures: layering,journaling,exclusive-lock,object-map,fast-diff
|
# imageFeatures: layering,journaling,exclusive-lock,object-map,fast-diff
|
||||||
imageFeatures: "layering"
|
imageFeatures: "layering"
|
||||||
|
|
||||||
|
# (optional) Options to pass to the `mkfs` command while creating the
|
||||||
|
# filesystem on the RBD device. Check the man-page for the `mkfs` command
|
||||||
|
# for the filesystem for more details. When `mkfsOptions` is set here, the
|
||||||
|
# defaults will not be used, consider including them in this parameter.
|
||||||
|
#
|
||||||
|
# The default options depend on the csi.storage.k8s.io/fstype setting:
|
||||||
|
# - ext4: "-m0 -Enodiscard,lazy_itable_init=1,lazy_journal_init=1"
|
||||||
|
# - xfs: "-onouuid -K"
|
||||||
|
#
|
||||||
|
# mkfsOptions: "-m0 -Ediscard -i1024"
|
||||||
|
|
||||||
# (optional) Specifies whether to try other mounters in case if the current
|
# (optional) Specifies whether to try other mounters in case if the current
|
||||||
# mounter fails to mount the rbd image for any reason. True means fallback
|
# mounter fails to mount the rbd image for any reason. True means fallback
|
||||||
# to next mounter, default is set to false.
|
# to next mounter, default is set to false.
|
||||||
|
213  go.mod

@@ -1,110 +1,121 @@
 module github.com/ceph/ceph-csi

-go 1.19
+go 1.20

 require (
-  github.com/IBM/keyprotect-go-client v0.9.2
-  github.com/aws/aws-sdk-go v1.44.195
-  github.com/aws/aws-sdk-go-v2/service/sts v1.18.3
+  github.com/IBM/keyprotect-go-client v0.10.0
+  github.com/aws/aws-sdk-go v1.44.285
+  github.com/aws/aws-sdk-go-v2/service/sts v1.19.0
   github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000
   // TODO: API for managing subvolume metadata and snapshot metadata requires `ceph_ci_untested` build-tag
-  github.com/ceph/go-ceph v0.20.0
-  github.com/container-storage-interface/spec v1.7.0
+  github.com/ceph/go-ceph v0.21.0
+  github.com/container-storage-interface/spec v1.8.0
   github.com/csi-addons/replication-lib-utils v0.2.0
-  github.com/csi-addons/spec v0.1.2-0.20221101132540-98eff76b0ff8
-  github.com/gemalto/kmip-go v0.0.8
-  github.com/golang/protobuf v1.5.2
-  github.com/google/fscrypt v0.3.3
+  github.com/csi-addons/spec v0.2.0
+  github.com/gemalto/kmip-go v0.0.9
+  github.com/golang/protobuf v1.5.3
+  github.com/google/fscrypt v0.3.4
   github.com/google/uuid v1.3.0
-  github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
+  github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
   github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
-  github.com/hashicorp/vault/api v1.9.0
+  github.com/hashicorp/vault/api v1.9.2
   github.com/kubernetes-csi/csi-lib-utils v0.13.0
   github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0
   github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a
-  github.com/onsi/ginkgo/v2 v2.8.1
-  github.com/onsi/gomega v1.27.1
+  github.com/onsi/ginkgo/v2 v2.10.0
+  github.com/onsi/gomega v1.27.8
   github.com/pkg/xattr v0.4.9
-  github.com/prometheus/client_golang v1.14.0
-  github.com/stretchr/testify v1.8.1
-  golang.org/x/crypto v0.6.0
-  golang.org/x/net v0.7.0
-  golang.org/x/sys v0.5.0
-  google.golang.org/grpc v1.53.0
-  google.golang.org/protobuf v1.28.1
-  k8s.io/api v0.26.1
-  k8s.io/apimachinery v0.26.1
-  k8s.io/client-go v12.0.0+incompatible
-  k8s.io/cloud-provider v0.26.1
-  k8s.io/klog/v2 v2.90.0
+  github.com/prometheus/client_golang v1.16.0
+  github.com/stretchr/testify v1.8.4
+  golang.org/x/crypto v0.10.0
+  golang.org/x/net v0.11.0
+  golang.org/x/sys v0.9.0
+  google.golang.org/grpc v1.56.0
+  google.golang.org/protobuf v1.30.0
   //
   // when updating k8s.io/kubernetes, make sure to update the replace section too
   //
-  k8s.io/kubernetes v1.26.1
-  k8s.io/mount-utils v0.26.1
+  k8s.io/api v0.27.2
+  k8s.io/apimachinery v0.27.2
+  k8s.io/client-go v12.0.0+incompatible
+  k8s.io/cloud-provider v0.27.2
+  k8s.io/klog/v2 v2.100.1
+  k8s.io/kubernetes v1.27.3
+  k8s.io/mount-utils v0.27.2
   k8s.io/pod-security-admission v0.0.0
-  k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
-  sigs.k8s.io/controller-runtime v0.14.4
+  k8s.io/utils v0.0.0-20230209194617-a36077c30491
+  sigs.k8s.io/controller-runtime v0.15.1-0.20230524200249-30eae58f1b98
 )

 require (
+  github.com/NYTimes/gziphandler v1.1.1 // indirect
   github.com/ansel1/merry v1.6.2 // indirect
   github.com/ansel1/merry/v2 v2.0.1 // indirect
-  github.com/aws/aws-sdk-go-v2 v1.17.4 // indirect
-  github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.28 // indirect
-  github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.22 // indirect
-  github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.22 // indirect
+  github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
+  github.com/armon/go-metrics v0.3.10 // indirect
+  github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
+  github.com/aws/aws-sdk-go-v2 v1.18.0 // indirect
+  github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33 // indirect
+  github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27 // indirect
+  github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27 // indirect
   github.com/aws/smithy-go v1.13.5 // indirect
   github.com/beorn7/perks v1.0.1 // indirect
   github.com/blang/semver/v4 v4.0.0 // indirect
-  github.com/cenkalti/backoff/v3 v3.0.0 // indirect
+  github.com/cenkalti/backoff/v3 v3.2.2 // indirect
   github.com/cenkalti/backoff/v4 v4.1.3 // indirect
   github.com/cespare/xxhash/v2 v2.2.0 // indirect
+  github.com/coreos/go-semver v0.3.0 // indirect
+  github.com/coreos/go-systemd/v22 v22.4.0 // indirect
   github.com/davecgh/go-spew v1.1.1 // indirect
-  github.com/docker/distribution v2.8.1+incompatible // indirect
+  github.com/docker/distribution v2.8.2+incompatible // indirect
   github.com/emicklei/go-restful/v3 v3.9.0 // indirect
   github.com/evanphx/json-patch v4.12.0+incompatible // indirect
   github.com/evanphx/json-patch/v5 v5.6.0 // indirect
-  github.com/fatih/color v1.9.0 // indirect
+  github.com/fatih/color v1.13.0 // indirect
   github.com/felixge/httpsnoop v1.0.3 // indirect
+  github.com/frankban/quicktest v1.13.0 // indirect
   github.com/fsnotify/fsnotify v1.6.0 // indirect
   github.com/gemalto/flume v0.13.0 // indirect
   github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect
-  github.com/go-logr/logr v1.2.3 // indirect
+  github.com/go-jose/go-jose/v3 v3.0.0 // indirect
+  github.com/go-logr/logr v1.2.4 // indirect
   github.com/go-logr/stdr v1.2.2 // indirect
-  github.com/go-openapi/jsonpointer v0.19.5 // indirect
-  github.com/go-openapi/jsonreference v0.20.0 // indirect
+  github.com/go-openapi/jsonpointer v0.19.6 // indirect
+  github.com/go-openapi/jsonreference v0.20.1 // indirect
   github.com/go-openapi/swag v0.22.3 // indirect
-  github.com/go-sql-driver/mysql v1.5.0 // indirect
+  github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
   github.com/gogo/protobuf v1.3.2 // indirect
   github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
   github.com/golang/snappy v0.0.4 // indirect
+  github.com/google/cel-go v0.12.6 // indirect
   github.com/google/gnostic v0.6.9 // indirect
   github.com/google/go-cmp v0.5.9 // indirect
   github.com/google/gofuzz v1.2.0 // indirect
-  github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
+  github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
+  github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
   github.com/hashicorp/errwrap v1.1.0 // indirect
   github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
-  github.com/hashicorp/go-hclog v0.16.2 // indirect
+  github.com/hashicorp/go-hclog v1.2.2 // indirect
+  github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
   github.com/hashicorp/go-multierror v1.1.1 // indirect
   github.com/hashicorp/go-retryablehttp v0.7.0 // indirect
   github.com/hashicorp/go-rootcerts v1.0.2 // indirect
   github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect
   github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
   github.com/hashicorp/go-sockaddr v1.0.2 // indirect
-  github.com/hashicorp/hcl v1.0.0 // indirect
-  github.com/hashicorp/vault v1.4.2 // indirect
+  github.com/hashicorp/golang-lru v0.5.4 // indirect
+  github.com/hashicorp/hcl v1.0.1-vault-3 // indirect
+  github.com/hashicorp/vault v1.11.11 // indirect
   github.com/hashicorp/vault/sdk v0.7.0 // indirect
-  github.com/imdario/mergo v0.3.12 // indirect
+  github.com/imdario/mergo v0.3.13 // indirect
   github.com/inconshreveable/mousetrap v1.0.1 // indirect
   github.com/jmespath/go-jmespath v0.4.0 // indirect
   github.com/josharian/intern v1.0.0 // indirect
   github.com/json-iterator/go v1.1.12 // indirect
-  github.com/kr/pretty v0.2.1 // indirect
   github.com/mailru/easyjson v0.7.7 // indirect
   github.com/mattn/go-colorable v0.1.13 // indirect
   github.com/mattn/go-isatty v0.0.16 // indirect
-  github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
+  github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
   github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
   github.com/mitchellh/go-homedir v1.1.0 // indirect
   github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -115,17 +126,22 @@ require (
   github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
   github.com/opencontainers/go-digest v1.0.0 // indirect
   github.com/opencontainers/selinux v1.10.0 // indirect
-  github.com/openshift/api v0.0.0-20210927171657-636513e97fda // indirect
-  github.com/pierrec/lz4 v2.5.2+incompatible // indirect
+  github.com/openshift/api v0.0.0-20230320192226-1fc631efd341 // indirect
+  github.com/pierrec/lz4 v2.6.1+incompatible // indirect
   github.com/pkg/errors v0.9.1 // indirect
-  github.com/pmezard/go-difflib v1.0.0 // indirect
-  github.com/prometheus/client_model v0.3.0 // indirect
-  github.com/prometheus/common v0.37.0 // indirect
-  github.com/prometheus/procfs v0.8.0 // indirect
+  github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+  github.com/prometheus/client_model v0.4.0 // indirect
+  github.com/prometheus/common v0.42.0 // indirect
+  github.com/prometheus/procfs v0.10.1 // indirect
   github.com/ryanuber/go-glob v1.0.0 // indirect
   github.com/spf13/cobra v1.6.0 // indirect
   github.com/spf13/pflag v1.0.5 // indirect
-  go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 // indirect
+  github.com/stoewer/go-strcase v1.2.0 // indirect
+  go.etcd.io/etcd/api/v3 v3.5.7 // indirect
+  go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect
+  go.etcd.io/etcd/client/v3 v3.5.7 // indirect
+  go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
+  go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
   go.opentelemetry.io/otel v1.10.0 // indirect
   go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
   go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
@@ -137,26 +153,31 @@ require (
   go.uber.org/atomic v1.10.0 // indirect
   go.uber.org/multierr v1.8.0 // indirect
   go.uber.org/zap v1.24.0 // indirect
-  golang.org/x/oauth2 v0.4.0 // indirect
-  golang.org/x/term v0.5.0 // indirect
-  golang.org/x/text v0.7.0 // indirect
+  golang.org/x/oauth2 v0.7.0 // indirect
+  golang.org/x/sync v0.2.0 // indirect
+  golang.org/x/term v0.9.0 // indirect
+  golang.org/x/text v0.10.0 // indirect
   golang.org/x/time v0.3.0 // indirect
-  gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
+  golang.org/x/tools v0.9.3 // indirect
+  gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect
+  google.golang.org/api v0.110.0 // indirect
   google.golang.org/appengine v1.6.7 // indirect
-  google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
+  google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
   gopkg.in/inf.v0 v0.9.1 // indirect
-  gopkg.in/square/go-jose.v2 v2.5.1 // indirect
+  gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
   gopkg.in/yaml.v2 v2.4.0 // indirect
   gopkg.in/yaml.v3 v3.0.1 // indirect
-  k8s.io/apiextensions-apiserver v0.26.1 // indirect
-  k8s.io/apiserver v0.26.1 // indirect
-  k8s.io/component-base v0.26.1 // indirect
-  k8s.io/component-helpers v0.26.1 // indirect
-  k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
+  k8s.io/apiextensions-apiserver v0.27.2 // indirect
+  k8s.io/apiserver v0.27.2 // indirect
+  k8s.io/component-base v0.27.2 // indirect
+  k8s.io/component-helpers v0.27.2 // indirect
+  k8s.io/controller-manager v0.27.2 // indirect
+  k8s.io/kms v0.27.2 // indirect
+  k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect
   k8s.io/kubectl v0.0.0 // indirect
   k8s.io/kubelet v0.0.0 // indirect
-  sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 // indirect
-  sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
+  sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect
+  sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
   sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
   sigs.k8s.io/yaml v1.3.0 // indirect
 )
@@ -166,38 +187,37 @@ replace (
   github.com/ceph/ceph-csi/api => ./api

   // Required for kubernetes 1.26
-  github.com/onsi/ginkgo/v2 => github.com/onsi/ginkgo/v2 v2.4.0
   github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3
   gomodules.xyz/jsonpatch/v2 => github.com/gomodules/jsonpatch/v2 v2.2.0
   //
   // k8s.io/kubernetes depends on these k8s.io packages, but unversioned
   //
-  k8s.io/api => k8s.io/api v0.26.1
-  k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.1
-  k8s.io/apimachinery => k8s.io/apimachinery v0.26.1
-  k8s.io/apiserver => k8s.io/apiserver v0.26.1
-  k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.1
-  k8s.io/client-go => k8s.io/client-go v0.26.1
-  k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.1
-  k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.1
-  k8s.io/code-generator => k8s.io/code-generator v0.26.1
-  k8s.io/component-base => k8s.io/component-base v0.26.1
-  k8s.io/component-helpers => k8s.io/component-helpers v0.26.1
-  k8s.io/controller-manager => k8s.io/controller-manager v0.26.1
-  k8s.io/cri-api => k8s.io/cri-api v0.26.1
-  k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.1
-  k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.26.1
+  k8s.io/api => k8s.io/api v0.27.2
+  k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.27.2
+  k8s.io/apimachinery => k8s.io/apimachinery v0.27.2
+  k8s.io/apiserver => k8s.io/apiserver v0.27.2
+  k8s.io/cli-runtime => k8s.io/cli-runtime v0.27.2
+  k8s.io/client-go => k8s.io/client-go v0.27.2
+  k8s.io/cloud-provider => k8s.io/cloud-provider v0.27.2
+  k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.27.2
+  k8s.io/code-generator => k8s.io/code-generator v0.27.2
+  k8s.io/component-base => k8s.io/component-base v0.27.2
+  k8s.io/component-helpers => k8s.io/component-helpers v0.27.2
+  k8s.io/controller-manager => k8s.io/controller-manager v0.27.2
+  k8s.io/cri-api => k8s.io/cri-api v0.27.2
+  k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.27.2
+  k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.27.2
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.1
|
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.27.2
|
||||||
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.1
|
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.27.2
|
||||||
k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.1
|
k8s.io/kube-proxy => k8s.io/kube-proxy v0.27.2
|
||||||
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.1
|
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.27.2
|
||||||
k8s.io/kubectl => k8s.io/kubectl v0.26.1
|
k8s.io/kubectl => k8s.io/kubectl v0.27.2
|
||||||
k8s.io/kubelet => k8s.io/kubelet v0.26.1
|
k8s.io/kubelet => k8s.io/kubelet v0.27.2
|
||||||
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.1
|
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.27.2
|
||||||
k8s.io/metrics => k8s.io/metrics v0.26.1
|
k8s.io/metrics => k8s.io/metrics v0.27.2
|
||||||
k8s.io/mount-utils => k8s.io/mount-utils v0.26.1
|
k8s.io/mount-utils => k8s.io/mount-utils v0.27.2
|
||||||
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.26.1
|
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.27.2
|
||||||
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.1
|
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.27.2
|
||||||
// layeh.com seems to be misbehaving
|
// layeh.com seems to be misbehaving
|
||||||
layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917
|
layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917
|
||||||
)
|
)
|
||||||
@ -208,4 +228,7 @@ exclude (
|
|||||||
|
|
||||||
// This tag doesn't exist, but is imported by github.com/portworx/sched-ops.
|
// This tag doesn't exist, but is imported by github.com/portworx/sched-ops.
|
||||||
github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc2
|
github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc2
|
||||||
|
|
||||||
|
// version 3.9 is really old, don't use that!
|
||||||
|
github.com/openshift/api v3.9.0+incompatible
|
||||||
)
|
)
|
||||||
|
@ -241,7 +241,8 @@ func checkValidCreateVolumeRequest(
}

// CreateVolume creates a reservation and the volume in backend, if it is not already present.
// nolint:gocognit,gocyclo,nestif,cyclop // TODO: reduce complexity
//
//nolint:gocognit,gocyclo,nestif,cyclop // TODO: reduce complexity
func (cs *ControllerServer) CreateVolume(
ctx context.Context,
req *csi.CreateVolumeRequest,
@ -730,7 +731,8 @@ func (cs *ControllerServer) ControllerExpandVolume(

// CreateSnapshot creates the snapshot in backend and stores metadata
// in store
// nolint:gocognit,gocyclo,cyclop // golangci-lint did not catch this earlier, needs to get fixed late
//
//nolint:gocognit,gocyclo,cyclop // golangci-lint did not catch this earlier, needs to get fixed late
func (cs *ControllerServer) CreateSnapshot(
ctx context.Context,
req *csi.CreateSnapshotRequest,
@ -986,7 +988,8 @@ func (cs *ControllerServer) validateSnapshotReq(ctx context.Context, req *csi.Cr

// DeleteSnapshot deletes the snapshot in backend and removes the
// snapshot metadata from store.
// nolint:gocyclo,cyclop // TODO: reduce complexity
//
//nolint:gocyclo,cyclop // TODO: reduce complexity
func (cs *ControllerServer) DeleteSnapshot(
ctx context.Context,
req *csi.DeleteSnapshotRequest,
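The nolint rewrites above use the comment form that golangci-lint documents for its directive: //nolint: written without a space after the slashes, while the human-readable explanation stays in the doc comment on a plain // line. A minimal sketch of the recognised layout (the function is hypothetical; only the comment form matters):

// doSomething is a hypothetical example; the explanatory text stays in the
// doc comment and the machine-readable directive sits directly above the
// declaration.
//
//nolint:gocyclo,cyclop // TODO: reduce complexity
func doSomething() error {
	return nil
}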
@ -49,6 +49,8 @@ type Subvolume struct {

// SubVolumeClient is the interface that holds the signature of subvolume methods
// that interacts with CephFS subvolume API's.
//
//nolint:interfacebloat // SubVolumeClient has more than 10 methods, that is ok.
type SubVolumeClient interface {
// GetVolumeRootPathCeph returns the root path of the subvolume.
GetVolumeRootPathCeph(ctx context.Context) (string, error)
@ -31,7 +31,7 @@ import (
var (
availableMounters []string

// nolint:gomnd // numbers specify Kernel versions.
//nolint:gomnd // numbers specify Kernel versions.
quotaSupport = []util.KernelVersion{
{
Version: 4,
@ -292,11 +292,18 @@ func (ns *NodeServer) mount(

log.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, mnt.Name())

var mountOptions []string
if m := volCap.GetMount(); m != nil {
mountOptions = m.GetMountFlags()
}

switch mnt.(type) {
case *mounter.FuseMounter:
volOptions.FuseMountOptions = util.MountOptionsAdd(volOptions.FuseMountOptions, ns.fuseMountOptions)
volOptions.FuseMountOptions = util.MountOptionsAdd(volOptions.FuseMountOptions, mountOptions...)
case *mounter.KernelMounter:
volOptions.KernelMountOptions = util.MountOptionsAdd(volOptions.KernelMountOptions, ns.kernelMountOptions)
volOptions.KernelMountOptions = util.MountOptionsAdd(volOptions.KernelMountOptions, mountOptions...)
}

const readOnly = "ro"
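The block added above reads the mount flags the container orchestrator sends in the CSI VolumeCapability and appends them to the FUSE or kernel mount options. A small self-contained sketch of that lookup using the upstream CSI spec types; the package and helper name are illustrative, not part of the driver:

package example

import "github.com/container-storage-interface/spec/lib/go/csi"

// mountFlagsFromCapability returns the mount flags carried in a CSI
// VolumeCapability, or nil when the capability does not describe a
// mount-type volume.
func mountFlagsFromCapability(volCap *csi.VolumeCapability) []string {
	if m := volCap.GetMount(); m != nil {
		return m.GetMountFlags()
	}

	return nil
}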
@ -71,7 +71,7 @@ because, the order of omap creation and deletion are inverse of each other, and
request name lock, and hence any stale omaps are leftovers from incomplete transactions and are
hence safe to garbage collect.
*/
// nolint:gocognit,gocyclo,nestif,cyclop // TODO: reduce complexity
//nolint:gocognit,gocyclo,nestif,cyclop // TODO: reduce complexity
func CheckVolExists(ctx context.Context,
volOptions,
parentVolOpt *VolumeOptions,
@ -210,7 +210,8 @@ func fmtBackingSnapshotOptionMismatch(optName, expected, actual string) error {

// NewVolumeOptions generates a new instance of volumeOptions from the provided
// CSI request parameters.
// nolint:gocyclo,cyclop // TODO: reduce complexity
//
//nolint:gocyclo,cyclop // TODO: reduce complexity
func NewVolumeOptions(
ctx context.Context,
requestName,
@ -348,7 +349,8 @@ func IsVolumeCreateRO(caps []*csi.VolumeCapability) bool {

// newVolumeOptionsFromVolID generates a new instance of volumeOptions and VolumeIdentifier
// from the provided CSI VolumeID.
// nolint:gocyclo,cyclop // TODO: reduce complexity
//
//nolint:gocyclo,cyclop // TODO: reduce complexity
func NewVolumeOptionsFromVolID(
ctx context.Context,
volID string,
@ -82,7 +82,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
}

// Watch for changes to PersistentVolumes
err = c.Watch(&source.Kind{Type: &corev1.PersistentVolume{}}, &handler.EnqueueRequestForObject{})
err = c.Watch(source.Kind(mgr.GetCache(), &corev1.PersistentVolume{}), &handler.EnqueueRequestForObject{})
if err != nil {
return fmt.Errorf("failed to watch the changes: %w", err)
}
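The Watch rewrite tracks the sigs.k8s.io/controller-runtime API, where newer releases replaced the source.Kind struct literal with a source.Kind constructor that takes the manager's cache and the object type. A hedged sketch of the new wiring, assuming mgr and c are the manager.Manager and controller.Controller already created in add():

// sketch only: mgr is a manager.Manager, c is a controller.Controller.
src := source.Kind(mgr.GetCache(), &corev1.PersistentVolume{})
if err := c.Watch(src, &handler.EnqueueRequestForObject{}); err != nil {
	return fmt.Errorf("failed to watch the changes: %w", err)
}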
@ -26,6 +26,7 @@ import (
"strings"
"time"

corerbd "github.com/ceph/ceph-csi/internal/rbd"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"

@ -78,7 +79,7 @@ type ReplicationServer struct {
// compatibility.
*replication.UnimplementedControllerServer
// Embed ControllerServer as it implements helper functions
*ControllerServer
*corerbd.ControllerServer
}

func (rs *ReplicationServer) RegisterService(server grpc.ServiceRegistrar) {
@ -229,11 +230,11 @@ func (rs *ReplicationServer) EnableVolumeReplication(ctx context.Context,
}
defer rs.VolumeLocks.Release(volumeID)

rbdVol, err := GenVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
rbdVol, err := corerbd.GenVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
defer rbdVol.Destroy()
if err != nil {
switch {
case errors.Is(err, ErrImageNotFound):
case errors.Is(err, corerbd.ErrImageNotFound):
err = status.Errorf(codes.NotFound, "volume %s not found", volumeID)
case errors.Is(err, util.ErrPoolNotFound):
err = status.Errorf(codes.NotFound, "pool %s not found for %s", rbdVol.Pool, volumeID)
@ -249,7 +250,7 @@ func (rs *ReplicationServer) EnableVolumeReplication(ctx context.Context,
return nil, err
}

mirroringInfo, err := rbdVol.getImageMirroringInfo()
mirroringInfo, err := rbdVol.GetImageMirroringInfo()
if err != nil {
log.ErrorLog(ctx, err.Error())

@ -257,7 +258,7 @@ func (rs *ReplicationServer) EnableVolumeReplication(ctx context.Context,
}

if mirroringInfo.State != librbd.MirrorImageEnabled {
err = rbdVol.enableImageMirroring(mirroringMode)
err = rbdVol.EnableImageMirroring(mirroringMode)
if err != nil {
log.ErrorLog(ctx, err.Error())

@ -291,11 +292,11 @@ func (rs *ReplicationServer) DisableVolumeReplication(ctx context.Context,
}
defer rs.VolumeLocks.Release(volumeID)

rbdVol, err := GenVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
rbdVol, err := corerbd.GenVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
defer rbdVol.Destroy()
if err != nil {
switch {
case errors.Is(err, ErrImageNotFound):
case errors.Is(err, corerbd.ErrImageNotFound):
err = status.Errorf(codes.NotFound, "volume %s not found", volumeID)
case errors.Is(err, util.ErrPoolNotFound):
err = status.Errorf(codes.NotFound, "pool %s not found for %s", rbdVol.Pool, volumeID)
@ -311,7 +312,7 @@ func (rs *ReplicationServer) DisableVolumeReplication(ctx context.Context,
return nil, err
}

mirroringInfo, err := rbdVol.getImageMirroringInfo()
mirroringInfo, err := rbdVol.GetImageMirroringInfo()
if err != nil {
log.ErrorLog(ctx, err.Error())

@ -325,7 +326,7 @@ func (rs *ReplicationServer) DisableVolumeReplication(ctx context.Context,
case librbd.MirrorImageDisabling:
return nil, status.Errorf(codes.Aborted, "%s is in disabling state", volumeID)
case librbd.MirrorImageEnabled:
return disableVolumeReplication(rbdVol, mirroringInfo, force)
return corerbd.DisableVolumeReplication(rbdVol, mirroringInfo, force)
default:
return nil, status.Errorf(codes.InvalidArgument, "image is in %s Mode", mirroringInfo.State)
}
@ -333,53 +334,6 @@ func (rs *ReplicationServer) DisableVolumeReplication(ctx context.Context,
return &replication.DisableVolumeReplicationResponse{}, nil
}

func disableVolumeReplication(rbdVol *rbdVolume,
mirroringInfo *librbd.MirrorImageInfo,
force bool,
) (*replication.DisableVolumeReplicationResponse, error) {
if !mirroringInfo.Primary {
// Return success if the below condition is met
// Local image is secondary
// Local image is in up+replaying state

// If the image is in a secondary and its state is up+replaying means
// its a healthy secondary and the image is primary somewhere in the
// remote cluster and the local image is getting replayed. Return
// success for the Disabling mirroring as we cannot disable mirroring
// on the secondary image, when the image on the primary site gets
// disabled the image on all the remote (secondary) clusters will get
// auto-deleted. This helps in garbage collecting the volume
// replication Kubernetes artifacts after failback operation.
localStatus, rErr := rbdVol.getLocalState()
if rErr != nil {
return nil, status.Error(codes.Internal, rErr.Error())
}
if localStatus.Up && localStatus.State == librbd.MirrorImageStatusStateReplaying {
return &replication.DisableVolumeReplicationResponse{}, nil
}

return nil, status.Errorf(codes.InvalidArgument,
"secondary image status is up=%t and state=%s",
localStatus.Up,
localStatus.State)
}
err := rbdVol.disableImageMirroring(force)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
// the image state can be still disabling once we disable the mirroring
// check the mirroring is disabled or not
mirroringInfo, err = rbdVol.getImageMirroringInfo()
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if mirroringInfo.State == librbd.MirrorImageDisabling {
return nil, status.Errorf(codes.Aborted, "%s is in disabling state", rbdVol.VolID)
}

return &replication.DisableVolumeReplicationResponse{}, nil
}

// PromoteVolume extracts the RBD volume information from the volumeID, If the
// image is present, mirroring is enabled and the image is in demoted state it
// will promote the volume as primary.
@ -404,11 +358,11 @@ func (rs *ReplicationServer) PromoteVolume(ctx context.Context,
}
defer rs.VolumeLocks.Release(volumeID)

rbdVol, err := GenVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
rbdVol, err := corerbd.GenVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
defer rbdVol.Destroy()
if err != nil {
switch {
case errors.Is(err, ErrImageNotFound):
case errors.Is(err, corerbd.ErrImageNotFound):
err = status.Errorf(codes.NotFound, "volume %s not found", volumeID)
case errors.Is(err, util.ErrPoolNotFound):
err = status.Errorf(codes.NotFound, "pool %s not found for %s", rbdVol.Pool, volumeID)
@ -419,7 +373,7 @@ func (rs *ReplicationServer) PromoteVolume(ctx context.Context,
return nil, err
}

mirroringInfo, err := rbdVol.getImageMirroringInfo()
mirroringInfo, err := rbdVol.GetImageMirroringInfo()
if err != nil {
log.ErrorLog(ctx, err.Error())

@ -439,9 +393,9 @@ func (rs *ReplicationServer) PromoteVolume(ctx context.Context,
if req.GetForce() {
// workaround for https://github.com/ceph/ceph-csi/issues/2736
// TODO: remove this workaround when the issue is fixed
err = rbdVol.forcePromoteImage(cr)
err = rbdVol.ForcePromoteImage(cr)
} else {
err = rbdVol.promoteImage(req.GetForce())
err = rbdVol.PromoteImage(req.GetForce())
}
if err != nil {
log.ErrorLog(ctx, err.Error())
@ -461,7 +415,7 @@ func (rs *ReplicationServer) PromoteVolume(ctx context.Context,

interval, startTime := getSchedulingDetails(req.GetParameters())
if interval != admin.NoInterval {
err = rbdVol.addSnapshotScheduling(interval, startTime)
err = rbdVol.AddSnapshotScheduling(interval, startTime)
if err != nil {
return nil, err
}
@ -500,11 +454,11 @@ func (rs *ReplicationServer) DemoteVolume(ctx context.Context,
}
defer rs.VolumeLocks.Release(volumeID)

rbdVol, err := GenVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
rbdVol, err := corerbd.GenVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
defer rbdVol.Destroy()
if err != nil {
switch {
case errors.Is(err, ErrImageNotFound):
case errors.Is(err, corerbd.ErrImageNotFound):
err = status.Errorf(codes.NotFound, "volume %s not found", volumeID)
case errors.Is(err, util.ErrPoolNotFound):
err = status.Errorf(codes.NotFound, "pool %s not found for %s", rbdVol.Pool, volumeID)
@ -514,7 +468,7 @@ func (rs *ReplicationServer) DemoteVolume(ctx context.Context,

return nil, err
}
mirroringInfo, err := rbdVol.getImageMirroringInfo()
mirroringInfo, err := rbdVol.GetImageMirroringInfo()
if err != nil {
log.ErrorLog(ctx, err.Error())

@ -531,7 +485,7 @@ func (rs *ReplicationServer) DemoteVolume(ctx context.Context,

// demote image to secondary
if mirroringInfo.Primary {
err = rbdVol.demoteImage()
err = rbdVol.DemoteImage()
if err != nil {
log.ErrorLog(ctx, err.Error())

@ -591,11 +545,11 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
}
defer rs.VolumeLocks.Release(volumeID)
rbdVol, err := GenVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
rbdVol, err := corerbd.GenVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
defer rbdVol.Destroy()
if err != nil {
switch {
case errors.Is(err, ErrImageNotFound):
case errors.Is(err, corerbd.ErrImageNotFound):
err = status.Errorf(codes.NotFound, "volume %s not found", volumeID)
case errors.Is(err, util.ErrPoolNotFound):
err = status.Errorf(codes.NotFound, "pool %s not found for %s", rbdVol.Pool, volumeID)
@ -606,7 +560,7 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
return nil, err
}

mirroringInfo, err := rbdVol.getImageMirroringInfo()
mirroringInfo, err := rbdVol.GetImageMirroringInfo()
if err != nil {
// in case of Resync the image will get deleted and gets recreated and
// it takes time for this operation.
@ -624,10 +578,10 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
return nil, status.Error(codes.InvalidArgument, "image is in primary state")
}

mirrorStatus, err := rbdVol.getImageMirroringStatus()
mirrorStatus, err := rbdVol.GetImageMirroringStatus()
if err != nil {
// the image gets recreated after issuing resync
if errors.Is(err, ErrImageNotFound) {
if errors.Is(err, corerbd.ErrImageNotFound) {
// caller retries till RBD syncs an initial version of the image to
// report its status in the resync call. Ideally, this line will not
// be executed as the error would get returned due to getImageMirroringInfo
@ -671,7 +625,7 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
ready = checkRemoteSiteStatus(ctx, mirrorStatus)
}

err = resyncVolume(localStatus, rbdVol, req.Force)
err = rbdVol.ResyncVol(localStatus, req.Force)
if err != nil {
log.ErrorLog(ctx, err.Error())

@ -683,7 +637,7 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
return nil, status.Error(codes.Internal, err.Error())
}

err = repairResyncedImageID(ctx, rbdVol, ready)
err = rbdVol.RepairResyncedImageID(ctx, ready)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to resync Image ID: %s", err.Error())
}
@ -716,11 +670,11 @@ func (rs *ReplicationServer) GetVolumeReplicationInfo(ctx context.Context,
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
}
defer rs.VolumeLocks.Release(volumeID)
rbdVol, err := GenVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
rbdVol, err := corerbd.GenVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
defer rbdVol.Destroy()
if err != nil {
switch {
case errors.Is(err, ErrImageNotFound):
case errors.Is(err, corerbd.ErrImageNotFound):
err = status.Errorf(codes.NotFound, "volume %s not found", volumeID)
case errors.Is(err, util.ErrPoolNotFound):
err = status.Errorf(codes.NotFound, "pool %s not found for %s", rbdVol.Pool, volumeID)
@ -731,7 +685,7 @@ func (rs *ReplicationServer) GetVolumeReplicationInfo(ctx context.Context,
return nil, err
}

mirroringInfo, err := rbdVol.getImageMirroringInfo()
mirroringInfo, err := rbdVol.GetImageMirroringInfo()
if err != nil {
log.ErrorLog(ctx, err.Error())

@ -747,9 +701,9 @@ func (rs *ReplicationServer) GetVolumeReplicationInfo(ctx context.Context,
return nil, status.Error(codes.InvalidArgument, "image is not in primary state")
}

mirrorStatus, err := rbdVol.getImageMirroringStatus()
mirrorStatus, err := rbdVol.GetImageMirroringStatus()
if err != nil {
if errors.Is(err, ErrImageNotFound) {
if errors.Is(err, corerbd.ErrImageNotFound) {
return nil, status.Error(codes.Aborted, err.Error())
}
log.ErrorLog(ctx, err.Error())
@ -767,7 +721,7 @@ func (rs *ReplicationServer) GetVolumeReplicationInfo(ctx context.Context,
description := remoteStatus.Description
lastSyncTime, err := getLastSyncTime(description)
if err != nil {
if errors.Is(err, ErrLastSyncTimeNotFound) {
if errors.Is(err, corerbd.ErrLastSyncTimeNotFound) {
return nil, status.Errorf(codes.NotFound, "failed to get last sync time: %v", err)
}
log.ErrorLog(ctx, err.Error())
@ -812,11 +766,11 @@ func getLastSyncTime(description string) (*timestamppb.Timestamp, error) {
// In case there is no local snapshot timestamp return an error as the
// LastSyncTime is required.
if description == "" {
return nil, fmt.Errorf("empty description: %w", ErrLastSyncTimeNotFound)
return nil, fmt.Errorf("empty description: %w", corerbd.ErrLastSyncTimeNotFound)
}
splittedString := strings.SplitN(description, ",", 2)
if len(splittedString) == 1 {
return nil, fmt.Errorf("no local snapshot timestamp: %w", ErrLastSyncTimeNotFound)
return nil, fmt.Errorf("no local snapshot timestamp: %w", corerbd.ErrLastSyncTimeNotFound)
}
type localStatus struct {
LocalSnapshotTime int64 `json:"local_snapshot_timestamp"`
@ -831,7 +785,7 @@ func getLastSyncTime(description string) (*timestamppb.Timestamp, error) {
// If the json unmarsal is successful but the local snapshot time is 0, we
// need to consider it as an error as the LastSyncTime is required.
if localSnapTime.LocalSnapshotTime == 0 {
return nil, fmt.Errorf("empty local snapshot timestamp: %w", ErrLastSyncTimeNotFound)
return nil, fmt.Errorf("empty local snapshot timestamp: %w", corerbd.ErrLastSyncTimeNotFound)
}

lastUpdateTime := time.Unix(localSnapTime.LocalSnapshotTime, 0)
@ -840,29 +794,6 @@ func getLastSyncTime(description string) (*timestamppb.Timestamp, error) {
return lastSyncTime, nil
}

func resyncVolume(localStatus librbd.SiteMirrorImageStatus, rbdVol *rbdVolume, force bool) error {
if resyncRequired(localStatus) {
// If the force option is not set return the error message to retry
// with Force option.
if !force {
return status.Errorf(codes.FailedPrecondition,
"image is in %q state, description (%s). Force resync to recover volume",
localStatus.State, localStatus.Description)
}
err := rbdVol.resyncImage()
if err != nil {
return status.Error(codes.Internal, err.Error())
}

// If we issued a resync, return a non-final error as image needs to be recreated
// locally. Caller retries till RBD syncs an initial version of the image to
// report its status in the resync request.
return status.Error(codes.Unavailable, "awaiting initial resync due to split brain")
}

return nil
}

func checkVolumeResyncStatus(localStatus librbd.SiteMirrorImageStatus) error {
// we are considering 2 states to check resync started and resync completed
// as below. all other states will be considered as an error state so that
@ -882,39 +813,3 @@ func checkVolumeResyncStatus(localStatus librbd.SiteMirrorImageStatus) error {

return nil
}

// resyncRequired returns true if local image is in split-brain state and image
// needs resync.
func resyncRequired(localStatus librbd.SiteMirrorImageStatus) bool {
// resync is required if the image is in error state or the description
// contains split-brain message.
// In some corner cases like `re-player shutdown` the local image will not
// be in an error state. It would be also worth considering the `description`
// field to make sure about split-brain.
if localStatus.State == librbd.MirrorImageStatusStateError ||
strings.Contains(localStatus.Description, "split-brain") {
return true
}

return false
}

// repairResyncedImageID updates the existing image ID with new one.
func repairResyncedImageID(ctx context.Context, rv *rbdVolume, ready bool) error {
// During resync operation the local image will get deleted and a new
// image is recreated by the rbd mirroring. The new image will have a
// new image ID. Once resync is completed update the image ID in the OMAP
// to get the image removed from the trash during DeleteVolume.

// if the image is not completely resynced skip repairing image ID.
if !ready {
return nil
}
j, err := volJournal.Connect(rv.Monitors, rv.RadosNamespace, rv.conn.Creds)
if err != nil {
return err
}
defer j.Destroy()
// reset the image ID which is stored in the existing OMAP
return rv.repairImageID(ctx, j, true)
}
@ -23,6 +23,8 @@ import (
"testing"
"time"

corerbd "github.com/ceph/ceph-csi/internal/rbd"

librbd "github.com/ceph/go-ceph/rbd"
"github.com/ceph/go-ceph/rbd/admin"
"google.golang.org/protobuf/types/known/timestamppb"
@ -455,7 +457,7 @@ func TestValidateLastSyncTime(t *testing.T) {
"empty description",
"",
nil,
ErrLastSyncTimeNotFound.Error(),
corerbd.ErrLastSyncTimeNotFound.Error(),
},
{
"description without local_snapshot_timestamp",
@ -473,7 +475,7 @@ func TestValidateLastSyncTime(t *testing.T) {
"description with no JSON",
`replaying`,
nil,
ErrLastSyncTimeNotFound.Error(),
corerbd.ErrLastSyncTimeNotFound.Error(),
},
}
for _, tt := range tests {
@ -17,8 +17,6 @@ limitations under the License.
package csicommon

import (
"fmt"

"github.com/ceph/ceph-csi/internal/util/log"

"github.com/container-storage-interface/spec/lib/go/csi"
@ -71,6 +69,8 @@ func NewCSIDriver(name, v, nodeID string) *CSIDriver {

// ValidateControllerServiceRequest validates the controller
// plugin capabilities.
//
//nolint:interfacer // c can be of type fmt.Stringer, but that does not make the API clearer
func (d *CSIDriver) ValidateControllerServiceRequest(c csi.ControllerServiceCapability_RPC_Type) error {
if c == csi.ControllerServiceCapability_RPC_UNKNOWN {
return nil
@ -82,7 +82,7 @@ func (d *CSIDriver) ValidateControllerServiceRequest(c csi.ControllerServiceCapa
}
}

return status.Error(codes.InvalidArgument, fmt.Sprintf("%s", c)) //nolint
return status.Error(codes.InvalidArgument, c.String())
}

// AddControllerServiceCapabilities stores the controller capabilities
@ -118,7 +118,7 @@ func NewMiddlewareServerOption(withMetrics bool) grpc.ServerOption {
middleWare = append(middleWare, grpc_prometheus.UnaryServerInterceptor)
}

return grpc_middleware.WithUnaryServerChain(middleWare...)
return grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(middleWare...))
}

func getReqID(req interface{}) string {
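The replacement drops the go-grpc-middleware server-option helper and instead chains the interceptors explicitly before handing the result to plain grpc.UnaryInterceptor. A minimal sketch, assuming two unary interceptors logInterceptor and metricsInterceptor already exist:

// grpc_middleware.ChainUnaryServer folds the interceptors into a single one,
// and grpc.UnaryInterceptor installs that combined interceptor on the server.
opt := grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(logInterceptor, metricsInterceptor))
server := grpc.NewServer(opt)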
@ -53,7 +53,7 @@ func TestGenerateNonce(t *testing.T) {

func TestGenerateCipher(t *testing.T) {
t.Parallel()
// nolint:gosec // this passphrase is intentionally hardcoded
//nolint:gosec // this passphrase is intentionally hardcoded
passphrase := "my-cool-luks-passphrase"
salt := "unique-id-for-the-volume"

@ -123,7 +123,7 @@ func setConfigString(option *string, config map[string]interface{}, key string)
// these settings will be used when connecting to the Vault service with
// vc.connectVault().
//
// nolint:gocyclo,cyclop // iterating through many config options, not complex at all.
//nolint:gocyclo,cyclop // iterating through many config options, not complex at all.
func (vc *vaultConnection) initConnection(config map[string]interface{}) error {
vaultConfig := make(map[string]interface{})
keyContext := make(map[string]string)
@ -44,7 +44,8 @@ ServiceAccount from the Tenant that owns the volume to store/retrieve the
encryption passphrase of volumes.

Example JSON structure in the KMS config is,
{
"vault-tenant-sa": {
"encryptionKMSType": "vaulttenantsa",
"vaultAddress": "http://vault.default.svc.cluster.local:8200",
@ -66,7 +67,7 @@ Example JSON structure in the KMS config is,
}
},
...
}.
*/
type vaultTenantSA struct {
vaultTenantConnection
@ -160,7 +160,8 @@ VaultTokens represents a Hashicorp Vault KMS configuration that provides a
Token per tenant.

Example JSON structure in the KMS config is,
{
"vault-with-tokens": {
"encryptionKMSType": "vaulttokens",
"vaultAddress": "http://vault.default.svc.cluster.local:8200",
@ -183,7 +184,7 @@ Example JSON structure in the KMS config is,
}
},
...
}.
*/
type vaultTenantConnection struct {
vaultConnection
@ -353,7 +354,7 @@ func (kms *vaultTokensKMS) setTokenName(config map[string]interface{}) error {
// initCertificates updates the kms.vaultConfig with the options from config
// it calls the kubernetes secrets and get the required data.

// nolint:gocyclo,cyclop // iterating through many config options, not complex at all.
//nolint:gocyclo,cyclop // iterating through many config options, not complex at all.
func (vtc *vaultTenantConnection) initCertificates(config map[string]interface{}) error {
vaultConfig := make(map[string]interface{})
@ -131,6 +131,7 @@ func (nv *NFSVolume) CreateExport(backend *csi.Volume) error {
fs := backend.VolumeContext["fsName"]
nfsCluster := backend.VolumeContext["nfsCluster"]
path := backend.VolumeContext["subvolumePath"]
secTypes := backend.VolumeContext["secTypes"]

err := nv.setNFSCluster(nfsCluster)
if err != nil {
@ -142,12 +143,21 @@ func (nv *NFSVolume) CreateExport(backend *csi.Volume) error {
return fmt.Errorf("failed to get NFSAdmin: %w", err)
}

_, err = nfsa.CreateCephFSExport(nfs.CephFSExportSpec{
export := nfs.CephFSExportSpec{
FileSystemName: fs,
ClusterID: nfsCluster,
PseudoPath: nv.GetExportPath(),
Path: path,
})
}

if secTypes != "" {
export.SecType = []nfs.SecType{}
for _, secType := range strings.Split(secTypes, ",") {
export.SecType = append(export.SecType, nfs.SecType(secType))
}
}

_, err = nfsa.CreateCephFSExport(export)
switch {
case err == nil:
return nil
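The added lines let an optional, comma-separated secTypes entry in the volume context choose the NFS security flavours for the export. A hedged usage sketch with made-up values; the spec and SecType types come from the go-ceph nfs package already used above:

// Illustrative only: a secTypes string and the export spec it produces.
secTypes := "krb5,krb5i"

export := nfs.CephFSExportSpec{
	FileSystemName: "myfs",                      // assumed value
	ClusterID:      "my-nfs-cluster",            // assumed value
	PseudoPath:     "/ceph-csi/example-export",  // assumed value
	Path:           "/volumes/csi/csi-vol-0000", // assumed value
}

for _, secType := range strings.Split(secTypes, ",") {
	export.SecType = append(export.SecType, nfs.SecType(secType))
}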
@ -260,14 +270,14 @@ func (nv *NFSVolume) getNFSCluster() (string, error) {
fs := fscore.NewFileSystem(nv.conn)
fsName, err := fs.GetFsName(nv.ctx, nv.fscID)
if err != nil && errors.Is(err, util.ErrPoolNotFound) {
return "", fmt.Errorf("%w for ID %x: %v", ErrFilesystemNotFound, nv.fscID, err)
return "", fmt.Errorf("%w for ID %x: %w", ErrFilesystemNotFound, nv.fscID, err)
} else if err != nil {
return "", fmt.Errorf("failed to get filesystem name for ID %x: %w", nv.fscID, err)
}

mdPool, err := fs.GetMetadataPool(nv.ctx, fsName)
if err != nil && errors.Is(err, util.ErrPoolNotFound) {
return "", fmt.Errorf("metadata pool for %q %w: %v", fsName, ErrNotFound, err)
return "", fmt.Errorf("metadata pool for %q %w: %w", fsName, ErrNotFound, err)
} else if err != nil {
return "", fmt.Errorf("failed to get metadata pool for %q: %w", fsName, err)
}
@ -281,7 +291,7 @@ func (nv *NFSVolume) getNFSCluster() (string, error) {

clusterName, err := j.FetchAttribute(nv.ctx, mdPool, nv.objectUUID, clusterNameKey)
if err != nil && errors.Is(err, util.ErrPoolNotFound) || errors.Is(err, util.ErrKeyNotFound) {
return "", fmt.Errorf("cluster name for %q %w: %v", nv.objectUUID, ErrNotFound, err)
return "", fmt.Errorf("cluster name for %q %w: %w", nv.objectUUID, ErrNotFound, err)
} else if err != nil {
return "", fmt.Errorf("failed to get cluster name for %q: %w", nv.objectUUID, err)
}
@ -298,14 +308,14 @@ func (nv *NFSVolume) setNFSCluster(clusterName string) error {
fs := fscore.NewFileSystem(nv.conn)
fsName, err := fs.GetFsName(nv.ctx, nv.fscID)
if err != nil && errors.Is(err, util.ErrPoolNotFound) {
return fmt.Errorf("%w for ID %x: %v", ErrFilesystemNotFound, nv.fscID, err)
return fmt.Errorf("%w for ID %x: %w", ErrFilesystemNotFound, nv.fscID, err)
} else if err != nil {
return fmt.Errorf("failed to get filesystem name for ID %x: %w", nv.fscID, err)
}

mdPool, err := fs.GetMetadataPool(nv.ctx, fsName)
if err != nil && errors.Is(err, util.ErrPoolNotFound) {
return fmt.Errorf("metadata pool for %q %w: %v", fsName, ErrNotFound, err)
return fmt.Errorf("metadata pool for %q %w: %w", fsName, ErrNotFound, err)
} else if err != nil {
return fmt.Errorf("failed to get metadata pool for %q: %w", fsName, err)
}
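The switch from %v to a second %w in these wrappers relies on Go 1.20 allowing fmt.Errorf to wrap more than one error, so errors.Is can match both the sentinel and the underlying cause. A small self-contained sketch:

package main

import (
	"errors"
	"fmt"
)

var ErrNotFound = errors.New("not found")

func main() {
	cause := errors.New("pool lookup failed") // stand-in for the real error

	err := fmt.Errorf("metadata pool for %q %w: %w", "myfs", ErrNotFound, cause)

	fmt.Println(errors.Is(err, ErrNotFound)) // true
	fmt.Println(errors.Is(err, cause))       // true
}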
Some files were not shown because too many files have changed in this diff