Merge pull request #21 from ceph/devel

Sync rhs:devel with ceph:devel
OpenShift Merge Robot, 2021-08-31 12:08:42 -04:00 (committed by GitHub)
commit d446ba408c
67 changed files with 1154 additions and 980 deletions

.github/dependabot.yml (new file)

@@ -0,0 +1,38 @@
---
version: 2
updates:
  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "weekly"
    labels:
      - rebase
    commit-message:
      prefix: "rebase"
    ignore:
      # k8s.io/kubernetes prevents auto-updating these
      - dependency-name: "k8s.io/api"
      - dependency-name: "k8s.io/apiextensions-apiserver"
      - dependency-name: "k8s.io/apimachinery"
      - dependency-name: "k8s.io/apiserver"
      - dependency-name: "k8s.io/cli-runtime"
      - dependency-name: "k8s.io/client-go"
      - dependency-name: "k8s.io/cloud-provider"
      - dependency-name: "k8s.io/cluster-bootstrap"
      - dependency-name: "k8s.io/code-generator"
      - dependency-name: "k8s.io/component-base"
      - dependency-name: "k8s.io/component-helpers"
      - dependency-name: "k8s.io/controller-manager"
      - dependency-name: "k8s.io/cri-api"
      - dependency-name: "k8s.io/csi-translation-lib"
      - dependency-name: "k8s.io/kube-aggregator"
      - dependency-name: "k8s.io/kube-controller-manager"
      - dependency-name: "k8s.io/kube-proxy"
      - dependency-name: "k8s.io/kube-scheduler"
      - dependency-name: "k8s.io/kubectl"
      - dependency-name: "k8s.io/kubelet"
      - dependency-name: "k8s.io/legacy-cloud-providers"
      - dependency-name: "k8s.io/metrics"
      - dependency-name: "k8s.io/mount-utils"
      - dependency-name: "k8s.io/pod-security-admission"
      - dependency-name: "k8s.io/sample-apiserver"

.github/stale.yml (deleted)

@@ -1,45 +0,0 @@
# Configuration for probot-stale - https://github.com/probot/stale
# Issues or Pull Requests with these labels will never be
# considered stale. Set to `[]` to disable
---
exemptLabels:
  - keepalive
  - security
  - reliability
  - release requirement
# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false
# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: true
# Label to use when marking as stale
staleLabel: wontfix
# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 5
# Specify configuration settings that are specific to issues
issues:
  daysUntilStale: 90
  daysUntilClose: 7
  markComment: >
    This issue has been automatically marked as stale because it has not had
    recent activity. It will be closed in a week if no further activity occurs.
    Thank you for your contributions.
  closeComment: >
    This issue has been automatically closed due to inactivity. Please re-open
    if this still requires investigation.
# Specify configuration settings that are specific to PRs
pulls:
  daysUntilStale: 60
  daysUntilClose: 30
  markComment: >
    This pull request has been automatically marked as stale because it has not
    had recent activity. It will be closed in a month if no further activity
    occurs. Thank you for your contributions.
  closeComment: >
    This pull request has been automatically closed due to inactivity. Please
    re-open if these changes are still required.

.github/workflows/stale.yaml (new file)

@@ -0,0 +1,40 @@
---
# Reference https://github.com/actions/stale
name: "Mark or close stale issues and PRs"
# yamllint disable-line rule:truthy
on:
  schedule:
    # Run the stalebot every day at 9pm UTC
    - cron: "00 21 * * *"
# yamllint disable rule:line-length
jobs:
  stale:
    runs-on: ubuntu-18.04
    if: github.repository == 'ceph/ceph-csi'
    steps:
      - uses: actions/stale@v3
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          days-before-issue-stale: 30
          days-before-issue-close: 7
          stale-issue-message: >
            This issue has been automatically marked as stale because it has not had recent activity.
            It will be closed in a week if no further activity occurs.
            Thank you for your contributions.
          close-issue-message: >
            This issue has been automatically closed due to inactivity.
            Please re-open if this still requires investigation.
          stale-issue-label: "wontfix"
          exempt-issue-labels: "keepalive,security,reliability,release requirement"
          days-before-pr-stale: 30
          days-before-pr-close: 14
          stale-pr-message: >
            This pull request has been automatically marked as stale because it has not had
            recent activity. It will be closed in two weeks if no further activity occurs.
            Thank you for your contributions.
          close-pr-message: >
            This pull request has been automatically closed due to inactivity.
            Please re-open if these changes are still required.
          stale-pr-label: "stale"
          exempt-pr-labels: "keepalive,security,reliability,release requirement"

@@ -32,6 +32,35 @@ pull_request_rules:
message: "This pull request now has conflicts with the target branch. message: "This pull request now has conflicts with the target branch.
Could you please resolve conflicts and force push the corrected Could you please resolve conflicts and force push the corrected
changes? 🙏" changes? 🙏"
- name: update dependencies by dependabot (skip commitlint)
conditions:
- author=dependabot[bot]
- label!=DNM
- base~=^(devel)|(release-.+)$
- "#approved-reviews-by>=2"
- "#changes-requested-reviews-by=0"
- "approved-reviews-by=@ceph/ceph-csi-contributors"
- "approved-reviews-by=@ceph/ceph-csi-maintainers"
- "status-success=codespell"
- "status-success=multi-arch-build"
- "status-success=go-test"
- "status-success=golangci-lint"
- "status-success=gosec"
- "status-success=mod-check"
- "status-success=lint-extras"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.20"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.21"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.22"
- "status-success=ci/centos/mini-e2e/k8s-1.20"
- "status-success=ci/centos/mini-e2e/k8s-1.21"
- "status-success=ci/centos/mini-e2e/k8s-1.22"
- "status-success=ci/centos/upgrade-tests-cephfs"
- "status-success=ci/centos/upgrade-tests-rbd"
- "status-success=DCO"
actions:
merge: {}
dismiss_reviews: {}
delete_head_branch: {}
- name: automatic merge - name: automatic merge
conditions: conditions:
- label!=DNM - label!=DNM
@ -89,164 +118,6 @@ pull_request_rules:
merge: {} merge: {}
dismiss_reviews: {} dismiss_reviews: {}
delete_head_branch: {} delete_head_branch: {}
-  - name: backport patches to release v1.2.0 branch
-    conditions:
-      - base=devel
-      - label=backport-to-release-v1.2.0
-    actions:
-      backport:
-        branches:
-          - release-v1.2.0
-  # automerge backports if CI successfully ran
-  - name: automerge backport release-v1.2.0
-    conditions:
-      - author=mergify[bot]
-      - base=release-v1.2.0
-      - label!=DNM
-      - "#changes-requested-reviews-by=0"
-      - "#approved-reviews-by>=1"
-    actions:
-      merge: {}
-      dismiss_reviews: {}
-      delete_head_branch: {}
-  - name: backport patches to release-v2.0 branch
-    conditions:
-      - base=devel
-      - label=backport-to-release-v2.0
-    actions:
-      backport:
-        branches:
-          - release-v2.0
-  # automerge backports if CI successfully ran
-  - name: automerge backport release-v2.0
-    conditions:
-      - author=mergify[bot]
-      - base=release-v2.0
-      - label!=DNM
-      - "#changes-requested-reviews-by=0"
-      - "#approved-reviews-by>=2"
-      - "approved-reviews-by=@ceph/ceph-csi-contributors"
-      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
-    actions:
-      merge: {}
-      dismiss_reviews: {}
-      delete_head_branch: {}
-  - name: backport patches to release-v2.1 branch
-    conditions:
-      - base=devel
-      - label=backport-to-release-v2.1
-    actions:
-      backport:
-        branches:
-          - release-v2.1
-  # automerge backports if CI successfully ran
-  - name: automerge backport release-v2.1
-    conditions:
-      - author=mergify[bot]
-      - base=release-v2.1
-      - label!=DNM
-      - "#changes-requested-reviews-by=0"
-      - "#approved-reviews-by>=2"
-      - "approved-reviews-by=@ceph/ceph-csi-contributors"
-      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
-    actions:
-      merge: {}
-      dismiss_reviews: {}
-      delete_head_branch: {}
-  - name: backport patches to release-v3.0 branch
-    conditions:
-      - base=devel
-      - label=backport-to-release-v3.0
-    actions:
-      backport:
-        branches:
-          - release-v3.0
-  # automerge backports if CI successfully ran
-  - name: automerge backport release-v3.0
-    conditions:
-      - author=mergify[bot]
-      - base=release-v3.0
-      - label!=DNM
-      - "#changes-requested-reviews-by=0"
-      - "#approved-reviews-by>=2"
-      - "approved-reviews-by=@ceph/ceph-csi-contributors"
-      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
-    actions:
-      merge: {}
-      dismiss_reviews: {}
-      delete_head_branch: {}
-  - name: backport patches to release-v3.1 branch
-    conditions:
-      - base=devel
-      - label=backport-to-release-v3.1
-    actions:
-      backport:
-        branches:
-          - release-v3.1
-  # automerge backports if CI successfully ran
-  - name: automerge backport release-v3.1
-    conditions:
-      - author=mergify[bot]
-      - base=release-v3.1
-      - label!=DNM
-      - "#changes-requested-reviews-by=0"
-      - "#approved-reviews-by>=2"
-      - "approved-reviews-by=@ceph/ceph-csi-contributors"
-      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
-      - "status-success=multi-arch-build"
-      - "status-success=commitlint"
-      - "status-success=ci/centos/mini-e2e-helm/k8s-1.20"
-      - "status-success=ci/centos/mini-e2e-helm/k8s-1.21"
-      - "status-success=ci/centos/mini-e2e-helm/k8s-1.22"
-      - "status-success=ci/centos/mini-e2e/k8s-1.20"
-      - "status-success=ci/centos/mini-e2e/k8s-1.21"
-      - "status-success=ci/centos/mini-e2e/k8s-1.22"
-      - "status-success=ci/centos/upgrade-tests-cephfs"
-      - "status-success=ci/centos/upgrade-tests-rbd"
-      - "status-success=DCO"
-    actions:
-      merge: {}
-      dismiss_reviews: {}
-      delete_head_branch: {}
-  - name: backport patches to release-v3.2 branch
-    conditions:
-      - base=devel
-      - label=backport-to-release-v3.2
-    actions:
-      backport:
-        branches:
-          - release-v3.2
-  # automerge backports if CI successfully ran
-  - name: automerge backport release-v3.2
-    conditions:
-      - author=mergify[bot]
-      - base=release-v3.2
-      - label!=DNM
-      - "#approved-reviews-by>=2"
-      - "approved-reviews-by=@ceph/ceph-csi-contributors"
-      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
-      - "status-success=codespell"
-      - "status-success=multi-arch-build"
-      - "status-success=go-test"
-      - "status-success=golangci-lint"
-      - "status-success=gosec"
-      - "status-success=commitlint"
-      - "status-success=mod-check"
-      - "status-success=lint-extras"
-      - "#changes-requested-reviews-by=0"
-      - "status-success=ci/centos/mini-e2e-helm/k8s-1.20"
-      - "status-success=ci/centos/mini-e2e-helm/k8s-1.21"
-      - "status-success=ci/centos/mini-e2e-helm/k8s-1.22"
-      - "status-success=ci/centos/mini-e2e/k8s-1.20"
-      - "status-success=ci/centos/mini-e2e/k8s-1.21"
-      - "status-success=ci/centos/mini-e2e/k8s-1.22"
-      - "status-success=ci/centos/upgrade-tests-cephfs"
-      - "status-success=ci/centos/upgrade-tests-rbd"
-      - "status-success=DCO"
-    actions:
-      merge: {}
-      dismiss_reviews: {}
-      delete_head_branch: {}
  - name: backport patches to release-v3.3 branch
    conditions:
      - base=devel


@@ -55,7 +55,13 @@ support other orchestration environments in the future.
NOTE:

-- **`csiv0.3`** is deprecated with release of **`csi v1.1.0`**
+The supported window of Ceph CSI versions is known as "N.(x-1)":
+(N (Latest major release) . (x (Latest minor release) - 1)).
+
+For example, if Ceph CSI latest major version is `3.4.0` today, support is
+provided for the versions above `3.3.0`. If users are running an unsupported
+Ceph CSI version, they will be asked to upgrade when requesting support for the
+cluster.
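An aside for context, not part of the diff: the "N.(x-1)" window reduces to a one-line version predicate. A minimal sketch in Go, assuming plain major/minor integers rather than full semver parsing:

package main

import "fmt"

// supported reports whether major.minor falls inside the "N.(x-1)" window:
// same major version as the latest release, minor at most one behind.
func supported(latestMajor, latestMinor, major, minor int) bool {
	return major == latestMajor && minor >= latestMinor-1
}

func main() {
	// With v3.4.0 as the latest release, v3.3 is supported, v3.2 is not.
	fmt.Println(supported(3, 4, 3, 3)) // true
	fmt.Println(supported(3, 4, 3, 2)) // false
}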

## Support Matrix

@@ -106,6 +112,9 @@ in the Kubernetes documentation.
| v3.4.0 (Release) | quay.io/cephcsi/cephcsi | v3.4.0 |
| v3.3.1 (Release) | quay.io/cephcsi/cephcsi | v3.3.1 |
| v3.3.0 (Release) | quay.io/cephcsi/cephcsi | v3.3.0 |
+
+| Deprecated Ceph CSI Release/Branch | Container image name    | Image Tag |
+| ---------------------------------- | ----------------------- | --------- |
| v3.2.2 (Release) | quay.io/cephcsi/cephcsi | v3.2.2 |
| v3.2.1 (Release) | quay.io/cephcsi/cephcsi | v3.2.1 |
| v3.2.0 (Release) | quay.io/cephcsi/cephcsi | v3.2.0 |


@@ -29,6 +29,7 @@ import (
	"github.com/ceph/ceph-csi/internal/liveness"
	"github.com/ceph/ceph-csi/internal/rbd"
	"github.com/ceph/ceph-csi/internal/util"
+	"github.com/ceph/ceph-csi/internal/util/log"
	"k8s.io/klog/v2"
)
@@ -164,7 +165,7 @@ func main() {
		}
		os.Exit(0)
	}
-	util.DefaultLog("Driver version: %s and Git version: %s", util.DriverVersion, util.GitCommit)
+	log.DefaultLog("Driver version: %s and Git version: %s", util.DriverVersion, util.GitCommit)
	if conf.Vtype == "" {
		logAndExit("driver type not specified")
@@ -182,15 +183,15 @@ func main() {
		if pidErr != nil {
			klog.Errorf("Failed to get the PID limit, can not reconfigure: %v", pidErr)
		} else {
-			util.DefaultLog("Initial PID limit is set to %d", currentLimit)
+			log.DefaultLog("Initial PID limit is set to %d", currentLimit)
			err = util.SetPIDLimit(conf.PidLimit)
			switch {
			case err != nil:
				klog.Errorf("Failed to set new PID limit to %d: %v", conf.PidLimit, err)
			case conf.PidLimit == -1:
-				util.DefaultLog("Reconfigured PID limit to %d (max)", conf.PidLimit)
+				log.DefaultLog("Reconfigured PID limit to %d (max)", conf.PidLimit)
			default:
-				util.DefaultLog("Reconfigured PID limit to %d", conf.PidLimit)
+				log.DefaultLog("Reconfigured PID limit to %d", conf.PidLimit)
			}
		}
	}
@@ -209,7 +210,7 @@ func main() {
		}
	}
-	util.DefaultLog("Starting driver type: %v with name: %v", conf.Vtype, dname)
+	log.DefaultLog("Starting driver type: %v with name: %v", conf.Vtype, dname)
	switch conf.Vtype {
	case rbdType:
		validateCloneDepthFlag(&conf)
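An editorial aside for orientation, not part of the diff: this commit relocates the logging helpers from internal/util into a new internal/util/log package, so call sites change from util.DefaultLog/util.ErrorLog/util.WarningLog/util.DebugLog to the log-package equivalents with unchanged printf-style signatures. A rough sketch of what such wrappers over klog can look like; the names match the call sites in this diff, but the bodies are assumptions, not code copied from the repository:

package log

import (
	"context"
	"fmt"

	"k8s.io/klog/v2"
)

// DefaultLog logs an always-on informational message (no context needed).
func DefaultLog(format string, args ...interface{}) {
	klog.InfoDepth(1, fmt.Sprintf(format, args...))
}

// ErrorLog logs an error; the context is assumed to carry request metadata.
func ErrorLog(_ context.Context, format string, args ...interface{}) {
	klog.ErrorDepth(1, fmt.Sprintf(format, args...))
}

// WarningLog logs a warning with the same conventions as ErrorLog.
func WarningLog(_ context.Context, format string, args ...interface{}) {
	klog.WarningDepth(1, fmt.Sprintf(format, args...))
}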


@@ -103,6 +103,10 @@ func isAlreadyExistsCLIError(err error) bool {
		if strings.TrimSuffix(s, "\n") == "" {
			continue
		}
+		// Ignore warnings
+		if strings.Contains(s, "Warning") {
+			continue
+		}
		// Resource already exists error message
		if !strings.Contains(s, "Error from server (AlreadyExists)") {
			return false
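To read the addition above in context: kubectl can interleave warning lines with the real error text on stderr, so the helper now skips warnings before insisting that every remaining line is the AlreadyExists server error. A sketch of the full function as it plausibly looks after this hunk; the loop setup is reconstructed from the visible context, not copied from the repository:

import "strings"

// isAlreadyExistsCLIError returns true only when every non-empty,
// non-warning line of the kubectl error output reports AlreadyExists.
func isAlreadyExistsCLIError(err error) bool {
	if err == nil {
		return false
	}
	for _, s := range strings.Split(err.Error(), "\n") {
		// Skip empty lines.
		if strings.TrimSuffix(s, "\n") == "" {
			continue
		}
		// Ignore warnings such as deprecation notices.
		if strings.Contains(s, "Warning") {
			continue
		}
		// Every remaining line must be the AlreadyExists server error.
		if !strings.Contains(s, "Error from server (AlreadyExists)") {
			return false
		}
	}
	return true
}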


@@ -1261,7 +1261,7 @@ func (ka kubectlAction) String() string {
// no error occurred, or the timeout passed.
func retryKubectlInput(namespace string, action kubectlAction, data string, t int, args ...string) error {
	timeout := time.Duration(t) * time.Minute
-	e2elog.Logf("waiting for kubectl (%s -f %q args %s) to finish", action, args)
+	e2elog.Logf("waiting for kubectl (%s -f args %s) to finish", action, args)
	start := time.Now()
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
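The one-line fix above resolves a printf-verb mismatch: the old format string carried three verbs (%s, %q, %s) for only two arguments (action, args), so every log line ended in %!s(MISSING); go vet flags exactly this class of bug. A minimal standalone reproduction, using fmt.Printf in place of e2elog.Logf:

package main

import "fmt"

func main() {
	action := "create"
	args := []string{"-n", "default"}

	// Before: three verbs, two arguments -> trailing "%!s(MISSING)".
	fmt.Printf("waiting for kubectl (%s -f %q args %s) to finish\n", action, args)

	// After: two verbs matching the two arguments.
	fmt.Printf("waiting for kubectl (%s -f args %s) to finish\n", action, args)
}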

go.mod

@@ -35,11 +35,16 @@ require (
)
replace (
+	code.cloudfoundry.org/gofileutils => github.com/cloudfoundry/gofileutils v0.0.0-20170111115228-4d0c80011a0f
	github.com/golang/protobuf => github.com/golang/protobuf v1.4.3
	github.com/hashicorp/vault/api => github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a
	github.com/hashicorp/vault/sdk => github.com/hashicorp/vault/sdk v0.1.14-0.20201116234512-b4d4137dfe8b
	github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3
+	gomodules.xyz/jsonpatch/v2 => github.com/gomodules/jsonpatch/v2 v2.2.0
	google.golang.org/grpc => google.golang.org/grpc v1.35.0
+	//
+	// k8s.io/kubernetes depends on these k8s.io packages, but unversioned
+	//
	k8s.io/api => k8s.io/api v0.22.0
	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.22.0
	k8s.io/apimachinery => k8s.io/apimachinery v0.22.0
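Background, not part of the diff: k8s.io/kubernetes declares its staging repositories (k8s.io/api, k8s.io/apimachinery, and the rest) as unresolvable v0.0.0 requirements, so every consumer must pin each one with a replace directive, and dependabot cannot bump them individually. That is also why the new .github/dependabot.yml above ignores exactly this list. An abridged sketch of the pattern for a hypothetical consumer module (the real list pins every staging module, as in the hunk above):

// go.mod of a hypothetical module that imports k8s.io/kubernetes
module example.com/k8s-consumer

go 1.16

require k8s.io/kubernetes v1.22.0

// k8s.io/kubernetes requires its staging modules at v0.0.0, which the
// module proxy cannot resolve; each must be pinned to a real release.
replace (
	k8s.io/api => k8s.io/api v0.22.0
	k8s.io/apimachinery => k8s.io/apimachinery v0.22.0
)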

go.sum

@@ -36,7 +36,6 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
@@ -178,6 +177,7 @@ github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381/go.mod h1:e5+USP2j8Le2M0Jo3qKPFnNhuo1wueU4nWHCXBOfQ14=
+github.com/cloudfoundry/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:Zv7xtAh/T/tmfZlxpESaWWiWOdiJz2GfbBYxImuI6T4=
github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
@@ -380,6 +380,8 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8l
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
+github.com/gomodules/jsonpatch/v2 v2.2.0 h1:QBjDK/nX43P4z/Os3gnk8VeFdLDgBuMns1Wljyo607U=
+github.com/gomodules/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
@@ -1329,9 +1331,6 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
-gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY=
-gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=


@@ -21,19 +21,20 @@ import (
	"fmt"
	"github.com/ceph/ceph-csi/internal/util"
+	"github.com/ceph/ceph-csi/internal/util/log"
)
func (vo *volumeOptions) getFscID(ctx context.Context) (int64, error) {
	fsa, err := vo.conn.GetFSAdmin()
	if err != nil {
-		util.ErrorLog(ctx, "could not get FSAdmin, can not fetch filesystem ID for %s:", vo.FsName, err)
+		log.ErrorLog(ctx, "could not get FSAdmin, can not fetch filesystem ID for %s:", vo.FsName, err)
		return 0, err
	}
	volumes, err := fsa.EnumerateVolumes()
	if err != nil {
-		util.ErrorLog(ctx, "could not list volumes, can not fetch filesystem ID for %s:", vo.FsName, err)
+		log.ErrorLog(ctx, "could not list volumes, can not fetch filesystem ID for %s:", vo.FsName, err)
		return 0, err
	}
@@ -44,7 +45,7 @@ func (vo *volumeOptions) getFscID(ctx context.Context) (int64, error) {
		}
	}
-	util.ErrorLog(ctx, "failed to list volume %s", vo.FsName)
+	log.ErrorLog(ctx, "failed to list volume %s", vo.FsName)
	return 0, ErrVolumeNotFound
}
@@ -52,14 +53,14 @@ func (vo *volumeOptions) getFscID(ctx context.Context) (int64, error) {
func (vo *volumeOptions) getMetadataPool(ctx context.Context) (string, error) {
	fsa, err := vo.conn.GetFSAdmin()
	if err != nil {
-		util.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", vo.FsName, err)
+		log.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", vo.FsName, err)
		return "", err
	}
	fsPoolInfos, err := fsa.ListFileSystems()
	if err != nil {
-		util.ErrorLog(ctx, "could not list filesystems, can not fetch metadata pool for %s:", vo.FsName, err)
+		log.ErrorLog(ctx, "could not list filesystems, can not fetch metadata pool for %s:", vo.FsName, err)
		return "", err
	}
@@ -76,14 +77,14 @@ func (vo *volumeOptions) getMetadataPool(ctx context.Context) (string, error) {
func (vo *volumeOptions) getFsName(ctx context.Context) (string, error) {
	fsa, err := vo.conn.GetFSAdmin()
	if err != nil {
-		util.ErrorLog(ctx, "could not get FSAdmin, can not fetch filesystem name for ID %d:", vo.FscID, err)
+		log.ErrorLog(ctx, "could not get FSAdmin, can not fetch filesystem name for ID %d:", vo.FscID, err)
		return "", err
	}
	volumes, err := fsa.EnumerateVolumes()
	if err != nil {
-		util.ErrorLog(ctx, "could not list volumes, can not fetch filesystem name for ID %d:", vo.FscID, err)
+		log.ErrorLog(ctx, "could not list volumes, can not fetch filesystem name for ID %d:", vo.FscID, err)
		return "", err
	}


@@ -20,7 +20,7 @@ import (
	"context"
	"errors"
-	"github.com/ceph/ceph-csi/internal/util"
+	"github.com/ceph/ceph-csi/internal/util/log"
)
// cephFSCloneState describes the status of the clone.
@@ -64,7 +64,7 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
	snapshotID := cloneID
	err := parentvolOpt.createSnapshot(ctx, snapshotID, volID)
	if err != nil {
-		util.ErrorLog(ctx, "failed to create snapshot %s %v", snapshotID, err)
+		log.ErrorLog(ctx, "failed to create snapshot %s %v", snapshotID, err)
		return err
	}
@@ -78,57 +78,57 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
		if protectErr != nil {
			err = parentvolOpt.deleteSnapshot(ctx, snapshotID, volID)
			if err != nil {
-				util.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
+				log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
			}
		}
		if cloneErr != nil {
			if err = volOpt.purgeVolume(ctx, cloneID, true); err != nil {
-				util.ErrorLog(ctx, "failed to delete volume %s: %v", cloneID, err)
+				log.ErrorLog(ctx, "failed to delete volume %s: %v", cloneID, err)
			}
			if err = parentvolOpt.unprotectSnapshot(ctx, snapshotID, volID); err != nil {
				// In case the snap is already unprotected we get ErrSnapProtectionExist error code
				// in that case we are safe and we could discard this error and we are good to go
				// ahead with deletion
				if !errors.Is(err, ErrSnapProtectionExist) {
-					util.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapshotID, err)
+					log.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapshotID, err)
				}
			}
			if err = parentvolOpt.deleteSnapshot(ctx, snapshotID, volID); err != nil {
-				util.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
+				log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
			}
		}
	}()
	protectErr = parentvolOpt.protectSnapshot(ctx, snapshotID, volID)
	if protectErr != nil {
-		util.ErrorLog(ctx, "failed to protect snapshot %s %v", snapshotID, protectErr)
+		log.ErrorLog(ctx, "failed to protect snapshot %s %v", snapshotID, protectErr)
		return protectErr
	}
	cloneErr = parentvolOpt.cloneSnapshot(ctx, volID, snapshotID, cloneID, volOpt)
	if cloneErr != nil {
-		util.ErrorLog(ctx, "failed to clone snapshot %s %s to %s %v", volID, snapshotID, cloneID, cloneErr)
+		log.ErrorLog(ctx, "failed to clone snapshot %s %s to %s %v", volID, snapshotID, cloneID, cloneErr)
		return cloneErr
	}
	cloneState, cloneErr := volOpt.getCloneState(ctx, cloneID)
	if cloneErr != nil {
-		util.ErrorLog(ctx, "failed to get clone state: %v", cloneErr)
+		log.ErrorLog(ctx, "failed to get clone state: %v", cloneErr)
		return cloneErr
	}
	if cloneState != cephFSCloneComplete {
-		util.ErrorLog(ctx, "clone %s did not complete: %v", cloneID, cloneState.toError())
+		log.ErrorLog(ctx, "clone %s did not complete: %v", cloneID, cloneState.toError())
		return cloneState.toError()
	}
	// This is a work around to fix sizing issue for cloned images
	err = volOpt.resizeVolume(ctx, cloneID, volOpt.Size)
	if err != nil {
-		util.ErrorLog(ctx, "failed to expand volume %s: %v", cloneID, err)
+		log.ErrorLog(ctx, "failed to expand volume %s: %v", cloneID, err)
		return err
	}
@@ -138,13 +138,13 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
		// in that case we are safe and we could discard this error and we are good to go
		// ahead with deletion
		if !errors.Is(err, ErrSnapProtectionExist) {
-			util.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapshotID, err)
+			log.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapshotID, err)
			return err
		}
	}
	if err = parentvolOpt.deleteSnapshot(ctx, snapshotID, volID); err != nil {
-		util.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
+		log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
		return err
	}
@@ -171,14 +171,14 @@ func cleanupCloneFromSubvolumeSnapshot(
	if snapInfo.Protected == snapshotIsProtected {
		err = parentVolOpt.unprotectSnapshot(ctx, snapShotID, volID)
		if err != nil {
-			util.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapShotID, err)
+			log.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapShotID, err)
			return err
		}
	}
	err = parentVolOpt.deleteSnapshot(ctx, snapShotID, volID)
	if err != nil {
-		util.ErrorLog(ctx, "failed to delete snapshot %s %v", snapShotID, err)
+		log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapShotID, err)
		return err
	}
@@ -206,7 +206,7 @@ func createCloneFromSnapshot(
	if err != nil {
		if !isCloneRetryError(err) {
			if dErr := volOptions.purgeVolume(ctx, volumeID(vID.FsSubvolName), true); dErr != nil {
-				util.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, dErr)
+				log.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, dErr)
			}
		}
	}
@@ -214,7 +214,7 @@ func createCloneFromSnapshot(
	cloneState, err := volOptions.getCloneState(ctx, volumeID(vID.FsSubvolName))
	if err != nil {
-		util.ErrorLog(ctx, "failed to get clone state: %v", err)
+		log.ErrorLog(ctx, "failed to get clone state: %v", err)
		return err
	}
@@ -227,7 +227,7 @@ func createCloneFromSnapshot(
	// in the new cloned volume too. Till then we are explicitly making the size set
	err = volOptions.resizeVolume(ctx, volumeID(vID.FsSubvolName), volOptions.Size)
	if err != nil {
-		util.ErrorLog(ctx, "failed to expand volume %s with error: %v", vID.FsSubvolName, err)
+		log.ErrorLog(ctx, "failed to expand volume %s with error: %v", vID.FsSubvolName, err)
		return err
	}
@@ -238,7 +238,7 @@ func createCloneFromSnapshot(
func (vo *volumeOptions) getCloneState(ctx context.Context, volID volumeID) (cephFSCloneState, error) {
	fsa, err := vo.conn.GetFSAdmin()
	if err != nil {
-		util.ErrorLog(
+		log.ErrorLog(
			ctx,
			"could not get FSAdmin, can get clone status for volume %s with ID %s: %v",
			vo.FsName,
@@ -250,7 +250,7 @@ func (vo *volumeOptions) getCloneState(ctx context.Context, volID volumeID) (cephFSCloneState, error) {
	cs, err := fsa.CloneStatus(vo.FsName, vo.SubvolumeGroup, string(volID))
	if err != nil {
-		util.ErrorLog(ctx, "could not get clone state for volume %s with ID %s: %v", vo.FsName, string(volID), err)
+		log.ErrorLog(ctx, "could not get clone state for volume %s with ID %s: %v", vo.FsName, string(volID), err)
		return cephFSCloneError, err
	}


@@ -23,6 +23,7 @@ import (
	csicommon "github.com/ceph/ceph-csi/internal/csi-common"
	"github.com/ceph/ceph-csi/internal/util"
+	"github.com/ceph/ceph-csi/internal/util/log"
	"github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/golang/protobuf/ptypes/timestamp"
@@ -59,7 +60,7 @@ func (cs *ControllerServer) createBackingVolume(
	var err error
	if sID != nil {
		if err = cs.OperationLocks.GetRestoreLock(sID.SnapshotID); err != nil {
-			util.ErrorLog(ctx, err.Error())
+			log.ErrorLog(ctx, err.Error())
			return status.Error(codes.Aborted, err.Error())
		}
@@ -67,7 +68,7 @@ func (cs *ControllerServer) createBackingVolume(
		err = createCloneFromSnapshot(ctx, parentVolOpt, volOptions, vID, sID)
		if err != nil {
-			util.ErrorLog(ctx, "failed to create clone from snapshot %s: %v", sID.FsSnapshotName, err)
+			log.ErrorLog(ctx, "failed to create clone from snapshot %s: %v", sID.FsSnapshotName, err)
			return err
		}
@@ -76,7 +77,7 @@ func (cs *ControllerServer) createBackingVolume(
	}
	if parentVolOpt != nil {
		if err = cs.OperationLocks.GetCloneLock(pvID.VolumeID); err != nil {
-			util.ErrorLog(ctx, err.Error())
+			log.ErrorLog(ctx, err.Error())
			return status.Error(codes.Aborted, err.Error())
		}
@@ -88,7 +89,7 @@ func (cs *ControllerServer) createBackingVolume(
			volOptions,
			parentVolOpt)
		if err != nil {
-			util.ErrorLog(ctx, "failed to create clone from subvolume %s: %v", volumeID(pvID.FsSubvolName), err)
+			log.ErrorLog(ctx, "failed to create clone from subvolume %s: %v", volumeID(pvID.FsSubvolName), err)
			return err
		}
@@ -97,7 +98,7 @@ func (cs *ControllerServer) createBackingVolume(
	}
	if err = createVolume(ctx, volOptions, volumeID(vID.FsSubvolName), volOptions.Size); err != nil {
-		util.ErrorLog(ctx, "failed to create volume %s: %v", volOptions.RequestName, err)
+		log.ErrorLog(ctx, "failed to create volume %s: %v", volOptions.RequestName, err)
		return status.Error(codes.Internal, err.Error())
	}
@@ -150,7 +151,7 @@ func (cs *ControllerServer) CreateVolume(
	ctx context.Context,
	req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
	if err := cs.validateCreateVolumeRequest(req); err != nil {
-		util.ErrorLog(ctx, "CreateVolumeRequest validation failed: %v", err)
+		log.ErrorLog(ctx, "CreateVolumeRequest validation failed: %v", err)
		return nil, err
	}
@@ -161,7 +162,7 @@ func (cs *ControllerServer) CreateVolume(
	cr, err := util.NewAdminCredentials(secret)
	if err != nil {
-		util.ErrorLog(ctx, "failed to retrieve admin credentials: %v", err)
+		log.ErrorLog(ctx, "failed to retrieve admin credentials: %v", err)
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
@@ -169,7 +170,7 @@ func (cs *ControllerServer) CreateVolume(
	// Existence and conflict checks
	if acquired := cs.VolumeLocks.TryAcquire(requestName); !acquired {
-		util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, requestName)
+		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, requestName)
		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, requestName)
	}
@@ -177,7 +178,7 @@ func (cs *ControllerServer) CreateVolume(
	volOptions, err := newVolumeOptions(ctx, requestName, req, cr)
	if err != nil {
-		util.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)
+		log.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
@@ -216,7 +217,7 @@ func (cs *ControllerServer) CreateVolume(
		if err != nil {
			purgeErr := volOptions.purgeVolume(ctx, volumeID(vID.FsSubvolName), false)
			if purgeErr != nil {
-				util.ErrorLog(ctx, "failed to delete volume %s: %v", requestName, purgeErr)
+				log.ErrorLog(ctx, "failed to delete volume %s: %v", requestName, purgeErr)
				// All errors other than ErrVolumeNotFound should return an error back to the caller
				if !errors.Is(purgeErr, ErrVolumeNotFound) {
					return nil, status.Error(codes.Internal, purgeErr.Error())
@@ -224,10 +225,10 @@ func (cs *ControllerServer) CreateVolume(
			}
			errUndo := undoVolReservation(ctx, volOptions, *vID, secret)
			if errUndo != nil {
-				util.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)",
+				log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)",
					requestName, errUndo)
			}
-			util.ErrorLog(ctx, "failed to expand volume %s: %v", volumeID(vID.FsSubvolName), err)
+			log.ErrorLog(ctx, "failed to expand volume %s: %v", volumeID(vID.FsSubvolName), err)
			return nil, status.Error(codes.Internal, err.Error())
		}
@@ -264,7 +265,7 @@ func (cs *ControllerServer) CreateVolume(
		if !isCloneRetryError(err) {
			errDefer := undoVolReservation(ctx, volOptions, *vID, secret)
			if errDefer != nil {
-				util.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)",
+				log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)",
					requestName, errDefer)
			}
		}
@@ -285,7 +286,7 @@ func (cs *ControllerServer) CreateVolume(
	if err != nil {
		purgeErr := volOptions.purgeVolume(ctx, volumeID(vID.FsSubvolName), true)
		if purgeErr != nil {
-			util.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, purgeErr)
+			log.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, purgeErr)
			// All errors other than ErrVolumeNotFound should return an error back to the caller
			if !errors.Is(purgeErr, ErrVolumeNotFound) {
				// If the subvolume deletion is failed, we should not cleanup
@@ -297,12 +298,12 @@ func (cs *ControllerServer) CreateVolume(
				return nil, status.Error(codes.Internal, purgeErr.Error())
			}
		}
-		util.ErrorLog(ctx, "failed to get subvolume path %s: %v", vID.FsSubvolName, err)
+		log.ErrorLog(ctx, "failed to get subvolume path %s: %v", vID.FsSubvolName, err)
		return nil, status.Error(codes.Internal, err.Error())
	}
-	util.DebugLog(ctx, "cephfs: successfully created backing volume named %s for request name %s",
+	log.DebugLog(ctx, "cephfs: successfully created backing volume named %s for request name %s",
		vID.FsSubvolName, requestName)
	volumeContext := req.GetParameters()
	volumeContext["subvolumeName"] = vID.FsSubvolName
@@ -330,7 +331,7 @@ func (cs *ControllerServer) DeleteVolume(
	ctx context.Context,
	req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
	if err := cs.validateDeleteVolumeRequest(); err != nil {
-		util.ErrorLog(ctx, "DeleteVolumeRequest validation failed: %v", err)
+		log.ErrorLog(ctx, "DeleteVolumeRequest validation failed: %v", err)
		return nil, err
	}
@@ -340,7 +341,7 @@ func (cs *ControllerServer) DeleteVolume(
	// lock out parallel delete operations
	if acquired := cs.VolumeLocks.TryAcquire(string(volID)); !acquired {
-		util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
+		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, string(volID))
	}
@@ -348,7 +349,7 @@ func (cs *ControllerServer) DeleteVolume(
	// lock out volumeID for clone and expand operation
	if err := cs.OperationLocks.GetDeleteLock(req.GetVolumeId()); err != nil {
-		util.ErrorLog(ctx, err.Error())
+		log.ErrorLog(ctx, err.Error())
		return nil, status.Error(codes.Aborted, err.Error())
	}
@@ -360,7 +361,7 @@ func (cs *ControllerServer) DeleteVolume(
		// if error is ErrPoolNotFound, the pool is already deleted we dont
		// need to worry about deleting subvolume or omap data, return success
		if errors.Is(err, util.ErrPoolNotFound) {
-			util.WarningLog(ctx, "failed to get backend volume for %s: %v", string(volID), err)
+			log.WarningLog(ctx, "failed to get backend volume for %s: %v", string(volID), err)
			return &csi.DeleteVolumeResponse{}, nil
		}
@@ -371,7 +372,7 @@ func (cs *ControllerServer) DeleteVolume(
			return &csi.DeleteVolumeResponse{}, nil
		}
-		util.ErrorLog(ctx, "Error returned from newVolumeOptionsFromVolID: %v", err)
+		log.ErrorLog(ctx, "Error returned from newVolumeOptionsFromVolID: %v", err)
		// All errors other than ErrVolumeNotFound should return an error back to the caller
		if !errors.Is(err, ErrVolumeNotFound) {
@@ -404,14 +405,14 @@ func (cs *ControllerServer) DeleteVolume(
	// Deleting a volume requires admin credentials
	cr, err := util.NewAdminCredentials(secrets)
	if err != nil {
-		util.ErrorLog(ctx, "failed to retrieve admin credentials: %v", err)
+		log.ErrorLog(ctx, "failed to retrieve admin credentials: %v", err)
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	defer cr.DeleteCredentials()
	if err = volOptions.purgeVolume(ctx, volumeID(vID.FsSubvolName), false); err != nil {
-		util.ErrorLog(ctx, "failed to delete volume %s: %v", volID, err)
+		log.ErrorLog(ctx, "failed to delete volume %s: %v", volID, err)
		if errors.Is(err, ErrVolumeHasSnapshots) {
			return nil, status.Error(codes.FailedPrecondition, err.Error())
		}
@@ -425,7 +426,7 @@ func (cs *ControllerServer) DeleteVolume(
		return nil, status.Error(codes.Internal, err.Error())
	}
-	util.DebugLog(ctx, "cephfs: successfully deleted volume %s", volID)
+	log.DebugLog(ctx, "cephfs: successfully deleted volume %s", volID)
	return &csi.DeleteVolumeResponse{}, nil
}
@@ -454,7 +455,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
	ctx context.Context,
	req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
	if err := cs.validateExpandVolumeRequest(req); err != nil {
-		util.ErrorLog(ctx, "ControllerExpandVolumeRequest validation failed: %v", err)
+		log.ErrorLog(ctx, "ControllerExpandVolumeRequest validation failed: %v", err)
		return nil, err
	}
@@ -464,7 +465,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
	// lock out parallel delete operations
	if acquired := cs.VolumeLocks.TryAcquire(volID); !acquired {
-		util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
+		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
	}
@@ -472,7 +473,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
	// lock out volumeID for clone and delete operation
	if err := cs.OperationLocks.GetExpandLock(volID); err != nil {
-		util.ErrorLog(ctx, err.Error())
+		log.ErrorLog(ctx, err.Error())
		return nil, status.Error(codes.Aborted, err.Error())
	}
@@ -486,7 +487,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
	volOptions, volIdentifier, err := newVolumeOptionsFromVolID(ctx, volID, nil, secret)
	if err != nil {
-		util.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)
+		log.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
@@ -495,7 +496,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
	RoundOffSize := util.RoundOffBytes(req.GetCapacityRange().GetRequiredBytes())
	if err = volOptions.resizeVolume(ctx, volumeID(volIdentifier.FsSubvolName), RoundOffSize); err != nil {
-		util.ErrorLog(ctx, "failed to expand volume %s: %v", volumeID(volIdentifier.FsSubvolName), err)
+		log.ErrorLog(ctx, "failed to expand volume %s: %v", volumeID(volIdentifier.FsSubvolName), err)
		return nil, status.Error(codes.Internal, err.Error())
	}
@@ -530,14 +531,14 @@ func (cs *ControllerServer) CreateSnapshot(
	sourceVolID := req.GetSourceVolumeId()
	// Existence and conflict checks
	if acquired := cs.SnapshotLocks.TryAcquire(requestName); !acquired {
-		util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, requestName)
+		log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, requestName)
		return nil, status.Errorf(codes.Aborted, util.SnapshotOperationAlreadyExistsFmt, requestName)
	}
	defer cs.SnapshotLocks.Release(requestName)
	if err = cs.OperationLocks.GetSnapshotCreateLock(sourceVolID); err != nil {
-		util.ErrorLog(ctx, err.Error())
+		log.ErrorLog(ctx, err.Error())
		return nil, status.Error(codes.Aborted, err.Error())
	}
@@ -548,7 +549,7 @@ func (cs *ControllerServer) CreateSnapshot(
	parentVolOptions, vid, err := newVolumeOptionsFromVolID(ctx, sourceVolID, nil, req.GetSecrets())
	if err != nil {
		if errors.Is(err, util.ErrPoolNotFound) {
-			util.WarningLog(ctx, "failed to get backend volume for %s: %v", sourceVolID, err)
+			log.WarningLog(ctx, "failed to get backend volume for %s: %v", sourceVolID, err)
			return nil, status.Error(codes.NotFound, err.Error())
		}
@@ -576,7 +577,7 @@ func (cs *ControllerServer) CreateSnapshot(
	// lock out parallel snapshot create operations
	if acquired := cs.VolumeLocks.TryAcquire(sourceVolID); !acquired {
-		util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, sourceVolID)
+		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, sourceVolID)
		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, sourceVolID)
	}
@@ -605,7 +606,7 @@ func (cs *ControllerServer) CreateSnapshot(
		if sid != nil {
			errDefer := undoSnapReservation(ctx, parentVolOptions, *sid, snapName, cr)
			if errDefer != nil {
-				util.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)",
+				log.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)",
					requestName, errDefer)
			}
		}
@@ -620,7 +621,7 @@ func (cs *ControllerServer) CreateSnapshot(
			err = parentVolOptions.protectSnapshot(ctx, volumeID(sid.FsSnapshotName), volumeID(vid.FsSubvolName))
			if err != nil {
				protected = false
-				util.WarningLog(ctx, "failed to protect snapshot of snapshot: %s (%s)",
+				log.WarningLog(ctx, "failed to protect snapshot of snapshot: %s (%s)",
					sid.FsSnapshotName, err)
			}
		}
@@ -645,7 +646,7 @@ func (cs *ControllerServer) CreateSnapshot(
		if err != nil {
			errDefer := undoSnapReservation(ctx, parentVolOptions, *sID, snapName, cr)
			if errDefer != nil {
-				util.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)",
+				log.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)",
					requestName, errDefer)
			}
		}
@@ -672,7 +673,7 @@ func doSnapshot(ctx context.Context, volOpt *volumeOptions, subvolumeName, snaps
	snap := snapshotInfo{}
	err := volOpt.createSnapshot(ctx, snapID, volID)
	if err != nil {
-		util.ErrorLog(ctx, "failed to create snapshot %s %v", snapID, err)
+		log.ErrorLog(ctx, "failed to create snapshot %s %v", snapID, err)
		return snap, err
	}
@@ -680,13 +681,13 @@ func doSnapshot(ctx context.Context, volOpt *volumeOptions, subvolumeName, snaps
		if err != nil {
			dErr := volOpt.deleteSnapshot(ctx, snapID, volID)
			if dErr != nil {
-				util.ErrorLog(ctx, "failed to delete snapshot %s %v", snapID, err)
+				log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapID, err)
			}
		}
	}()
	snap, err = volOpt.getSnapshotInfo(ctx, snapID, volID)
	if err != nil {
-		util.ErrorLog(ctx, "failed to get snapshot info %s %v", snapID, err)
+		log.ErrorLog(ctx, "failed to get snapshot info %s %v", snapID, err)
		return snap, fmt.Errorf("failed to get snapshot info for snapshot:%s", snapID)
	}
@@ -698,7 +699,7 @@ func doSnapshot(ctx context.Context, volOpt *volumeOptions, subvolumeName, snaps
	snap.CreationTime = t
	err = volOpt.protectSnapshot(ctx, snapID, volID)
	if err != nil {
-		util.ErrorLog(ctx, "failed to protect snapshot %s %v", snapID, err)
+		log.ErrorLog(ctx, "failed to protect snapshot %s %v", snapID, err)
	}
	return snap, err
@@ -707,7 +708,7 @@ func doSnapshot(ctx context.Context, volOpt *volumeOptions, subvolumeName, snaps
func (cs *ControllerServer) validateSnapshotReq(ctx context.Context, req *csi.CreateSnapshotRequest) error {
	if err := cs.Driver.ValidateControllerServiceRequest(
		csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
-		util.ErrorLog(ctx, "invalid create snapshot req: %v", protosanitizer.StripSecrets(req))
+		log.ErrorLog(ctx, "invalid create snapshot req: %v", protosanitizer.StripSecrets(req))
		return err
	}
@@ -730,7 +731,7 @@ func (cs *ControllerServer) DeleteSnapshot(
	req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
	if err := cs.Driver.ValidateControllerServiceRequest(
		csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
-		util.ErrorLog(ctx, "invalid delete snapshot req: %v", protosanitizer.StripSecrets(req))
+		log.ErrorLog(ctx, "invalid delete snapshot req: %v", protosanitizer.StripSecrets(req))
		return nil, err
	}
@@ -746,7 +747,7 @@ func (cs *ControllerServer) DeleteSnapshot(
	}
	if acquired := cs.SnapshotLocks.TryAcquire(snapshotID); !acquired {
-		util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID)
+		log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID)
		return nil, status.Errorf(codes.Aborted, util.SnapshotOperationAlreadyExistsFmt, snapshotID)
	}
@@ -754,7 +755,7 @@ func (cs *ControllerServer) DeleteSnapshot(
	// lock out snapshotID for restore operation
	if err = cs.OperationLocks.GetDeleteLock(snapshotID); err != nil {
-		util.ErrorLog(ctx, err.Error())
+		log.ErrorLog(ctx, err.Error())
		return nil, status.Error(codes.Aborted, err.Error())
	}
@@ -766,7 +767,7 @@ func (cs *ControllerServer) DeleteSnapshot(
	case errors.Is(err, util.ErrPoolNotFound):
		// if error is ErrPoolNotFound, the pool is already deleted we dont
		// need to worry about deleting snapshot or omap data, return success
-		util.WarningLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)
+		log.WarningLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)
		return &csi.DeleteSnapshotResponse{}, nil
	case errors.Is(err, util.ErrKeyNotFound):
@@ -777,7 +778,7 @@ func (cs *ControllerServer) DeleteSnapshot(
	case errors.Is(err, ErrSnapNotFound):
		err = undoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
		if err != nil {
-			util.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
+			log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
				sid.FsSubvolName, sid.FsSnapshotName, err)
			return nil, status.Error(codes.Internal, err.Error())
@@ -787,10 +788,10 @@ func (cs *ControllerServer) DeleteSnapshot(
	case errors.Is(err, ErrVolumeNotFound):
		// if the error is ErrVolumeNotFound, the subvolume is already deleted
		// from backend, Hence undo the omap entries and return success
-		util.ErrorLog(ctx, "Volume not present")
+		log.ErrorLog(ctx, "Volume not present")
		err = undoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
if err != nil { if err != nil {
util.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)", log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
sid.FsSubvolName, sid.FsSnapshotName, err) sid.FsSubvolName, sid.FsSnapshotName, err)
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
@ -806,7 +807,7 @@ func (cs *ControllerServer) DeleteSnapshot(
// safeguard against parallel create or delete requests against the same // safeguard against parallel create or delete requests against the same
// name // name
if acquired := cs.SnapshotLocks.TryAcquire(sid.RequestName); !acquired { if acquired := cs.SnapshotLocks.TryAcquire(sid.RequestName); !acquired {
util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, sid.RequestName) log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, sid.RequestName)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, sid.RequestName) return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, sid.RequestName)
} }
@ -827,7 +828,7 @@ func (cs *ControllerServer) DeleteSnapshot(
} }
err = undoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr) err = undoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
if err != nil { if err != nil {
util.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)", log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
sid.RequestName, sid.FsSnapshotName, err) sid.RequestName, sid.FsSnapshotName, err)
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
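
Every hunk above applies the same mechanical refactor: logging helpers previously exported from internal/util now come from the new internal/util/log package, while non-logging utilities (locks, error sentinels such as util.SnapshotOperationAlreadyExistsFmt) stay in internal/util. A minimal sketch of a post-refactor call site, assuming the helper signatures are unchanged by the move (the function name and messages below are illustrative, not taken from this diff):

package cephfs

import (
	"context"

	"github.com/ceph/ceph-csi/internal/util/log"
)

// demoLogging is a hypothetical call site: the context-aware helpers
// (DebugLog, WarningLog, ErrorLog) take a ctx so the request ID that
// the GRPC interceptor stored on it is prefixed to each message.
func demoLogging(ctx context.Context) {
	log.DebugLog(ctx, "processing snapshot %s", "csi-snap-example")
	log.WarningLog(ctx, "retrying snapshot %s", "csi-snap-example")
}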

View File

@ -20,6 +20,7 @@ import (
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/journal"
"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
"github.com/container-storage-interface/spec/lib/go/csi"
)
@ -93,11 +94,11 @@ func (fs *Driver) Run(conf *util.Config) {
// Configuration
if err = loadAvailableMounters(conf); err != nil {
-util.FatalLogMsg("cephfs: failed to load ceph mounters: %v", err)
+log.FatalLogMsg("cephfs: failed to load ceph mounters: %v", err)
}
if err = util.WriteCephConfig(); err != nil {
-util.FatalLogMsg("failed to write ceph configuration file: %v", err)
+log.FatalLogMsg("failed to write ceph configuration file: %v", err)
}
// Use passed in instance ID, if provided for omap suffix naming
@ -112,7 +113,7 @@ func (fs *Driver) Run(conf *util.Config) {
fs.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID)
if fs.cd == nil {
-util.FatalLogMsg("failed to initialize CSI driver")
+log.FatalLogMsg("failed to initialize CSI driver")
}
if conf.IsControllerServer || !conf.IsNodeServer {
@ -134,7 +135,7 @@ func (fs *Driver) Run(conf *util.Config) {
if conf.IsNodeServer {
topology, err = util.GetTopologyFromDomainLabels(conf.DomainLabels, conf.NodeID, conf.DriverName)
if err != nil {
-util.FatalLogMsg(err.Error())
+log.FatalLogMsg(err.Error())
}
fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology)
}
@ -145,7 +146,7 @@ func (fs *Driver) Run(conf *util.Config) {
if !conf.IsControllerServer && !conf.IsNodeServer {
topology, err = util.GetTopologyFromDomainLabels(conf.DomainLabels, conf.NodeID, conf.DriverName)
if err != nil {
-util.FatalLogMsg(err.Error())
+log.FatalLogMsg(err.Error())
}
fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology)
fs.cs = NewControllerServer(fs.cd)
@ -161,14 +162,14 @@ func (fs *Driver) Run(conf *util.Config) {
}
server.Start(conf.Endpoint, conf.HistogramOption, srv, conf.EnableGRPCMetrics)
if conf.EnableGRPCMetrics {
-util.WarningLogMsg("EnableGRPCMetrics is deprecated")
+log.WarningLogMsg("EnableGRPCMetrics is deprecated")
go util.StartMetricsServer(conf)
}
if conf.EnableProfiling {
if !conf.EnableGRPCMetrics {
go util.StartMetricsServer(conf)
}
-util.DebugLogMsg("Registering profiling handler")
+log.DebugLogMsg("Registering profiling handler")
go util.EnableProfiling()
}
server.Wait()

View File

@ -22,6 +22,7 @@ import (
"fmt"
"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
"github.com/golang/protobuf/ptypes/timestamp"
)
@ -112,7 +113,7 @@ func checkVolExists(ctx context.Context,
if cloneState == cephFSCloneFailed {
err = volOptions.purgeVolume(ctx, volumeID(vid.FsSubvolName), true)
if err != nil {
-util.ErrorLog(ctx, "failed to delete volume %s: %v", vid.FsSubvolName, err)
+log.ErrorLog(ctx, "failed to delete volume %s: %v", vid.FsSubvolName, err)
return nil, err
}
@ -171,7 +172,7 @@ func checkVolExists(ctx context.Context,
return nil, err
}
-util.DebugLog(ctx, "Found existing volume (%s) with subvolume name (%s) for request (%s)",
+log.DebugLog(ctx, "Found existing volume (%s) with subvolume name (%s) for request (%s)",
vid.VolumeID, vid.FsSubvolName, volOptions.RequestName)
if parentVolOpt != nil && pvID != nil {
@ -269,7 +270,7 @@ func reserveVol(ctx context.Context, volOptions *volumeOptions, secret map[strin
return nil, err
}
-util.DebugLog(ctx, "Generated Volume ID (%s) and subvolume name (%s) for request name (%s)",
+log.DebugLog(ctx, "Generated Volume ID (%s) and subvolume name (%s) for request name (%s)",
vid.VolumeID, vid.FsSubvolName, volOptions.RequestName)
return &vid, nil
@ -311,7 +312,7 @@ func reserveSnap(
return nil, err
}
-util.DebugLog(ctx, "Generated Snapshot ID (%s) for request name (%s)",
+log.DebugLog(ctx, "Generated Snapshot ID (%s) for request name (%s)",
vid.SnapshotID, snap.RequestName)
return &vid, nil
@ -392,14 +393,14 @@ func checkSnapExists(
if err != nil {
err = volOptions.deleteSnapshot(ctx, volumeID(snapID), volumeID(parentSubVolName))
if err != nil {
-util.ErrorLog(ctx, "failed to delete snapshot %s: %v", snapID, err)
+log.ErrorLog(ctx, "failed to delete snapshot %s: %v", snapID, err)
return
}
err = j.UndoReservation(ctx, volOptions.MetadataPool,
volOptions.MetadataPool, snapID, snap.RequestName)
if err != nil {
-util.ErrorLog(ctx, "removing reservation failed for snapshot %s: %v", snapID, err)
+log.ErrorLog(ctx, "removing reservation failed for snapshot %s: %v", snapID, err)
}
}
}()
@ -415,7 +416,7 @@ func checkSnapExists(
if err != nil {
return nil, nil, err
}
-util.DebugLog(ctx, "Found existing snapshot (%s) with subvolume name (%s) for request (%s)",
+log.DebugLog(ctx, "Found existing snapshot (%s) with subvolume name (%s) for request (%s)",
snapData.ImageAttributes.RequestName, parentSubVolName, sid.FsSnapshotName)
return sid, &snapInfo, nil

View File

@ -25,6 +25,7 @@ import (
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
@ -81,7 +82,7 @@ func (ns *NodeServer) NodeStageVolume(
volID := volumeID(req.GetVolumeId())
if acquired := ns.VolumeLocks.TryAcquire(req.GetVolumeId()); !acquired {
-util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
+log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetVolumeId())
}
@ -114,13 +115,13 @@ func (ns *NodeServer) NodeStageVolume(
isMnt, err := util.IsMountPoint(stagingTargetPath)
if err != nil {
-util.ErrorLog(ctx, "stat failed: %v", err)
+log.ErrorLog(ctx, "stat failed: %v", err)
return nil, status.Error(codes.Internal, err.Error())
}
if isMnt {
-util.DebugLog(ctx, "cephfs: volume %s is already mounted to %s, skipping", volID, stagingTargetPath)
+log.DebugLog(ctx, "cephfs: volume %s is already mounted to %s, skipping", volID, stagingTargetPath)
return &csi.NodeStageVolumeResponse{}, nil
}
@ -130,7 +131,7 @@ func (ns *NodeServer) NodeStageVolume(
return nil, err
}
-util.DebugLog(ctx, "cephfs: successfully mounted volume %s to %s", volID, stagingTargetPath)
+log.DebugLog(ctx, "cephfs: successfully mounted volume %s to %s", volID, stagingTargetPath)
return &csi.NodeStageVolumeResponse{}, nil
}
@ -141,7 +142,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
cr, err := getCredentialsForVolume(volOptions, req)
if err != nil {
-util.ErrorLog(ctx, "failed to get ceph credentials for volume %s: %v", volID, err)
+log.ErrorLog(ctx, "failed to get ceph credentials for volume %s: %v", volID, err)
return status.Error(codes.Internal, err.Error())
}
@ -149,12 +150,12 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
m, err := newMounter(volOptions)
if err != nil {
-util.ErrorLog(ctx, "failed to create mounter for volume %s: %v", volID, err)
+log.ErrorLog(ctx, "failed to create mounter for volume %s: %v", volID, err)
return status.Error(codes.Internal, err.Error())
}
-util.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, m.name())
+log.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, m.name())
readOnly := "ro"
fuseMountOptions := strings.Split(volOptions.FuseMountOptions, ",")
@ -177,7 +178,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
}
if err = m.mount(ctx, stagingTargetPath, cr, volOptions); err != nil {
-util.ErrorLog(ctx,
+log.ErrorLog(ctx,
"failed to mount volume %s: %v Check dmesg logs if required.",
volID,
err)
@ -189,7 +190,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
// #nosec - allow anyone to write inside the stagingtarget path
err = os.Chmod(stagingTargetPath, 0o777)
if err != nil {
-util.ErrorLog(
+log.ErrorLog(
ctx,
"failed to change stagingtarget path %s permission for volume %s: %v",
stagingTargetPath,
@ -197,7 +198,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
err)
uErr := unmountVolume(ctx, stagingTargetPath)
if uErr != nil {
-util.ErrorLog(
+log.ErrorLog(
ctx,
"failed to umount stagingtarget path %s for volume %s: %v",
stagingTargetPath,
@ -229,7 +230,7 @@ func (ns *NodeServer) NodePublishVolume(
// are serialized, we dont need any extra locking in nodePublish
if err := util.CreateMountPoint(targetPath); err != nil {
-util.ErrorLog(ctx, "failed to create mount point at %s: %v", targetPath, err)
+log.ErrorLog(ctx, "failed to create mount point at %s: %v", targetPath, err)
return nil, status.Error(codes.Internal, err.Error())
}
@ -244,13 +245,13 @@ func (ns *NodeServer) NodePublishVolume(
isMnt, err := util.IsMountPoint(targetPath)
if err != nil {
-util.ErrorLog(ctx, "stat failed: %v", err)
+log.ErrorLog(ctx, "stat failed: %v", err)
return nil, status.Error(codes.Internal, err.Error())
}
if isMnt {
-util.DebugLog(ctx, "cephfs: volume %s is already bind-mounted to %s", volID, targetPath)
+log.DebugLog(ctx, "cephfs: volume %s is already bind-mounted to %s", volID, targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
@ -258,12 +259,12 @@ func (ns *NodeServer) NodePublishVolume(
// It's not, mount now
if err = bindMount(ctx, req.GetStagingTargetPath(), req.GetTargetPath(), req.GetReadonly(), mountOptions); err != nil {
-util.ErrorLog(ctx, "failed to bind-mount volume %s: %v", volID, err)
+log.ErrorLog(ctx, "failed to bind-mount volume %s: %v", volID, err)
return nil, status.Error(codes.Internal, err.Error())
}
-util.DebugLog(ctx, "cephfs: successfully bind-mounted volume %s to %s", volID, targetPath)
+log.DebugLog(ctx, "cephfs: successfully bind-mounted volume %s to %s", volID, targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
@ -283,7 +284,7 @@ func (ns *NodeServer) NodeUnpublishVolume(
if err != nil {
if os.IsNotExist(err) {
// targetPath has already been deleted
-util.DebugLog(ctx, "targetPath: %s has already been deleted", targetPath)
+log.DebugLog(ctx, "targetPath: %s has already been deleted", targetPath)
return &csi.NodeUnpublishVolumeResponse{}, nil
}
@ -308,7 +309,7 @@ func (ns *NodeServer) NodeUnpublishVolume(
return nil, status.Error(codes.Internal, err.Error())
}
-util.DebugLog(ctx, "cephfs: successfully unbounded volume %s from %s", req.GetVolumeId(), targetPath)
+log.DebugLog(ctx, "cephfs: successfully unbounded volume %s from %s", req.GetVolumeId(), targetPath)
return &csi.NodeUnpublishVolumeResponse{}, nil
}
@ -324,7 +325,7 @@ func (ns *NodeServer) NodeUnstageVolume(
volID := req.GetVolumeId()
if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
-util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
+log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
}
@ -336,7 +337,7 @@ func (ns *NodeServer) NodeUnstageVolume(
if err != nil {
if os.IsNotExist(err) {
// targetPath has already been deleted
-util.DebugLog(ctx, "targetPath: %s has already been deleted", stagingTargetPath)
+log.DebugLog(ctx, "targetPath: %s has already been deleted", stagingTargetPath)
return &csi.NodeUnstageVolumeResponse{}, nil
}
@ -351,7 +352,7 @@ func (ns *NodeServer) NodeUnstageVolume(
return nil, status.Error(codes.Internal, err.Error())
}
-util.DebugLog(ctx, "cephfs: successfully unmounted volume %s from %s", req.GetVolumeId(), stagingTargetPath)
+log.DebugLog(ctx, "cephfs: successfully unmounted volume %s from %s", req.GetVolumeId(), stagingTargetPath)
return &csi.NodeUnstageVolumeResponse{}, nil
}

View File

@ -21,7 +21,7 @@ import (
"errors"
"time"
-"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
"github.com/ceph/go-ceph/cephfs/admin"
"github.com/ceph/go-ceph/rados"
@ -51,14 +51,14 @@ type cephfsSnapshot struct {
func (vo *volumeOptions) createSnapshot(ctx context.Context, snapID, volID volumeID) error {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
-util.ErrorLog(ctx, "could not get FSAdmin: %s", err)
+log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
return err
}
err = fsa.CreateSubVolumeSnapshot(vo.FsName, vo.SubvolumeGroup, string(volID), string(snapID))
if err != nil {
-util.ErrorLog(ctx, "failed to create subvolume snapshot %s %s in fs %s: %s",
+log.ErrorLog(ctx, "failed to create subvolume snapshot %s %s in fs %s: %s",
string(snapID), string(volID), vo.FsName, err)
return err
@ -70,14 +70,14 @@ func (vo *volumeOptions) createSnapshot(ctx context.Context, snapID, volID volum
func (vo *volumeOptions) deleteSnapshot(ctx context.Context, snapID, volID volumeID) error {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
-util.ErrorLog(ctx, "could not get FSAdmin: %s", err)
+log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
return err
}
err = fsa.ForceRemoveSubVolumeSnapshot(vo.FsName, vo.SubvolumeGroup, string(volID), string(snapID))
if err != nil {
-util.ErrorLog(ctx, "failed to delete subvolume snapshot %s %s in fs %s: %s",
+log.ErrorLog(ctx, "failed to delete subvolume snapshot %s %s in fs %s: %s",
string(snapID), string(volID), vo.FsName, err)
return err
@ -97,7 +97,7 @@ func (vo *volumeOptions) getSnapshotInfo(ctx context.Context, snapID, volID volu
snap := snapshotInfo{}
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
-util.ErrorLog(ctx, "could not get FSAdmin: %s", err)
+log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
return snap, err
}
@ -107,7 +107,7 @@ func (vo *volumeOptions) getSnapshotInfo(ctx context.Context, snapID, volID volu
if errors.Is(err, rados.ErrNotFound) {
return snap, ErrSnapNotFound
}
-util.ErrorLog(
+log.ErrorLog(
ctx,
"failed to get subvolume snapshot info %s %s in fs %s with error %s",
string(volID),
@ -132,7 +132,7 @@ func (vo *volumeOptions) protectSnapshot(ctx context.Context, snapID, volID volu
}
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
-util.ErrorLog(ctx, "could not get FSAdmin: %s", err)
+log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
return err
}
@ -143,7 +143,7 @@ func (vo *volumeOptions) protectSnapshot(ctx context.Context, snapID, volID volu
if errors.Is(err, rados.ErrObjectExists) {
return nil
}
-util.ErrorLog(
+log.ErrorLog(
ctx,
"failed to protect subvolume snapshot %s %s in fs %s with error: %s",
string(volID),
@ -165,7 +165,7 @@ func (vo *volumeOptions) unprotectSnapshot(ctx context.Context, snapID, volID vo
}
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
-util.ErrorLog(ctx, "could not get FSAdmin: %s", err)
+log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
return err
}
@ -178,7 +178,7 @@ func (vo *volumeOptions) unprotectSnapshot(ctx context.Context, snapID, volID vo
if errors.Is(err, rados.ErrObjectExists) {
return nil
}
-util.ErrorLog(
+log.ErrorLog(
ctx,
"failed to unprotect subvolume snapshot %s %s in fs %s with error: %s",
string(volID),
@ -199,7 +199,7 @@ func (vo *volumeOptions) cloneSnapshot(
) error {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
-util.ErrorLog(ctx, "could not get FSAdmin: %s", err)
+log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
return err
}
@ -212,7 +212,7 @@ func (vo *volumeOptions) cloneSnapshot(
err = fsa.CloneSubVolumeSnapshot(vo.FsName, vo.SubvolumeGroup, string(volID), string(snapID), string(cloneID), co)
if err != nil {
-util.ErrorLog(
+log.ErrorLog(
ctx,
"failed to clone subvolume snapshot %s %s in fs %s with error: %s",
string(volID),

View File

@ -22,6 +22,7 @@ import (
"time"
"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/protobuf/ptypes"
@ -130,7 +131,7 @@ func genSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (sn
cephfsSnap.Monitors, cephfsSnap.ClusterID, err = util.GetMonsAndClusterID(snapOptions)
if err != nil {
-util.ErrorLog(ctx, "failed getting mons (%s)", err)
+log.ErrorLog(ctx, "failed getting mons (%s)", err)
return nil, err
}
@ -144,7 +145,7 @@ func genSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (sn
func parseTime(ctx context.Context, createTime time.Time) (*timestamp.Timestamp, error) {
tm, err := ptypes.TimestampProto(createTime)
if err != nil {
-util.ErrorLog(ctx, "failed to convert time %s %v", createTime, err)
+log.ErrorLog(ctx, "failed to convert time %s %v", createTime, err)
return tm, err
}

View File

@ -24,6 +24,7 @@ import (
"strings"
"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
fsAdmin "github.com/ceph/go-ceph/cephfs/admin"
"github.com/ceph/go-ceph/rados"
@ -59,13 +60,13 @@ func getVolumeRootPathCephDeprecated(volID volumeID) string {
func (vo *volumeOptions) getVolumeRootPathCeph(ctx context.Context, volID volumeID) (string, error) {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
-util.ErrorLog(ctx, "could not get FSAdmin err %s", err)
+log.ErrorLog(ctx, "could not get FSAdmin err %s", err)
return "", err
}
svPath, err := fsa.SubVolumePath(vo.FsName, vo.SubvolumeGroup, string(volID))
if err != nil {
-util.ErrorLog(ctx, "failed to get the rootpath for the vol %s: %s", string(volID), err)
+log.ErrorLog(ctx, "failed to get the rootpath for the vol %s: %s", string(volID), err)
if errors.Is(err, rados.ErrNotFound) {
return "", util.JoinErrors(ErrVolumeNotFound, err)
}
@ -79,14 +80,14 @@ func (vo *volumeOptions) getVolumeRootPathCeph(ctx context.Context, volID volume
func (vo *volumeOptions) getSubVolumeInfo(ctx context.Context, volID volumeID) (*Subvolume, error) {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
-util.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", vo.FsName, err)
+log.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", vo.FsName, err)
return nil, err
}
info, err := fsa.SubVolumeInfo(vo.FsName, vo.SubvolumeGroup, string(volID))
if err != nil {
-util.ErrorLog(ctx, "failed to get subvolume info for the vol %s: %s", string(volID), err)
+log.ErrorLog(ctx, "failed to get subvolume info for the vol %s: %s", string(volID), err)
if errors.Is(err, rados.ErrNotFound) {
return nil, ErrVolumeNotFound
}
@ -148,7 +149,7 @@ func createVolume(ctx context.Context, volOptions *volumeOptions, volID volumeID
ca, err := volOptions.conn.GetFSAdmin()
if err != nil {
-util.ErrorLog(ctx, "could not get FSAdmin, can not create subvolume %s: %s", string(volID), err)
+log.ErrorLog(ctx, "could not get FSAdmin, can not create subvolume %s: %s", string(volID), err)
return err
}
@ -158,7 +159,7 @@ func createVolume(ctx context.Context, volOptions *volumeOptions, volID volumeID
opts := fsAdmin.SubVolumeGroupOptions{}
err = ca.CreateSubVolumeGroup(volOptions.FsName, volOptions.SubvolumeGroup, &opts)
if err != nil {
-util.ErrorLog(
+log.ErrorLog(
ctx,
"failed to create subvolume group %s, for the vol %s: %s",
volOptions.SubvolumeGroup,
@ -167,7 +168,7 @@ func createVolume(ctx context.Context, volOptions *volumeOptions, volID volumeID
return err
}
-util.DebugLog(ctx, "cephfs: created subvolume group %s", volOptions.SubvolumeGroup)
+log.DebugLog(ctx, "cephfs: created subvolume group %s", volOptions.SubvolumeGroup)
clusterAdditionalInfo[volOptions.ClusterID].subVolumeGroupCreated = true
}
@ -182,7 +183,7 @@ func createVolume(ctx context.Context, volOptions *volumeOptions, volID volumeID
// FIXME: check if the right credentials are used ("-n", cephEntityClientPrefix + cr.ID)
err = ca.CreateSubVolume(volOptions.FsName, volOptions.SubvolumeGroup, string(volID), &opts)
if err != nil {
-util.ErrorLog(ctx, "failed to create subvolume %s in fs %s: %s", string(volID), volOptions.FsName, err)
+log.ErrorLog(ctx, "failed to create subvolume %s in fs %s: %s", string(volID), volOptions.FsName, err)
return err
}
@ -207,7 +208,7 @@ func (vo *volumeOptions) resizeVolume(ctx context.Context, volID volumeID, bytes
clusterAdditionalInfo[vo.ClusterID].resizeState == supported {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
-util.ErrorLog(ctx, "could not get FSAdmin, can not resize volume %s:", vo.FsName, err)
+log.ErrorLog(ctx, "could not get FSAdmin, can not resize volume %s:", vo.FsName, err)
return err
}
@ -220,7 +221,7 @@ func (vo *volumeOptions) resizeVolume(ctx context.Context, volID volumeID, bytes
var invalid fsAdmin.NotImplementedError
// In case the error is other than invalid command return error to the caller.
if !errors.As(err, &invalid) {
-util.ErrorLog(ctx, "failed to resize subvolume %s in fs %s: %s", string(volID), vo.FsName, err)
+log.ErrorLog(ctx, "failed to resize subvolume %s in fs %s: %s", string(volID), vo.FsName, err)
return err
}
@ -233,7 +234,7 @@ func (vo *volumeOptions) resizeVolume(ctx context.Context, volID volumeID, bytes
func (vo *volumeOptions) purgeVolume(ctx context.Context, volID volumeID, force bool) error {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
-util.ErrorLog(ctx, "could not get FSAdmin %s:", err)
+log.ErrorLog(ctx, "could not get FSAdmin %s:", err)
return err
}
@ -247,7 +248,7 @@ func (vo *volumeOptions) purgeVolume(ctx context.Context, volID volumeID, force
err = fsa.RemoveSubVolumeWithFlags(vo.FsName, vo.SubvolumeGroup, string(volID), opt)
if err != nil {
-util.ErrorLog(ctx, "failed to purge subvolume %s in fs %s: %s", string(volID), vo.FsName, err)
+log.ErrorLog(ctx, "failed to purge subvolume %s in fs %s: %s", string(volID), vo.FsName, err)
if strings.Contains(err.Error(), volumeNotEmpty) {
return util.JoinErrors(ErrVolumeHasSnapshots, err)
}

View File

@ -28,6 +28,7 @@ import (
"sync"
"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
)
const (
@ -75,7 +76,7 @@ func loadAvailableMounters(conf *util.Config) error {
err := kernelMounterProbe.Run()
if err != nil {
-util.ErrorLogMsg("failed to run mount.ceph %v", err)
+log.ErrorLogMsg("failed to run mount.ceph %v", err)
} else {
// fetch the current running kernel info
release, kvErr := util.GetKernelVersion()
@ -84,18 +85,18 @@ func loadAvailableMounters(conf *util.Config) error {
}
if conf.ForceKernelCephFS || util.CheckKernelSupport(release, quotaSupport) {
-util.DefaultLog("loaded mounter: %s", volumeMounterKernel)
+log.DefaultLog("loaded mounter: %s", volumeMounterKernel)
availableMounters = append(availableMounters, volumeMounterKernel)
} else {
-util.DefaultLog("kernel version < 4.17 might not support quota feature, hence not loading kernel client")
+log.DefaultLog("kernel version < 4.17 might not support quota feature, hence not loading kernel client")
}
}
err = fuseMounterProbe.Run()
if err != nil {
-util.ErrorLogMsg("failed to run ceph-fuse %v", err)
+log.ErrorLogMsg("failed to run ceph-fuse %v", err)
} else {
-util.DefaultLog("loaded mounter: %s", volumeMounterFuse)
+log.DefaultLog("loaded mounter: %s", volumeMounterFuse)
availableMounters = append(availableMounters, volumeMounterFuse)
}
@ -131,7 +132,7 @@ func newMounter(volOptions *volumeOptions) (volumeMounter, error) {
if chosenMounter == "" {
// Otherwise pick whatever is left
chosenMounter = availableMounters[0]
-util.DebugLogMsg("requested mounter: %s, chosen mounter: %s", wantMounter, chosenMounter)
+log.DebugLogMsg("requested mounter: %s, chosen mounter: %s", wantMounter, chosenMounter)
}
// Create the mounter
@ -291,10 +292,10 @@ func unmountVolume(ctx context.Context, mountPoint string) error {
if ok {
p, err := os.FindProcess(pid)
if err != nil {
-util.WarningLog(ctx, "failed to find process %d: %v", pid, err)
+log.WarningLog(ctx, "failed to find process %d: %v", pid, err)
} else {
if _, err = p.Wait(); err != nil {
-util.WarningLog(ctx, "%d is not a child process: %v", pid, err)
+log.WarningLog(ctx, "%d is not a child process: %v", pid, err)
}
}
}

View File

@ -18,7 +18,7 @@ package controller
import (
"fmt"
-"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
clientConfig "sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
@ -65,19 +65,19 @@ func Start(config Config) error {
}
mgr, err := manager.New(clientConfig.GetConfigOrDie(), opts)
if err != nil {
-util.ErrorLogMsg("failed to create manager %s", err)
+log.ErrorLogMsg("failed to create manager %s", err)
return err
}
err = addToManager(mgr, config)
if err != nil {
-util.ErrorLogMsg("failed to add manager %s", err)
+log.ErrorLogMsg("failed to add manager %s", err)
return err
}
err = mgr.Start(signals.SetupSignalHandler())
if err != nil {
-util.ErrorLogMsg("failed to start manager %s", err)
+log.ErrorLogMsg("failed to start manager %s", err)
}
return err

View File

@ -24,6 +24,7 @@ import (
ctrl "github.com/ceph/ceph-csi/internal/controller"
"github.com/ceph/ceph-csi/internal/rbd"
"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@ -98,7 +99,7 @@ func (r *ReconcilePersistentVolume) getCredentials(
if name == "" || namespace == "" {
errStr := "secret name or secret namespace is empty"
-util.ErrorLogMsg(errStr)
+log.ErrorLogMsg(errStr)
return nil, errors.New(errStr)
}
@ -117,7 +118,7 @@ func (r *ReconcilePersistentVolume) getCredentials(
cr, err = util.NewUserCredentials(credentials)
if err != nil {
-util.ErrorLogMsg("failed to get user credentials %s", err)
+log.ErrorLogMsg("failed to get user credentials %s", err)
return nil, err
}
@ -201,7 +202,7 @@ func (r ReconcilePersistentVolume) reconcilePV(ctx context.Context, obj runtime.
cr, err := r.getCredentials(ctx, secretName, secretNamespace)
if err != nil {
-util.ErrorLogMsg("failed to get credentials from secret %s", err)
+log.ErrorLogMsg("failed to get credentials from secret %s", err)
return err
}
@ -209,14 +210,14 @@ func (r ReconcilePersistentVolume) reconcilePV(ctx context.Context, obj runtime.
rbdVolID, err := rbd.RegenerateJournal(pv.Spec.CSI.VolumeAttributes, volumeHandler, requestName, cr)
if err != nil {
-util.ErrorLogMsg("failed to regenerate journal %s", err)
+log.ErrorLogMsg("failed to regenerate journal %s", err)
return err
}
if rbdVolID != volumeHandler {
err = r.storeVolumeIDInPV(ctx, pv, rbdVolID)
if err != nil {
-util.ErrorLogMsg("failed to store volumeID in PV %s", err)
+log.ErrorLogMsg("failed to store volumeID in PV %s", err)
return err
}

View File

@ -19,7 +19,7 @@ package csicommon
import (
"context"
-"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
@ -71,7 +71,7 @@ func (cs *DefaultControllerServer) GetCapacity(
func (cs *DefaultControllerServer) ControllerGetCapabilities(
ctx context.Context,
req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
-util.TraceLog(ctx, "Using default ControllerGetCapabilities")
+log.TraceLog(ctx, "Using default ControllerGetCapabilities")
if cs.Driver == nil {
return nil, status.Error(codes.Unimplemented, "Controller server is not enabled")
}

View File

@ -19,12 +19,12 @@ package csicommon
import (
"fmt"
+"github.com/ceph/ceph-csi/internal/util/log"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/klog/v2"
-"github.com/ceph/ceph-csi/internal/util"
)
// CSIDriver stores driver information.
@ -91,7 +91,7 @@ func (d *CSIDriver) AddControllerServiceCapabilities(cl []csi.ControllerServiceC
csc := make([]*csi.ControllerServiceCapability, 0, len(cl))
for _, c := range cl {
-util.DefaultLog("Enabling controller service capability: %v", c.String())
+log.DefaultLog("Enabling controller service capability: %v", c.String())
csc = append(csc, NewControllerServiceCapability(c))
}
@ -103,7 +103,7 @@ func (d *CSIDriver) AddVolumeCapabilityAccessModes(
vc []csi.VolumeCapability_AccessMode_Mode) []*csi.VolumeCapability_AccessMode {
vca := make([]*csi.VolumeCapability_AccessMode, 0, len(vc))
for _, c := range vc {
-util.DefaultLog("Enabling volume access mode: %v", c.String())
+log.DefaultLog("Enabling volume access mode: %v", c.String())
vca = append(vca, NewVolumeCapabilityAccessMode(c))
}
d.vc = vca

View File

@ -19,7 +19,7 @@ package csicommon
import (
"context"
-"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
@ -35,7 +35,7 @@ type DefaultIdentityServer struct {
func (ids *DefaultIdentityServer) GetPluginInfo(
ctx context.Context,
req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) {
-util.TraceLog(ctx, "Using default GetPluginInfo")
+log.TraceLog(ctx, "Using default GetPluginInfo")
if ids.Driver.name == "" {
return nil, status.Error(codes.Unavailable, "Driver name not configured")
@ -60,7 +60,7 @@ func (ids *DefaultIdentityServer) Probe(ctx context.Context, req *csi.ProbeReque
func (ids *DefaultIdentityServer) GetPluginCapabilities(
ctx context.Context,
req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
-util.TraceLog(ctx, "Using default capabilities")
+log.TraceLog(ctx, "Using default capabilities")
return &csi.GetPluginCapabilitiesResponse{
Capabilities: []*csi.PluginCapability{

View File

@ -19,7 +19,7 @@ package csicommon
import (
"context"
-"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
@ -57,7 +57,7 @@ func (ns *DefaultNodeServer) NodeExpandVolume(
func (ns *DefaultNodeServer) NodeGetInfo(
ctx context.Context,
req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
-util.TraceLog(ctx, "Using default NodeGetInfo")
+log.TraceLog(ctx, "Using default NodeGetInfo")
csiTopology := &csi.Topology{
Segments: ns.Driver.topology,
@ -73,7 +73,7 @@ func (ns *DefaultNodeServer) NodeGetInfo(
func (ns *DefaultNodeServer) NodeGetCapabilities(
ctx context.Context,
req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
-util.TraceLog(ctx, "Using default NodeGetCapabilities")
+log.TraceLog(ctx, "Using default NodeGetCapabilities")
return &csi.NodeGetCapabilitiesResponse{
Capabilities: []*csi.NodeServiceCapability{

View File

@ -23,6 +23,8 @@ import (
"strings"
"sync"
+"github.com/ceph/ceph-csi/internal/util/log"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/csi-addons/spec/lib/go/replication"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
@ -30,8 +32,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
"k8s.io/klog/v2"
-"github.com/ceph/ceph-csi/internal/util"
)
// NonBlockingGRPCServer defines Non blocking GRPC server interfaces.
@ -128,7 +128,7 @@ func (s *nonBlockingGRPCServer) serve(endpoint, hstOptions string, srv Servers,
replication.RegisterControllerServer(server, srv.RS)
}
-util.DefaultLog("Listening for connections on address: %#v", listener.Addr())
+log.DefaultLog("Listening for connections on address: %#v", listener.Addr())
if metrics {
ho := strings.Split(hstOptions, ",")
const expectedHo = 3

View File

@ -25,6 +25,7 @@ import (
"sync/atomic"
"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
"github.com/container-storage-interface/spec/lib/go/csi"
rp "github.com/csi-addons/replication-lib-utils/protosanitizer"
@ -160,9 +161,9 @@ func contextIDInjector(
info *grpc.UnaryServerInfo,
handler grpc.UnaryHandler) (resp interface{}, err error) {
atomic.AddUint64(&id, 1)
-ctx = context.WithValue(ctx, util.CtxKey, id)
+ctx = context.WithValue(ctx, log.CtxKey, id)
if reqID := getReqID(req); reqID != "" {
-ctx = context.WithValue(ctx, util.ReqID, reqID)
+ctx = context.WithValue(ctx, log.ReqID, reqID)
}
return handler(ctx, req)
@ -173,18 +174,18 @@ func logGRPC(
req interface{},
info *grpc.UnaryServerInfo,
handler grpc.UnaryHandler) (interface{}, error) {
-util.ExtendedLog(ctx, "GRPC call: %s", info.FullMethod)
+log.ExtendedLog(ctx, "GRPC call: %s", info.FullMethod)
if isReplicationRequest(req) {
-util.TraceLog(ctx, "GRPC request: %s", rp.StripReplicationSecrets(req))
+log.TraceLog(ctx, "GRPC request: %s", rp.StripReplicationSecrets(req))
} else {
-util.TraceLog(ctx, "GRPC request: %s", protosanitizer.StripSecrets(req))
+log.TraceLog(ctx, "GRPC request: %s", protosanitizer.StripSecrets(req))
}
resp, err := handler(ctx, req)
if err != nil {
-klog.Errorf(util.Log(ctx, "GRPC error: %v"), err)
+klog.Errorf(log.Log(ctx, "GRPC error: %v"), err)
} else {
-util.TraceLog(ctx, "GRPC response: %s", protosanitizer.StripSecrets(resp))
+log.TraceLog(ctx, "GRPC response: %s", protosanitizer.StripSecrets(resp))
}
return resp, err
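
The interceptor hunks above show that the context keys used for request correlation moved with the helpers: util.CtxKey and util.ReqID are now log.CtxKey and log.ReqID. A hedged sketch of reading the stored request ID back out of a context (this helper is invented for illustration; the diff only shows the keys being written):

// reqIDFromContext is a hypothetical helper: contextIDInjector stores
// the external request ID (a string) under log.ReqID, so a checked
// type assertion with the same key recovers it.
func reqIDFromContext(ctx context.Context) string {
	if reqID, ok := ctx.Value(log.ReqID).(string); ok {
		return reqID
	}
	return ""
}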
@ -231,32 +232,32 @@ func FilesystemNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.
available, ok := (*(volMetrics.Available)).AsInt64()
if !ok {
-util.ErrorLog(ctx, "failed to fetch available bytes")
+log.ErrorLog(ctx, "failed to fetch available bytes")
}
capacity, ok := (*(volMetrics.Capacity)).AsInt64()
if !ok {
-util.ErrorLog(ctx, "failed to fetch capacity bytes")
+log.ErrorLog(ctx, "failed to fetch capacity bytes")
return nil, status.Error(codes.Unknown, "failed to fetch capacity bytes")
}
used, ok := (*(volMetrics.Used)).AsInt64()
if !ok {
-util.ErrorLog(ctx, "failed to fetch used bytes")
+log.ErrorLog(ctx, "failed to fetch used bytes")
}
inodes, ok := (*(volMetrics.Inodes)).AsInt64()
if !ok {
-util.ErrorLog(ctx, "failed to fetch available inodes")
+log.ErrorLog(ctx, "failed to fetch available inodes")
return nil, status.Error(codes.Unknown, "failed to fetch available inodes")
}
inodesFree, ok := (*(volMetrics.InodesFree)).AsInt64()
if !ok {
-util.ErrorLog(ctx, "failed to fetch free inodes")
+log.ErrorLog(ctx, "failed to fetch free inodes")
}
inodesUsed, ok := (*(volMetrics.InodesUsed)).AsInt64()
if !ok {
-util.ErrorLog(ctx, "failed to fetch used inodes")
+log.ErrorLog(ctx, "failed to fetch used inodes")
}
return &csi.NodeGetVolumeStatsResponse{

View File

@ -21,6 +21,7 @@ import (
"errors"
"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
"github.com/ceph/go-ceph/rados"
)
@ -74,7 +75,7 @@ func getOMapValues(
if err != nil {
if errors.Is(err, rados.ErrNotFound) {
-util.ErrorLog(ctx, "omap not found (pool=%q, namespace=%q, name=%q): %v",
+log.ErrorLog(ctx, "omap not found (pool=%q, namespace=%q, name=%q): %v",
poolName, namespace, oid, err)
return nil, util.JoinErrors(util.ErrKeyNotFound, err)
@ -83,7 +84,7 @@ func getOMapValues(
return nil, err
}
-util.DebugLog(ctx, "got omap values: (pool=%q, namespace=%q, name=%q): %+v",
+log.DebugLog(ctx, "got omap values: (pool=%q, namespace=%q, name=%q): %+v",
poolName, namespace, oid, results)
return results, nil
@ -110,16 +111,16 @@ func removeMapKeys(
// the previous implementation of removing omap keys (via the cli)
// treated failure to find the omap as a non-error. Do so here to
// mimic the previous behavior.
-util.DebugLog(ctx, "when removing omap keys, omap not found (pool=%q, namespace=%q, name=%q): %+v",
+log.DebugLog(ctx, "when removing omap keys, omap not found (pool=%q, namespace=%q, name=%q): %+v",
poolName, namespace, oid, keys)
} else {
-util.ErrorLog(ctx, "failed removing omap keys (pool=%q, namespace=%q, name=%q): %v",
+log.ErrorLog(ctx, "failed removing omap keys (pool=%q, namespace=%q, name=%q): %v",
poolName, namespace, oid, err)
return err
}
}
-util.DebugLog(ctx, "removed omap keys (pool=%q, namespace=%q, name=%q): %+v",
+log.DebugLog(ctx, "removed omap keys (pool=%q, namespace=%q, name=%q): %+v",
poolName, namespace, oid, keys)
return nil
@ -146,12 +147,12 @@ func setOMapKeys(
}
err = ioctx.SetOmap(oid, bpairs)
if err != nil {
-util.ErrorLog(ctx, "failed setting omap keys (pool=%q, namespace=%q, name=%q, pairs=%+v): %v",
+log.ErrorLog(ctx, "failed setting omap keys (pool=%q, namespace=%q, name=%q, pairs=%+v): %v",
poolName, namespace, oid, pairs, err)
return err
}
-util.DebugLog(ctx, "set omap keys (pool=%q, namespace=%q, name=%q): %+v)",
+log.DebugLog(ctx, "set omap keys (pool=%q, namespace=%q, name=%q): %+v)",
poolName, namespace, oid, pairs)
return nil

View File

@ -25,6 +25,7 @@ import (
"strings" "strings"
"github.com/ceph/ceph-csi/internal/util" "github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
"github.com/pborman/uuid" "github.com/pborman/uuid"
) )
@ -437,7 +438,7 @@ func (conn *Connection) UndoReservation(ctx context.Context,
cj.cephUUIDDirectoryPrefix+imageUUID) cj.cephUUIDDirectoryPrefix+imageUUID)
if err != nil { if err != nil {
if !errors.Is(err, util.ErrObjectNotFound) { if !errors.Is(err, util.ErrObjectNotFound) {
util.ErrorLog(ctx, "failed removing oMap %s (%s)", cj.cephUUIDDirectoryPrefix+imageUUID, err) log.ErrorLog(ctx, "failed removing oMap %s (%s)", cj.cephUUIDDirectoryPrefix+imageUUID, err)
return err return err
} }
@ -448,7 +449,7 @@ func (conn *Connection) UndoReservation(ctx context.Context,
err := removeMapKeys(ctx, conn, csiJournalPool, cj.namespace, cj.csiDirectory, err := removeMapKeys(ctx, conn, csiJournalPool, cj.namespace, cj.csiDirectory,
[]string{cj.csiNameKeyPrefix + reqName}) []string{cj.csiNameKeyPrefix + reqName})
if err != nil { if err != nil {
util.ErrorLog(ctx, "failed removing oMap key %s (%s)", cj.csiNameKeyPrefix+reqName, err) log.ErrorLog(ctx, "failed removing oMap key %s (%s)", cj.csiNameKeyPrefix+reqName, err)
return err return err
} }
@ -486,7 +487,7 @@ func reserveOMapName(
if volUUID == "" && errors.Is(err, util.ErrObjectExists) { if volUUID == "" && errors.Is(err, util.ErrObjectExists) {
attempt++ attempt++
// try again with a different uuid, for maxAttempts tries // try again with a different uuid, for maxAttempts tries
util.DebugLog(ctx, "uuid (%s) conflict detected, retrying (attempt %d of %d)", log.DebugLog(ctx, "uuid (%s) conflict detected, retrying (attempt %d of %d)",
iterUUID, attempt, maxAttempts) iterUUID, attempt, maxAttempts)
continue continue
@ -590,10 +591,10 @@ func (conn *Connection) ReserveName(ctx context.Context,
} }
defer func() { defer func() {
if err != nil { if err != nil {
util.WarningLog(ctx, "reservation failed for volume: %s", reqName) log.WarningLog(ctx, "reservation failed for volume: %s", reqName)
errDefer := conn.UndoReservation(ctx, imagePool, journalPool, imageName, reqName) errDefer := conn.UndoReservation(ctx, imagePool, journalPool, imageName, reqName)
if errDefer != nil { if errDefer != nil {
util.WarningLog(ctx, "failed undoing reservation of volume: %s (%v)", reqName, errDefer) log.WarningLog(ctx, "failed undoing reservation of volume: %s (%v)", reqName, errDefer)
} }
} }
}() }()
@ -686,7 +687,7 @@ func (conn *Connection) GetImageAttributes(
if !errors.Is(err, util.ErrKeyNotFound) && !errors.Is(err, util.ErrPoolNotFound) { if !errors.Is(err, util.ErrKeyNotFound) && !errors.Is(err, util.ErrPoolNotFound) {
return nil, err return nil, err
} }
util.WarningLog(ctx, "unable to read omap keys: pool or key missing: %v", err) log.WarningLog(ctx, "unable to read omap keys: pool or key missing: %v", err)
} }
var found bool var found bool
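ReserveName above relies on a deferred rollback: once the reservation exists, any later failure undoes it before the error is returned, and a failed undo is only logged so it cannot mask the original error. A self-contained sketch of the idiom (all helper names below are placeholders, not from this change):

package main

import (
	"errors"
	"fmt"
)

func reserve() error          { return nil }
func undoReserve() error      { return nil }
func doRemainingSteps() error { return errors.New("step failed") }

// reserveWithRollback sketches the defer-based rollback used by
// ReserveName: the named return value err is re-inspected when the
// function unwinds, and the reservation is undone on failure.
func reserveWithRollback() (err error) {
	if err = reserve(); err != nil {
		return err
	}
	defer func() {
		if err != nil {
			// A later step failed: undo the reservation without
			// masking the original error.
			if undoErr := undoReserve(); undoErr != nil {
				fmt.Printf("failed undoing reservation: %v\n", undoErr)
			}
		}
	}()
	return doRemainingSteps()
}

func main() {
	fmt.Println(reserveWithRollback())
}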


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package util package kms
import ( import (
"context" "context"
@ -22,6 +22,8 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/ceph/ceph-csi/internal/util/k8s"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
awsCreds "github.com/aws/aws-sdk-go/aws/credentials" awsCreds "github.com/aws/aws-sdk-go/aws/credentials"
awsSession "github.com/aws/aws-sdk-go/aws/session" awsSession "github.com/aws/aws-sdk-go/aws/session"
@ -58,7 +60,7 @@ const (
awsCMK = "AWS_CMK_ARN" awsCMK = "AWS_CMK_ARN"
) )
var _ = RegisterKMSProvider(KMSProvider{ var _ = RegisterProvider(Provider{
UniqueID: kmsTypeAWSMetadata, UniqueID: kmsTypeAWSMetadata,
Initializer: initAWSMetadataKMS, Initializer: initAWSMetadataKMS,
}) })
@ -76,7 +78,7 @@ type AWSMetadataKMS struct {
cmk string cmk string
} }
func initAWSMetadataKMS(args KMSInitializerArgs) (EncryptionKMS, error) { func initAWSMetadataKMS(args ProviderInitArgs) (EncryptionKMS, error) {
kms := &AWSMetadataKMS{ kms := &AWSMetadataKMS{
namespace: args.Namespace, namespace: args.Namespace,
} }
@ -123,7 +125,7 @@ func initAWSMetadataKMS(args KMSInitializerArgs) (EncryptionKMS, error) {
} }
func (kms *AWSMetadataKMS) getSecrets() (map[string]interface{}, error) { func (kms *AWSMetadataKMS) getSecrets() (map[string]interface{}, error) {
c := NewK8sClient() c := k8s.NewK8sClient()
secret, err := c.CoreV1().Secrets(kms.namespace).Get(context.TODO(), secret, err := c.CoreV1().Secrets(kms.namespace).Get(context.TODO(),
kms.secretName, metav1.GetOptions{}) kms.secretName, metav1.GetOptions{})
if err != nil { if err != nil {
@ -150,10 +152,10 @@ func (kms *AWSMetadataKMS) Destroy() {
// Nothing to do. // Nothing to do.
} }
// requiresDEKStore indicates that the DEKs should get stored in the metadata // RequiresDEKStore indicates that the DEKs should get stored in the metadata
// of the volumes. This Amazon KMS provider does not support storing DEKs in // of the volumes. This Amazon KMS provider does not support storing DEKs in
// AWS as that adds additional costs. // AWS as that adds additional costs.
func (kms *AWSMetadataKMS) requiresDEKStore() DEKStoreType { func (kms *AWSMetadataKMS) RequiresDEKStore() DEKStoreType {
return DEKStoreMetadata return DEKStoreMetadata
} }
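getSecrets above pulls the AWS credentials out of a Kubernetes Secret via the new internal/util/k8s helper. A rough client-go equivalent, assuming in-cluster configuration; the Secret and namespace names below are placeholders:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// In-cluster configuration; this only works inside a Pod with a
	// ServiceAccount token mounted.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// "ceph-csi" and "ceph-csi-aws-credentials" are placeholder names.
	secret, err := client.CoreV1().Secrets("ceph-csi").Get(context.TODO(),
		"ceph-csi-aws-credentials", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for key := range secret.Data {
		fmt.Println("found secret key:", key)
	}
}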


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package util package kms
import ( import (
"testing" "testing"


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package util package kms
import ( import (
"context" "context"
@ -23,6 +23,8 @@ import (
"io/ioutil" "io/ioutil"
"os" "os"
"github.com/ceph/ceph-csi/internal/util/k8s"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) )
@ -44,6 +46,12 @@ const (
// defaultKMSConfigMapName default ConfigMap name to fetch kms // defaultKMSConfigMapName default ConfigMap name to fetch kms
// connection details. // connection details.
defaultKMSConfigMapName = "csi-kms-connection-details" defaultKMSConfigMapName = "csi-kms-connection-details"
// kmsConfigPath is the location of the vault config file.
kmsConfigPath = "/etc/ceph-csi-encryption-kms-config/config.json"
// Default KMS type.
DefaultKMSType = "default"
) )
// GetKMS returns an instance of Key Management System. // GetKMS returns an instance of Key Management System.
@ -54,8 +62,8 @@ const (
// - secrets contain additional details, like TLS certificates to connect to // - secrets contain additional details, like TLS certificates to connect to
// the KMS // the KMS
func GetKMS(tenant, kmsID string, secrets map[string]string) (EncryptionKMS, error) { func GetKMS(tenant, kmsID string, secrets map[string]string) (EncryptionKMS, error) {
if kmsID == "" || kmsID == defaultKMSType { if kmsID == "" || kmsID == DefaultKMSType {
return initSecretsKMS(secrets) return GetDefaultKMS(secrets)
} }
config, err := getKMSConfiguration() config, err := getKMSConfiguration()
@ -146,7 +154,7 @@ func getKMSConfigMap() (map[string]interface{}, error) {
} }
cmName := getKMSConfigMapName() cmName := getKMSConfigMapName()
c := NewK8sClient() c := k8s.NewK8sClient()
cm, err := c.CoreV1().ConfigMaps(ns).Get(context.Background(), cm, err := c.CoreV1().ConfigMaps(ns).Get(context.Background(),
cmName, metav1.GetOptions{}) cmName, metav1.GetOptions{})
if err != nil { if err != nil {
@ -168,10 +176,10 @@ func getKMSConfigMap() (map[string]interface{}, error) {
return kmsConfig, nil return kmsConfig, nil
} }
// getKMSProvider inspects the configuration and tries to identify what // getProvider inspects the configuration and tries to identify what
// KMSProvider is expected to be used with it. This returns the // Provider is expected to be used with it. This returns the
// KMSProvider.UniqueID. // Provider.UniqueID.
func getKMSProvider(config map[string]interface{}) (string, error) { func getProvider(config map[string]interface{}) (string, error) {
var name string var name string
providerName, ok := config[kmsTypeKey] providerName, ok := config[kmsTypeKey]
@ -200,45 +208,45 @@ func getKMSProvider(config map[string]interface{}) (string, error) {
"configuration option %q or %q", kmsTypeKey, kmsProviderKey) "configuration option %q or %q", kmsTypeKey, kmsProviderKey)
} }
// KMSInitializerArgs get passed to KMSInitializerFunc when a new instance of a // ProviderInitArgs get passed to ProviderInitFunc when a new instance of a
// KMSProvider is initialized. // Provider is initialized.
type KMSInitializerArgs struct { type ProviderInitArgs struct {
Tenant string Tenant string
Config map[string]interface{} Config map[string]interface{}
Secrets map[string]string Secrets map[string]string
// Namespace contains the Kubernetes Namespace where the Ceph-CSI Pods // Namespace contains the Kubernetes Namespace where the Ceph-CSI Pods
// are running. This is optional, and might be unset when the // are running. This is optional, and might be unset when the
// KMSProvider.Initializer is called. // Provider.Initializer is called.
Namespace string Namespace string
} }
// KMSInitializerFunc gets called when the KMSProvider needs to be // ProviderInitFunc gets called when the Provider needs to be
// instantiated. // instantiated.
type KMSInitializerFunc func(args KMSInitializerArgs) (EncryptionKMS, error) type ProviderInitFunc func(args ProviderInitArgs) (EncryptionKMS, error)
type KMSProvider struct { type Provider struct {
UniqueID string UniqueID string
Initializer KMSInitializerFunc Initializer ProviderInitFunc
} }
type kmsProviderList struct { type kmsProviderList struct {
providers map[string]KMSProvider providers map[string]Provider
} }
// kmsManager is used to create instances for a KMS provider. // kmsManager is used to create instances for a KMS provider.
var kmsManager = kmsProviderList{providers: map[string]KMSProvider{}} var kmsManager = kmsProviderList{providers: map[string]Provider{}}
// RegisterKMSProvider uses kmsManager to register the given KMSProvider. The // RegisterProvider uses kmsManager to register the given Provider. The
// KMSProvider.Initializer function will get called when a new instance of the // Provider.Initializer function will get called when a new instance of the
// KMS is required. // KMS is required.
func RegisterKMSProvider(provider KMSProvider) bool { func RegisterProvider(provider Provider) bool {
// validate uniqueness of the UniqueID // validate uniqueness of the UniqueID
if provider.UniqueID == "" { if provider.UniqueID == "" {
panic("a provider MUST set a UniqueID") panic("a provider MUST set a UniqueID")
} }
_, ok := kmsManager.providers[provider.UniqueID] _, ok := kmsManager.providers[provider.UniqueID]
if ok { if ok {
panic("duplicate registration of KMSProvider.UniqueID: " + provider.UniqueID) panic("duplicate registration of Provider.UniqueID: " + provider.UniqueID)
} }
// validate the Initializer // validate the Initializer
@ -251,14 +259,14 @@ func RegisterKMSProvider(provider KMSProvider) bool {
return true return true
} }
// buildKMS creates a new KMSProvider instance, based on the configuration that // buildKMS creates a new Provider instance, based on the configuration that
// was passed. This uses getKMSProvider() internally to identify the // was passed. This uses getProvider() internally to identify the
// KMSProvider to instantiate. // Provider to instantiate.
func (kf *kmsProviderList) buildKMS( func (kf *kmsProviderList) buildKMS(
tenant string, tenant string,
config map[string]interface{}, config map[string]interface{},
secrets map[string]string) (EncryptionKMS, error) { secrets map[string]string) (EncryptionKMS, error) {
providerName, err := getKMSProvider(config) providerName, err := getProvider(config)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -269,14 +277,14 @@ func (kf *kmsProviderList) buildKMS(
providerName) providerName)
} }
kmsInitArgs := KMSInitializerArgs{ kmsInitArgs := ProviderInitArgs{
Tenant: tenant, Tenant: tenant,
Config: config, Config: config,
Secrets: secrets, Secrets: secrets,
} }
// Namespace is an optional parameter, it may not be set and is not // Namespace is an optional parameter, it may not be set and is not
// required for all KMSProviders // required for all Providers
ns, err := getPodNamespace() ns, err := getPodNamespace()
if err == nil { if err == nil {
kmsInitArgs.Namespace = ns kmsInitArgs.Namespace = ns
@ -284,3 +292,96 @@ func (kf *kmsProviderList) buildKMS(
return provider.Initializer(kmsInitArgs) return provider.Initializer(kmsInitArgs)
} }
func GetDefaultKMS(secrets map[string]string) (EncryptionKMS, error) {
provider, ok := kmsManager.providers[DefaultKMSType]
if !ok {
return nil, fmt.Errorf("could not find KMS provider %q", DefaultKMSType)
}
kmsInitArgs := ProviderInitArgs{
Secrets: secrets,
}
return provider.Initializer(kmsInitArgs)
}
// EncryptionKMS provides an external Key Management System for storing
// encryption passphrases.
type EncryptionKMS interface {
Destroy()
// RequiresDEKStore returns the DEKStoreType that needs to be
// configured for the KMS. Nothing needs to be done when this function
// returns DEKStoreIntegrated, otherwise you will need to configure an
// alternative storage for the DEKs.
RequiresDEKStore() DEKStoreType
// EncryptDEK provides a way for a KMS to encrypt a DEK. In case the
// encryption is done transparently inside the KMS service, the
// function can return an unencrypted value.
EncryptDEK(volumeID, plainDEK string) (string, error)
// DecryptDEK provides a way for a KMS to decrypt a DEK. In case the
// encryption is done transparently inside the KMS service, the
// function does not need to do anything except return the encryptedDEK
// as it was received.
DecryptDEK(volumeID, encryptedDEK string) (string, error)
}
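// For illustration only (not part of this change): a hypothetical
// provider wired through this API. Embedding IntegratedDEK supplies
// RequiresDEKStore, EncryptDEK and DecryptDEK, so only Destroy needs
// to be written before the provider can be registered.
type noopKMS struct {
	IntegratedDEK
}

func (noopKMS) Destroy() {}

var _ = RegisterProvider(Provider{
	UniqueID: "noop",
	Initializer: func(args ProviderInitArgs) (EncryptionKMS, error) {
		return noopKMS{}, nil
	},
})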
// DEKStoreType describes what DEKStore needs to be configured when using a
// particular KMS. A KMS might support different DEKStores depending on its
// configuration.
type DEKStoreType string
const (
// DEKStoreIntegrated indicates that the KMS itself supports storing
// DEKs.
DEKStoreIntegrated = DEKStoreType("")
// DEKStoreMetadata indicates that the KMS should be configured to
// store the DEK in the metadata of the volume.
DEKStoreMetadata = DEKStoreType("metadata")
)
// DEKStore allows KMS instances to implement a modular backend for DEK
// storage. This can be used to store the DEK in a different location, in case
// the KMS can not store passphrases for volumes.
type DEKStore interface {
// StoreDEK saves the DEK in the configured store.
StoreDEK(volumeID string, dek string) error
// FetchDEK reads the DEK from the configured store and returns it.
FetchDEK(volumeID string) (string, error)
// RemoveDEK deletes the DEK from the configured store.
RemoveDEK(volumeID string) error
}
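// For illustration only (not part of this change): a minimal in-memory
// DEKStore. A real implementation persists the DEK elsewhere, for
// example in the metadata of the volume as DEKStoreMetadata suggests.
type mapDEKStore struct {
	deks map[string]string
}

func (m *mapDEKStore) StoreDEK(volumeID string, dek string) error {
	m.deks[volumeID] = dek
	return nil
}

func (m *mapDEKStore) FetchDEK(volumeID string) (string, error) {
	dek, ok := m.deks[volumeID]
	if !ok {
		return "", fmt.Errorf("no DEK stored for volume %q", volumeID)
	}
	return dek, nil
}

func (m *mapDEKStore) RemoveDEK(volumeID string) error {
	delete(m.deks, volumeID)
	return nil
}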
// IntegratedDEK is a DEKStore that cannot be configured. Either the KMS does
// not use a DEK, or the DEK is stored in the KMS without additional
// configuration options.
type IntegratedDEK struct{}
func (i IntegratedDEK) RequiresDEKStore() DEKStoreType {
return DEKStoreIntegrated
}
func (i IntegratedDEK) EncryptDEK(volumeID, plainDEK string) (string, error) {
return plainDEK, nil
}
func (i IntegratedDEK) DecryptDEK(volumeID, encryptedDEK string) (string, error) {
return encryptedDEK, nil
}
// getKeys takes a map that uses strings for keys and returns a slice with the
// keys.
func getKeys(m map[string]interface{}) []string {
keys := make([]string, len(m))
i := 0
for k := range m {
keys[i] = k
i++
}
return keys
}
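Taken together, a caller resolves a KMS through GetKMS and then works purely against the EncryptionKMS interface. A sketch, assuming the secrets map carries the encryptionPassphrase key that the default provider expects:

// exampleRoundTrip resolves the default KMS (empty kmsID) and
// round-trips a DEK through it.
func exampleRoundTrip() error {
	secrets := map[string]string{"encryptionPassphrase": "my-passphrase"}
	ekms, err := GetKMS("tenant", "", secrets)
	if err != nil {
		return err
	}
	defer ekms.Destroy()

	encrypted, err := ekms.EncryptDEK("volume-id", "plain-dek")
	if err != nil {
		return err
	}
	plain, err := ekms.DecryptDEK("volume-id", encrypted)
	if err != nil {
		return err
	}
	// With an IntegratedDEK-backed provider the DEK passes through
	// unchanged, so plain == "plain-dek" here.
	fmt.Println(plain)
	return nil
}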


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package util package kms
import ( import (
"testing" "testing"
@ -22,22 +22,22 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func noinitKMS(args KMSInitializerArgs) (EncryptionKMS, error) { func noinitKMS(args ProviderInitArgs) (EncryptionKMS, error) {
return nil, nil return nil, nil
} }
func TestRegisterKMSProvider(t *testing.T) { func TestRegisterProvider(t *testing.T) {
t.Parallel() t.Parallel()
tests := []struct { tests := []struct {
provider KMSProvider provider Provider
panics bool panics bool
}{{ }{{
KMSProvider{ Provider{
UniqueID: "incomplete-provider", UniqueID: "incomplete-provider",
}, },
true, true,
}, { }, {
KMSProvider{ Provider{
UniqueID: "initializer-only", UniqueID: "initializer-only",
Initializer: noinitKMS, Initializer: noinitKMS,
}, },
@ -47,9 +47,9 @@ func TestRegisterKMSProvider(t *testing.T) {
for _, test := range tests { for _, test := range tests {
provider := test.provider provider := test.provider
if test.panics { if test.panics {
assert.Panics(t, func() { RegisterKMSProvider(provider) }) assert.Panics(t, func() { RegisterProvider(provider) })
} else { } else {
assert.True(t, RegisterKMSProvider(provider)) assert.True(t, RegisterProvider(provider))
} }
} }
} }


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package util package kms
import ( import (
"context" "context"
@ -26,6 +26,8 @@ import (
"fmt" "fmt"
"io" "io"
"github.com/ceph/ceph-csi/internal/util/k8s"
"golang.org/x/crypto/scrypt" "golang.org/x/crypto/scrypt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) )
@ -34,9 +36,6 @@ const (
// Encryption passphrase location in K8s secrets. // Encryption passphrase location in K8s secrets.
encryptionPassphraseKey = "encryptionPassphrase" encryptionPassphraseKey = "encryptionPassphrase"
// Default KMS type.
defaultKMSType = "default"
// kmsTypeSecretsMetadata is the SecretsKMS with per-volume encryption, // kmsTypeSecretsMetadata is the SecretsKMS with per-volume encryption,
// where the DEK is stored in the metadata of the volume itself. // where the DEK is stored in the metadata of the volume itself.
kmsTypeSecretsMetadata = "metadata" kmsTypeSecretsMetadata = "metadata"
@ -51,16 +50,21 @@ const (
// SecretsKMS is the default KMS implementation; it means no external KMS is in use. // SecretsKMS is the default KMS implementation; it means no external KMS is in use.
type SecretsKMS struct { type SecretsKMS struct {
integratedDEK IntegratedDEK
passphrase string passphrase string
} }
// initSecretsKMS initializes a SecretsKMS that uses the passphrase from the var _ = RegisterProvider(Provider{
UniqueID: DefaultKMSType,
Initializer: newSecretsKMS,
})
// newSecretsKMS initializes a SecretsKMS that uses the passphrase from the
// secret that is configured for the StorageClass. This KMS provider uses a // secret that is configured for the StorageClass. This KMS provider uses a
// single (LUKS) passphrase for all volumes. // single (LUKS) passphrase for all volumes.
func initSecretsKMS(secrets map[string]string) (EncryptionKMS, error) { func newSecretsKMS(args ProviderInitArgs) (EncryptionKMS, error) {
passphraseValue, ok := secrets[encryptionPassphraseKey] passphraseValue, ok := args.Secrets[encryptionPassphraseKey]
if !ok { if !ok {
return nil, errors.New("missing encryption passphrase in secrets") return nil, errors.New("missing encryption passphrase in secrets")
} }
@ -96,7 +100,7 @@ type SecretsMetadataKMS struct {
SecretsKMS SecretsKMS
} }
var _ = RegisterKMSProvider(KMSProvider{ var _ = RegisterProvider(Provider{
UniqueID: kmsTypeSecretsMetadata, UniqueID: kmsTypeSecretsMetadata,
Initializer: initSecretsMetadataKMS, Initializer: initSecretsMetadataKMS,
}) })
@ -104,7 +108,7 @@ var _ = RegisterKMSProvider(KMSProvider{
// initSecretsMetadataKMS initializes a SecretsMetadataKMS that wraps a SecretsKMS, // initSecretsMetadataKMS initializes a SecretsMetadataKMS that wraps a SecretsKMS,
// so that the passphrase from the user provided or StorageClass secrets can be used // so that the passphrase from the user provided or StorageClass secrets can be used
// for encrypting/decrypting DEKs that are stored in a detached DEKStore. // for encrypting/decrypting DEKs that are stored in a detached DEKStore.
func initSecretsMetadataKMS(args KMSInitializerArgs) (EncryptionKMS, error) { func initSecretsMetadataKMS(args ProviderInitArgs) (EncryptionKMS, error) {
var ( var (
smKMS SecretsMetadataKMS smKMS SecretsMetadataKMS
encryptionPassphrase string encryptionPassphrase string
@ -155,7 +159,7 @@ func (kms SecretsMetadataKMS) fetchEncryptionPassphrase(
secretNamespace = defaultNamespace secretNamespace = defaultNamespace
} }
c := NewK8sClient() c := k8s.NewK8sClient()
secret, err := c.CoreV1().Secrets(secretNamespace).Get(context.TODO(), secret, err := c.CoreV1().Secrets(secretNamespace).Get(context.TODO(),
secretName, metav1.GetOptions{}) secretName, metav1.GetOptions{})
if err != nil { if err != nil {
@ -177,7 +181,7 @@ func (kms SecretsMetadataKMS) Destroy() {
kms.SecretsKMS.Destroy() kms.SecretsKMS.Destroy()
} }
func (kms SecretsMetadataKMS) requiresDEKStore() DEKStoreType { func (kms SecretsMetadataKMS) RequiresDEKStore() DEKStoreType {
return DEKStoreMetadata return DEKStoreMetadata
} }
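SecretsMetadataKMS derives per-volume keys from the user-supplied passphrase via scrypt. A standalone sketch of such a derivation; the parameters below are common scrypt defaults chosen for illustration, not necessarily the ones ceph-csi uses:

package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/scrypt"
)

func main() {
	passphrase := []byte("my-passphrase-from-kubernetes")
	// Using the volume ID as the salt ties the derived key to one volume.
	salt := []byte("0001-0009-rook-ceph-0000000000000001")

	// N=32768, r=8, p=1, 32-byte key: common scrypt parameters.
	key, err := scrypt.Key(passphrase, salt, 32768, 8, 1, 32)
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(key))
}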


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package util package kms
import ( import (
"testing" "testing"
@ -23,6 +23,26 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestNewSecretsKMS(t *testing.T) {
t.Parallel()
secrets := map[string]string{}
// no passphrase in the secrets, should fail
kms, err := newSecretsKMS(ProviderInitArgs{
Secrets: secrets,
})
assert.Error(t, err)
assert.Nil(t, kms)
// set a passphrase and it should pass
secrets[encryptionPassphraseKey] = "plaintext encryption key"
kms, err = newSecretsKMS(ProviderInitArgs{
Secrets: secrets,
})
assert.NotNil(t, kms)
assert.NoError(t, err)
}
func TestGenerateNonce(t *testing.T) { func TestGenerateNonce(t *testing.T) {
t.Parallel() t.Parallel()
size := 64 size := 64
@ -44,7 +64,7 @@ func TestGenerateCipher(t *testing.T) {
func TestInitSecretsMetadataKMS(t *testing.T) { func TestInitSecretsMetadataKMS(t *testing.T) {
t.Parallel() t.Parallel()
args := KMSInitializerArgs{ args := ProviderInitArgs{
Tenant: "tenant", Tenant: "tenant",
Config: nil, Config: nil,
Secrets: map[string]string{}, Secrets: map[string]string{},
@ -61,7 +81,7 @@ func TestInitSecretsMetadataKMS(t *testing.T) {
kms, err = initSecretsMetadataKMS(args) kms, err = initSecretsMetadataKMS(args)
assert.NoError(t, err) assert.NoError(t, err)
require.NotNil(t, kms) require.NotNil(t, kms)
assert.Equal(t, DEKStoreMetadata, kms.requiresDEKStore()) assert.Equal(t, DEKStoreMetadata, kms.RequiresDEKStore())
} }
func TestWorkflowSecretsMetadataKMS(t *testing.T) { func TestWorkflowSecretsMetadataKMS(t *testing.T) {
@ -69,7 +89,7 @@ func TestWorkflowSecretsMetadataKMS(t *testing.T) {
secrets := map[string]string{ secrets := map[string]string{
encryptionPassphraseKey: "my-passphrase-from-kubernetes", encryptionPassphraseKey: "my-passphrase-from-kubernetes",
} }
args := KMSInitializerArgs{ args := ProviderInitArgs{
Tenant: "tenant", Tenant: "tenant",
Config: nil, Config: nil,
Secrets: secrets, Secrets: secrets,
@ -81,9 +101,7 @@ func TestWorkflowSecretsMetadataKMS(t *testing.T) {
require.NotNil(t, kms) require.NotNil(t, kms)
// plainDEK is the (LUKS) passphrase for the volume // plainDEK is the (LUKS) passphrase for the volume
plainDEK, err := generateNewEncryptionPassphrase() plainDEK := "usually created with generateNewEncryptionPassphrase()"
assert.NoError(t, err)
assert.NotEqual(t, "", plainDEK)
encryptedDEK, err := kms.EncryptDEK(volumeID, plainDEK) encryptedDEK, err := kms.EncryptDEK(volumeID, plainDEK)
assert.NoError(t, err) assert.NoError(t, err)


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package util package kms
import ( import (
"errors" "errors"
@ -89,7 +89,7 @@ type vaultConnection struct {
type VaultKMS struct { type VaultKMS struct {
vaultConnection vaultConnection
integratedDEK IntegratedDEK
// vaultPassphrasePath (VPP) used to be added before the "key" of the // vaultPassphrasePath (VPP) used to be added before the "key" of the
// secret (like /v1/secret/data/<VPP>/key) // secret (like /v1/secret/data/<VPP>/key)
@ -317,13 +317,13 @@ func (vc *vaultConnection) getDeleteKeyContext() map[string]string {
return keyContext return keyContext
} }
var _ = RegisterKMSProvider(KMSProvider{ var _ = RegisterProvider(Provider{
UniqueID: kmsTypeVault, UniqueID: kmsTypeVault,
Initializer: initVaultKMS, Initializer: initVaultKMS,
}) })
// initVaultKMS returns an interface to HashiCorp Vault KMS. // initVaultKMS returns an interface to HashiCorp Vault KMS.
func initVaultKMS(args KMSInitializerArgs) (EncryptionKMS, error) { func initVaultKMS(args ProviderInitArgs) (EncryptionKMS, error) {
kms := &VaultKMS{} kms := &VaultKMS{}
err := kms.initConnection(args.Config) err := kms.initConnection(args.Config)
if err != nil { if err != nil {


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package util package kms
import ( import (
"context" "context"
@ -77,14 +77,14 @@ type VaultTenantSA struct {
saTokenDir string saTokenDir string
} }
var _ = RegisterKMSProvider(KMSProvider{ var _ = RegisterProvider(Provider{
UniqueID: kmsTypeVaultTenantSA, UniqueID: kmsTypeVaultTenantSA,
Initializer: initVaultTenantSA, Initializer: initVaultTenantSA,
}) })
// initVaultTenantSA returns an interface to HashiCorp Vault KMS where Tenants // initVaultTenantSA returns an interface to HashiCorp Vault KMS where Tenants
// use their ServiceAccount to access the service. // use their ServiceAccount to access the service.
func initVaultTenantSA(args KMSInitializerArgs) (EncryptionKMS, error) { func initVaultTenantSA(args ProviderInitArgs) (EncryptionKMS, error) {
var err error var err error
config := args.Config config := args.Config


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package util package kms
import ( import (
"errors" "errors"


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package util package kms
import ( import (
"errors" "errors"


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package util package kms
import ( import (
"context" "context"
@ -24,6 +24,8 @@ import (
"os" "os"
"strconv" "strconv"
"github.com/ceph/ceph-csi/internal/util/k8s"
"github.com/hashicorp/vault/api" "github.com/hashicorp/vault/api"
apierrs "k8s.io/apimachinery/pkg/api/errors" apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -179,7 +181,7 @@ Example JSON structure in the KMS config is,
*/ */
type vaultTenantConnection struct { type vaultTenantConnection struct {
vaultConnection vaultConnection
integratedDEK IntegratedDEK
client *kubernetes.Clientset client *kubernetes.Clientset
@ -202,13 +204,13 @@ type VaultTokensKMS struct {
TokenName string TokenName string
} }
var _ = RegisterKMSProvider(KMSProvider{ var _ = RegisterProvider(Provider{
UniqueID: kmsTypeVaultTokens, UniqueID: kmsTypeVaultTokens,
Initializer: initVaultTokensKMS, Initializer: initVaultTokensKMS,
}) })
// initVaultTokensKMS returns an interface to HashiCorp Vault KMS. // initVaultTokensKMS returns an interface to HashiCorp Vault KMS.
func initVaultTokensKMS(args KMSInitializerArgs) (EncryptionKMS, error) { func initVaultTokensKMS(args ProviderInitArgs) (EncryptionKMS, error) {
var err error var err error
config := args.Config config := args.Config
@ -438,7 +440,7 @@ func (vtc *vaultTenantConnection) initCertificates(config map[string]interface{}
func (vtc *vaultTenantConnection) getK8sClient() *kubernetes.Clientset { func (vtc *vaultTenantConnection) getK8sClient() *kubernetes.Clientset {
if vtc.client == nil { if vtc.client == nil {
vtc.client = NewK8sClient() vtc.client = k8s.NewK8sClient()
} }
return vtc.client return vtc.client


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package util package kms
import ( import (
"encoding/json" "encoding/json"
@ -76,7 +76,7 @@ func TestInitVaultTokensKMS(t *testing.T) {
return return
} }
args := KMSInitializerArgs{ args := ProviderInitArgs{
Tenant: "bob", Tenant: "bob",
Config: make(map[string]interface{}), Config: make(map[string]interface{}),
Secrets: nil, Secrets: nil,


@ -21,6 +21,7 @@ import (
"time" "time"
"github.com/ceph/ceph-csi/internal/util" "github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
connlib "github.com/kubernetes-csi/csi-lib-utils/connection" connlib "github.com/kubernetes-csi/csi-lib-utils/connection"
"github.com/kubernetes-csi/csi-lib-utils/metrics" "github.com/kubernetes-csi/csi-lib-utils/metrics"
@ -39,23 +40,23 @@ func getLiveness(timeout time.Duration, csiConn *grpc.ClientConn) {
ctx, cancel := context.WithTimeout(context.Background(), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel() defer cancel()
util.TraceLogMsg("Sending probe request to CSI driver") log.TraceLogMsg("Sending probe request to CSI driver")
ready, err := rpc.Probe(ctx, csiConn) ready, err := rpc.Probe(ctx, csiConn)
if err != nil { if err != nil {
liveness.Set(0) liveness.Set(0)
util.ErrorLogMsg("health check failed: %v", err) log.ErrorLogMsg("health check failed: %v", err)
return return
} }
if !ready { if !ready {
liveness.Set(0) liveness.Set(0)
util.ErrorLogMsg("driver responded but is not ready") log.ErrorLogMsg("driver responded but is not ready")
return return
} }
liveness.Set(1) liveness.Set(1)
util.ExtendedLogMsg("Health check succeeded") log.ExtendedLogMsg("Health check succeeded")
} }
func recordLiveness(endpoint, drivername string, pollTime, timeout time.Duration) { func recordLiveness(endpoint, drivername string, pollTime, timeout time.Duration) {
@ -63,14 +64,14 @@ func recordLiveness(endpoint, drivername string, pollTime, timeout time.Duration
// register prometheus metrics // register prometheus metrics
err := prometheus.Register(liveness) err := prometheus.Register(liveness)
if err != nil { if err != nil {
util.FatalLogMsg(err.Error()) log.FatalLogMsg(err.Error())
} }
csiConn, err := connlib.Connect(endpoint, liveMetricsManager) csiConn, err := connlib.Connect(endpoint, liveMetricsManager)
if err != nil { if err != nil {
// connlib should retry forever so a returned error should mean // connlib should retry forever so a returned error should mean
// the grpc client is misconfigured rather than an error on the network // the grpc client is misconfigured rather than an error on the network
util.FatalLogMsg("failed to establish connection to CSI driver: %v", err) log.FatalLogMsg("failed to establish connection to CSI driver: %v", err)
} }
// get liveness periodically // get liveness periodically
@ -83,7 +84,7 @@ func recordLiveness(endpoint, drivername string, pollTime, timeout time.Duration
// Run starts liveness collection and prometheus endpoint. // Run starts liveness collection and prometheus endpoint.
func Run(conf *util.Config) { func Run(conf *util.Config) {
util.ExtendedLogMsg("Liveness Running") log.ExtendedLogMsg("Liveness Running")
// start liveness collection // start liveness collection
go recordLiveness(conf.Endpoint, conf.DriverName, conf.PollTime, conf.PoolTimeout) go recordLiveness(conf.Endpoint, conf.DriverName, conf.PollTime, conf.PoolTimeout)
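The liveness collector boils down to a gauge that is set to 1 or 0 after every probe and exposed over HTTP. A condensed sketch of that loop; the probe function here is a stand-in for csi-lib-utils' rpc.Probe, and the metric name and port are placeholders:

package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var liveness = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "csi_liveness",
	Help: "Liveness probe result, 1 when the driver responds.",
})

// probe stands in for the rpc.Probe call against the CSI socket.
func probe() bool { return true }

func main() {
	prometheus.MustRegister(liveness)

	go func() {
		for range time.Tick(60 * time.Second) {
			if probe() {
				liveness.Set(1)
			} else {
				liveness.Set(0)
			}
		}
	}()

	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":8080", nil)
}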


@ -21,7 +21,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/ceph/ceph-csi/internal/util" "github.com/ceph/ceph-csi/internal/util/log"
librbd "github.com/ceph/go-ceph/rbd" librbd "github.com/ceph/go-ceph/rbd"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
@ -103,14 +103,14 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume)
// and add task to flatten temporary cloned image // and add task to flatten temporary cloned image
err = rv.cloneRbdImageFromSnapshot(ctx, snap, parentVol) err = rv.cloneRbdImageFromSnapshot(ctx, snap, parentVol)
if err != nil { if err != nil {
util.ErrorLog(ctx, "failed to clone rbd image %s from snapshot %s: %v", rv.RbdImageName, snap.RbdSnapName, err) log.ErrorLog(ctx, "failed to clone rbd image %s from snapshot %s: %v", rv.RbdImageName, snap.RbdSnapName, err)
err = fmt.Errorf("failed to clone rbd image %s from snapshot %s: %w", rv.RbdImageName, snap.RbdSnapName, err) err = fmt.Errorf("failed to clone rbd image %s from snapshot %s: %w", rv.RbdImageName, snap.RbdSnapName, err)
return false, err return false, err
} }
err = tempClone.deleteSnapshot(ctx, snap) err = tempClone.deleteSnapshot(ctx, snap)
if err != nil { if err != nil {
util.ErrorLog(ctx, "failed to delete snapshot: %v", err) log.ErrorLog(ctx, "failed to delete snapshot: %v", err)
return false, err return false, err
} }
@ -155,7 +155,7 @@ func (rv *rbdVolume) createCloneFromImage(ctx context.Context, parentVol *rbdVol
err = rv.getImageID() err = rv.getImageID()
if err != nil { if err != nil {
util.ErrorLog(ctx, "failed to get volume id %s: %v", rv, err) log.ErrorLog(ctx, "failed to get volume id %s: %v", rv, err)
return err return err
} }
@ -176,7 +176,7 @@ func (rv *rbdVolume) createCloneFromImage(ctx context.Context, parentVol *rbdVol
err = j.StoreImageID(ctx, rv.JournalPool, rv.ReservedID, rv.ImageID) err = j.StoreImageID(ctx, rv.JournalPool, rv.ReservedID, rv.ImageID)
if err != nil { if err != nil {
util.ErrorLog(ctx, "failed to store volume %s: %v", rv, err) log.ErrorLog(ctx, "failed to store volume %s: %v", rv, err)
return err return err
} }
@ -213,7 +213,7 @@ func (rv *rbdVolume) doSnapClone(ctx context.Context, parentVol *rbdVolume) erro
if err != nil || errClone != nil { if err != nil || errClone != nil {
cErr := cleanUpSnapshot(ctx, tempClone, cloneSnap, rv, rv.conn.Creds) cErr := cleanUpSnapshot(ctx, tempClone, cloneSnap, rv, rv.conn.Creds)
if cErr != nil { if cErr != nil {
util.ErrorLog(ctx, "failed to cleanup image %s or snapshot %s: %v", cloneSnap, tempClone, cErr) log.ErrorLog(ctx, "failed to cleanup image %s or snapshot %s: %v", cloneSnap, tempClone, cErr)
} }
} }
@ -222,7 +222,7 @@ func (rv *rbdVolume) doSnapClone(ctx context.Context, parentVol *rbdVolume) erro
// cleanup snapshot // cleanup snapshot
cErr := cleanUpSnapshot(ctx, parentVol, tempSnap, tempClone, rv.conn.Creds) cErr := cleanUpSnapshot(ctx, parentVol, tempSnap, tempClone, rv.conn.Creds)
if cErr != nil { if cErr != nil {
util.ErrorLog(ctx, "failed to cleanup image %s or snapshot %s: %v", tempSnap, tempClone, cErr) log.ErrorLog(ctx, "failed to cleanup image %s or snapshot %s: %v", tempSnap, tempClone, cErr)
} }
} }
} }
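doSnapClone above treats a failure in either the clone step or the later snapshot step as a reason to tear down the intermediate objects; cleanup failures are only logged, and the first real error is what the caller sees. A compact sketch with placeholder helpers:

package main

import (
	"errors"
	"fmt"
)

func createTempClone() error { return nil }
func snapshotClone() error   { return errors.New("snapshot failed") }
func cleanUp() error         { return nil }

// snapCloneSketch mirrors doSnapClone's error handling: if either step
// fails, the temporaries are removed, but only the first error is
// returned to the caller.
func snapCloneSketch() error {
	var err error
	errClone := createTempClone()
	if errClone == nil {
		err = snapshotClone()
	}
	// Tear down intermediates if either step failed.
	if err != nil || errClone != nil {
		if cErr := cleanUp(); cErr != nil {
			fmt.Printf("failed to cleanup image or snapshot: %v\n", cErr)
		}
	}
	if errClone != nil {
		return errClone
	}
	return err
}

func main() {
	fmt.Println(snapCloneSketch())
}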


@ -24,6 +24,7 @@ import (
csicommon "github.com/ceph/ceph-csi/internal/csi-common" csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/util" "github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
librbd "github.com/ceph/go-ceph/rbd" librbd "github.com/ceph/go-ceph/rbd"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi"
@ -55,7 +56,7 @@ type ControllerServer struct {
func (cs *ControllerServer) validateVolumeReq(ctx context.Context, req *csi.CreateVolumeRequest) error { func (cs *ControllerServer) validateVolumeReq(ctx context.Context, req *csi.CreateVolumeRequest) error {
if err := cs.Driver.ValidateControllerServiceRequest( if err := cs.Driver.ValidateControllerServiceRequest(
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil { csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
util.ErrorLog(ctx, "invalid create volume req: %v", protosanitizer.StripSecrets(req)) log.ErrorLog(ctx, "invalid create volume req: %v", protosanitizer.StripSecrets(req))
return err return err
} }
@ -286,7 +287,7 @@ func (cs *ControllerServer) CreateVolume(
defer rbdVol.Destroy() defer rbdVol.Destroy()
// Existence and conflict checks // Existence and conflict checks
if acquired := cs.VolumeLocks.TryAcquire(req.GetName()); !acquired { if acquired := cs.VolumeLocks.TryAcquire(req.GetName()); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, req.GetName()) log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, req.GetName())
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetName()) return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetName())
} }
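VolumeLocks.TryAcquire above serializes operations per volume name: a request against a name that is already busy is rejected with codes.Aborted rather than queued. The idea in miniature (a sketch of the concept, not ceph-csi's actual util.VolumeLocks):

package main

import (
	"fmt"
	"sync"
)

// nameLocks rejects, rather than queues, concurrent operations on the
// same name.
type nameLocks struct {
	mu    sync.Mutex
	names map[string]struct{}
}

func newNameLocks() *nameLocks {
	return &nameLocks{names: map[string]struct{}{}}
}

func (l *nameLocks) TryAcquire(name string) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	if _, busy := l.names[name]; busy {
		return false
	}
	l.names[name] = struct{}{}
	return true
}

func (l *nameLocks) Release(name string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	delete(l.names, name)
}

func main() {
	locks := newNameLocks()
	fmt.Println(locks.TryAcquire("pvc-1")) // true
	fmt.Println(locks.TryAcquire("pvc-1")) // false, already held
	locks.Release("pvc-1")
}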
@ -294,7 +295,7 @@ func (cs *ControllerServer) CreateVolume(
err = rbdVol.Connect(cr) err = rbdVol.Connect(cr)
if err != nil { if err != nil {
util.ErrorLog(ctx, "failed to connect to volume %v: %v", rbdVol.RbdImageName, err) log.ErrorLog(ctx, "failed to connect to volume %v: %v", rbdVol.RbdImageName, err)
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
@ -330,7 +331,7 @@ func (cs *ControllerServer) CreateVolume(
if !errors.Is(err, ErrFlattenInProgress) { if !errors.Is(err, ErrFlattenInProgress) {
errDefer := undoVolReservation(ctx, rbdVol, cr) errDefer := undoVolReservation(ctx, rbdVol, cr)
if errDefer != nil { if errDefer != nil {
util.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)", req.GetName(), errDefer) log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)", req.GetName(), errDefer)
} }
} }
} }
@ -487,7 +488,7 @@ func flattenTemporaryClonedImages(ctx context.Context, rbdVol *rbdVolume, cr *ut
} }
if len(snaps) > int(maxSnapshotsOnImage) { if len(snaps) > int(maxSnapshotsOnImage) {
util.DebugLog( log.DebugLog(
ctx, ctx,
"snapshots count %d on image: %s reached configured hard limit %d", "snapshots count %d on image: %s reached configured hard limit %d",
len(snaps), len(snaps),
@ -508,7 +509,7 @@ func flattenTemporaryClonedImages(ctx context.Context, rbdVol *rbdVolume, cr *ut
} }
if len(snaps) > int(minSnapshotsOnImageToStartFlatten) { if len(snaps) > int(minSnapshotsOnImageToStartFlatten) {
util.DebugLog( log.DebugLog(
ctx, ctx,
"snapshots count %d on image: %s reached configured soft limit %d", "snapshots count %d on image: %s reached configured soft limit %d",
len(snaps), len(snaps),
@ -544,13 +545,13 @@ func checkFlatten(ctx context.Context, rbdVol *rbdVolume, cr *util.Credentials)
return status.Error(codes.Aborted, err.Error()) return status.Error(codes.Aborted, err.Error())
} }
if errDefer := deleteImage(ctx, rbdVol, cr); errDefer != nil { if errDefer := deleteImage(ctx, rbdVol, cr); errDefer != nil {
util.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, errDefer) log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, errDefer)
return status.Error(codes.Internal, err.Error()) return status.Error(codes.Internal, err.Error())
} }
errDefer := undoVolReservation(ctx, rbdVol, cr) errDefer := undoVolReservation(ctx, rbdVol, cr)
if errDefer != nil { if errDefer != nil {
util.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)", rbdVol.RequestName, errDefer) log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)", rbdVol.RequestName, errDefer)
} }
return status.Error(codes.Internal, err.Error()) return status.Error(codes.Internal, err.Error())
@ -567,7 +568,7 @@ func (cs *ControllerServer) createVolumeFromSnapshot(
snapshotID string) error { snapshotID string) error {
rbdSnap := &rbdSnapshot{} rbdSnap := &rbdSnapshot{}
if acquired := cs.SnapshotLocks.TryAcquire(snapshotID); !acquired { if acquired := cs.SnapshotLocks.TryAcquire(snapshotID); !acquired {
util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID) log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID)
return status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, snapshotID) return status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, snapshotID)
} }
@ -576,7 +577,7 @@ func (cs *ControllerServer) createVolumeFromSnapshot(
err := genSnapFromSnapID(ctx, rbdSnap, snapshotID, cr, secrets) err := genSnapFromSnapID(ctx, rbdSnap, snapshotID, cr, secrets)
if err != nil { if err != nil {
if errors.Is(err, util.ErrPoolNotFound) { if errors.Is(err, util.ErrPoolNotFound) {
util.ErrorLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err) log.ErrorLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)
return status.Error(codes.InvalidArgument, err.Error()) return status.Error(codes.InvalidArgument, err.Error())
} }
@ -603,13 +604,13 @@ func (cs *ControllerServer) createVolumeFromSnapshot(
// create clone image and delete snapshot // create clone image and delete snapshot
err = rbdVol.cloneRbdImageFromSnapshot(ctx, rbdSnap, parentVol) err = rbdVol.cloneRbdImageFromSnapshot(ctx, rbdSnap, parentVol)
if err != nil { if err != nil {
util.ErrorLog(ctx, "failed to clone rbd image %s from snapshot %s: %v", rbdVol, rbdSnap, err) log.ErrorLog(ctx, "failed to clone rbd image %s from snapshot %s: %v", rbdVol, rbdSnap, err)
return err return err
} }
} }
util.DebugLog(ctx, "create volume %s from snapshot %s", rbdVol.RequestName, rbdSnap.RbdSnapName) log.DebugLog(ctx, "create volume %s from snapshot %s", rbdVol.RequestName, rbdSnap.RbdSnapName)
return nil return nil
} }
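Throughout this server, internal failures are translated into gRPC status codes before being returned: Aborted for lock contention, InvalidArgument or NotFound for bad references, Internal otherwise. A stripped-down sketch of that mapping style, with a placeholder error class:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

var errPoolNotFound = errors.New("pool not found")

// toGRPCError shows the translation style used in this file: pick a
// gRPC code by error class, keep the original message.
func toGRPCError(err error) error {
	switch {
	case err == nil:
		return nil
	case errors.Is(err, errPoolNotFound):
		return status.Error(codes.InvalidArgument, err.Error())
	default:
		return status.Error(codes.Internal, err.Error())
	}
}

func main() {
	fmt.Println(toGRPCError(errPoolNotFound))
}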
@ -631,7 +632,7 @@ func (cs *ControllerServer) createBackingImage(
switch { switch {
case rbdSnap != nil: case rbdSnap != nil:
if err = cs.OperationLocks.GetRestoreLock(rbdSnap.VolID); err != nil { if err = cs.OperationLocks.GetRestoreLock(rbdSnap.VolID); err != nil {
util.ErrorLog(ctx, err.Error()) log.ErrorLog(ctx, err.Error())
return status.Error(codes.Aborted, err.Error()) return status.Error(codes.Aborted, err.Error())
} }
@ -641,10 +642,10 @@ func (cs *ControllerServer) createBackingImage(
if err != nil { if err != nil {
return err return err
} }
util.DebugLog(ctx, "created volume %s from snapshot %s", rbdVol.RequestName, rbdSnap.RbdSnapName) log.DebugLog(ctx, "created volume %s from snapshot %s", rbdVol.RequestName, rbdSnap.RbdSnapName)
case parentVol != nil: case parentVol != nil:
if err = cs.OperationLocks.GetCloneLock(parentVol.VolID); err != nil { if err = cs.OperationLocks.GetCloneLock(parentVol.VolID); err != nil {
util.ErrorLog(ctx, err.Error()) log.ErrorLog(ctx, err.Error())
return status.Error(codes.Aborted, err.Error()) return status.Error(codes.Aborted, err.Error())
} }
@ -654,19 +655,19 @@ func (cs *ControllerServer) createBackingImage(
default: default:
err = createImage(ctx, rbdVol, cr) err = createImage(ctx, rbdVol, cr)
if err != nil { if err != nil {
util.ErrorLog(ctx, "failed to create volume: %v", err) log.ErrorLog(ctx, "failed to create volume: %v", err)
return status.Error(codes.Internal, err.Error()) return status.Error(codes.Internal, err.Error())
} }
} }
util.DebugLog(ctx, "created volume %s backed by image %s", rbdVol.RequestName, rbdVol.RbdImageName) log.DebugLog(ctx, "created volume %s backed by image %s", rbdVol.RequestName, rbdVol.RbdImageName)
defer func() { defer func() {
if err != nil { if err != nil {
if !errors.Is(err, ErrFlattenInProgress) { if !errors.Is(err, ErrFlattenInProgress) {
if deleteErr := deleteImage(ctx, rbdVol, cr); deleteErr != nil { if deleteErr := deleteImage(ctx, rbdVol, cr); deleteErr != nil {
util.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, deleteErr) log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, deleteErr)
} }
} }
} }
@ -679,7 +680,7 @@ func (cs *ControllerServer) createBackingImage(
if rbdSnap != nil { if rbdSnap != nil {
err = rbdVol.flattenRbdImage(ctx, cr, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth) err = rbdVol.flattenRbdImage(ctx, cr, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil { if err != nil {
util.ErrorLog(ctx, "failed to flatten image %s: %v", rbdVol, err) log.ErrorLog(ctx, "failed to flatten image %s: %v", rbdVol, err)
return err return err
} }
@ -708,7 +709,7 @@ func checkContentSource(
} }
rbdSnap := &rbdSnapshot{} rbdSnap := &rbdSnapshot{}
if err := genSnapFromSnapID(ctx, rbdSnap, snapshotID, cr, req.GetSecrets()); err != nil { if err := genSnapFromSnapID(ctx, rbdSnap, snapshotID, cr, req.GetSecrets()); err != nil {
util.ErrorLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err) log.ErrorLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)
if !errors.Is(err, ErrSnapNotFound) { if !errors.Is(err, ErrSnapNotFound) {
return nil, nil, status.Error(codes.Internal, err.Error()) return nil, nil, status.Error(codes.Internal, err.Error())
} }
@ -728,7 +729,7 @@ func checkContentSource(
} }
rbdvol, err := genVolFromVolID(ctx, volID, cr, req.GetSecrets()) rbdvol, err := genVolFromVolID(ctx, volID, cr, req.GetSecrets())
if err != nil { if err != nil {
util.ErrorLog(ctx, "failed to get backend image for %s: %v", volID, err) log.ErrorLog(ctx, "failed to get backend image for %s: %v", volID, err)
if !errors.Is(err, ErrImageNotFound) { if !errors.Is(err, ErrImageNotFound) {
return nil, nil, status.Error(codes.Internal, err.Error()) return nil, nil, status.Error(codes.Internal, err.Error())
} }
@ -750,7 +751,7 @@ func (cs *ControllerServer) DeleteVolume(
req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest( if err := cs.Driver.ValidateControllerServiceRequest(
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil { csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
util.ErrorLog(ctx, "invalid delete volume req: %v", protosanitizer.StripSecrets(req)) log.ErrorLog(ctx, "invalid delete volume req: %v", protosanitizer.StripSecrets(req))
return nil, err return nil, err
} }
@ -768,7 +769,7 @@ func (cs *ControllerServer) DeleteVolume(
} }
if acquired := cs.VolumeLocks.TryAcquire(volumeID); !acquired { if acquired := cs.VolumeLocks.TryAcquire(volumeID); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID) log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID) return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
} }
@ -776,7 +777,7 @@ func (cs *ControllerServer) DeleteVolume(
// lock out volumeID for clone and expand operation // lock out volumeID for clone and expand operation
if err = cs.OperationLocks.GetDeleteLock(volumeID); err != nil { if err = cs.OperationLocks.GetDeleteLock(volumeID); err != nil {
util.ErrorLog(ctx, err.Error()) log.ErrorLog(ctx, err.Error())
return nil, status.Error(codes.Aborted, err.Error()) return nil, status.Error(codes.Aborted, err.Error())
} }
@ -786,7 +787,7 @@ func (cs *ControllerServer) DeleteVolume(
defer rbdVol.Destroy() defer rbdVol.Destroy()
if err != nil { if err != nil {
if errors.Is(err, util.ErrPoolNotFound) { if errors.Is(err, util.ErrPoolNotFound) {
util.WarningLog(ctx, "failed to get backend volume for %s: %v", volumeID, err) log.WarningLog(ctx, "failed to get backend volume for %s: %v", volumeID, err)
return &csi.DeleteVolumeResponse{}, nil return &csi.DeleteVolumeResponse{}, nil
} }
@ -795,7 +796,7 @@ func (cs *ControllerServer) DeleteVolume(
// or partially complete (image and imageOMap are garbage collected already), hence return // or partially complete (image and imageOMap are garbage collected already), hence return
// success as deletion is complete // success as deletion is complete
if errors.Is(err, util.ErrKeyNotFound) { if errors.Is(err, util.ErrKeyNotFound) {
util.WarningLog(ctx, "Failed to volume options for %s: %v", volumeID, err) log.WarningLog(ctx, "Failed to volume options for %s: %v", volumeID, err)
return &csi.DeleteVolumeResponse{}, nil return &csi.DeleteVolumeResponse{}, nil
} }
@ -809,7 +810,7 @@ func (cs *ControllerServer) DeleteVolume(
// to lead us to the image, hence the imageOMap needs to be garbage collected, by calling // to lead us to the image, hence the imageOMap needs to be garbage collected, by calling
// unreserve for the same // unreserve for the same
if acquired := cs.VolumeLocks.TryAcquire(rbdVol.RequestName); !acquired { if acquired := cs.VolumeLocks.TryAcquire(rbdVol.RequestName); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName) log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName) return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName)
} }
@ -825,7 +826,7 @@ func (cs *ControllerServer) DeleteVolume(
// lock out parallel create requests against the same volume name as we // lock out parallel create requests against the same volume name as we
// clean up the image and associated omaps for the same // clean up the image and associated omaps for the same
if acquired := cs.VolumeLocks.TryAcquire(rbdVol.RequestName); !acquired { if acquired := cs.VolumeLocks.TryAcquire(rbdVol.RequestName); !acquired {
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName) log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName) return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, rbdVol.RequestName)
} }
@ -839,7 +840,7 @@ func cleanupRBDImage(ctx context.Context,
rbdVol *rbdVolume, cr *util.Credentials) (*csi.DeleteVolumeResponse, error) { rbdVol *rbdVolume, cr *util.Credentials) (*csi.DeleteVolumeResponse, error) {
mirroringInfo, err := rbdVol.getImageMirroringInfo() mirroringInfo, err := rbdVol.getImageMirroringInfo()
if err != nil { if err != nil {
util.ErrorLog(ctx, err.Error()) log.ErrorLog(ctx, err.Error())
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
@ -862,7 +863,7 @@ func cleanupRBDImage(ctx context.Context,
} }
if localStatus.Up && localStatus.State == librbd.MirrorImageStatusStateReplaying { if localStatus.Up && localStatus.State == librbd.MirrorImageStatusStateReplaying {
if err = undoVolReservation(ctx, rbdVol, cr); err != nil { if err = undoVolReservation(ctx, rbdVol, cr); err != nil {
util.ErrorLog(ctx, "failed to remove reservation for volume (%s) with backing image (%s) (%s)", log.ErrorLog(ctx, "failed to remove reservation for volume (%s) with backing image (%s) (%s)",
rbdVol.RequestName, rbdVol.RbdImageName, err) rbdVol.RequestName, rbdVol.RbdImageName, err)
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
@ -870,7 +871,7 @@ func cleanupRBDImage(ctx context.Context,
return &csi.DeleteVolumeResponse{}, nil return &csi.DeleteVolumeResponse{}, nil
} }
util.ErrorLog(ctx, log.ErrorLog(ctx,
"secondary image status is up=%t and state=%s", "secondary image status is up=%t and state=%s",
localStatus.Up, localStatus.Up,
localStatus.State) localStatus.State)
@ -878,12 +879,12 @@ func cleanupRBDImage(ctx context.Context,
inUse, err := rbdVol.isInUse() inUse, err := rbdVol.isInUse()
if err != nil { if err != nil {
util.ErrorLog(ctx, "failed getting information for image (%s): (%s)", rbdVol, err) log.ErrorLog(ctx, "failed getting information for image (%s): (%s)", rbdVol, err)
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
if inUse { if inUse {
util.ErrorLog(ctx, "rbd %s is still being used", rbdVol) log.ErrorLog(ctx, "rbd %s is still being used", rbdVol)
return nil, status.Errorf(codes.Internal, "rbd %s is still being used", rbdVol.RbdImageName) return nil, status.Errorf(codes.Internal, "rbd %s is still being used", rbdVol.RbdImageName)
} }
@ -895,7 +896,7 @@ func cleanupRBDImage(ctx context.Context,
if err != nil { if err != nil {
// return error if it is not ErrImageNotFound // return error if it is not ErrImageNotFound
if !errors.Is(err, ErrImageNotFound) { if !errors.Is(err, ErrImageNotFound) {
util.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v",
tempClone, err) tempClone, err)
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
@ -903,16 +904,16 @@ func cleanupRBDImage(ctx context.Context,
} }
// Deleting rbd image // Deleting rbd image
util.DebugLog(ctx, "deleting image %s", rbdVol.RbdImageName) log.DebugLog(ctx, "deleting image %s", rbdVol.RbdImageName)
if err = deleteImage(ctx, rbdVol, cr); err != nil { if err = deleteImage(ctx, rbdVol, cr); err != nil {
util.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v",
rbdVol, err) rbdVol, err)
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
if err = undoVolReservation(ctx, rbdVol, cr); err != nil { if err = undoVolReservation(ctx, rbdVol, cr); err != nil {
util.ErrorLog(ctx, "failed to remove reservation for volume (%s) with backing image (%s) (%s)", log.ErrorLog(ctx, "failed to remove reservation for volume (%s) with backing image (%s) (%s)",
rbdVol.RequestName, rbdVol.RbdImageName, err)
return nil, status.Error(codes.Internal, err.Error())
@@ -969,7 +970,7 @@ func (cs *ControllerServer) CreateSnapshot(
case errors.Is(err, ErrImageNotFound):
err = status.Errorf(codes.NotFound, "source Volume ID %s not found", req.GetSourceVolumeId())
case errors.Is(err, util.ErrPoolNotFound):
-util.ErrorLog(ctx, "failed to get backend volume for %s: %v", req.GetSourceVolumeId(), err)
+log.ErrorLog(ctx, "failed to get backend volume for %s: %v", req.GetSourceVolumeId(), err)
err = status.Errorf(codes.NotFound, err.Error())
default:
err = status.Errorf(codes.Internal, err.Error())
@@ -996,7 +997,7 @@ func (cs *ControllerServer) CreateSnapshot(
rbdSnap.RequestName = req.GetName()
if acquired := cs.SnapshotLocks.TryAcquire(req.GetName()); !acquired {
-util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, req.GetName())
+log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, req.GetName())
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetName())
}
@@ -1004,7 +1005,7 @@ func (cs *ControllerServer) CreateSnapshot(
// Take lock on parent rbd image
if err = cs.OperationLocks.GetSnapshotCreateLock(rbdSnap.SourceVolumeID); err != nil {
-util.ErrorLog(ctx, err.Error())
+log.ErrorLog(ctx, err.Error())
return nil, status.Error(codes.Aborted, err.Error())
}
@@ -1037,7 +1038,7 @@ func (cs *ControllerServer) CreateSnapshot(
if err != nil && !errors.Is(err, ErrFlattenInProgress) {
errDefer := undoSnapReservation(ctx, rbdSnap, cr)
if errDefer != nil {
-util.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", req.GetName(), errDefer)
+log.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", req.GetName(), errDefer)
}
}
}()
@@ -1070,7 +1071,7 @@ func cloneFromSnapshot(
if err != nil {
uErr := undoSnapshotCloning(ctx, rbdVol, rbdSnap, vol, cr)
if uErr != nil {
-util.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", rbdSnap.RequestName, uErr)
+log.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", rbdSnap.RequestName, uErr)
}
return nil, status.Errorf(codes.Internal, err.Error())
@@ -1116,7 +1117,7 @@ func cloneFromSnapshot(
} else if err != nil {
uErr := undoSnapshotCloning(ctx, rbdVol, rbdSnap, vol, cr)
if uErr != nil {
-util.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", rbdSnap.RequestName, uErr)
+log.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", rbdSnap.RequestName, uErr)
}
return nil, status.Errorf(codes.Internal, err.Error())
@@ -1136,7 +1137,7 @@ func cloneFromSnapshot(
func (cs *ControllerServer) validateSnapshotReq(ctx context.Context, req *csi.CreateSnapshotRequest) error {
if err := cs.Driver.ValidateControllerServiceRequest(
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
-util.ErrorLog(ctx, "invalid create snapshot req: %v", protosanitizer.StripSecrets(req))
+log.ErrorLog(ctx, "invalid create snapshot req: %v", protosanitizer.StripSecrets(req))
return err
}
@@ -1179,7 +1180,7 @@ func (cs *ControllerServer) doSnapshotClone(
err = createRBDClone(ctx, parentVol, cloneRbd, rbdSnap, cr)
if err != nil {
-util.ErrorLog(ctx, "failed to create snapshot: %v", err)
+log.ErrorLog(ctx, "failed to create snapshot: %v", err)
return cloneRbd, err
}
@@ -1190,7 +1191,7 @@ func (cs *ControllerServer) doSnapshotClone(
// cleanup clone and snapshot
errCleanUp := cleanUpSnapshot(ctx, cloneRbd, rbdSnap, cloneRbd, cr)
if errCleanUp != nil {
-util.ErrorLog(ctx, "failed to cleanup snapshot and clone: %v", errCleanUp)
+log.ErrorLog(ctx, "failed to cleanup snapshot and clone: %v", errCleanUp)
}
}
}
@@ -1199,7 +1200,7 @@ func (cs *ControllerServer) doSnapshotClone(
if parentVol.isEncrypted() {
cryptErr := parentVol.copyEncryptionConfig(&cloneRbd.rbdImage)
if cryptErr != nil {
-util.WarningLog(ctx, "failed copy encryption "+
+log.WarningLog(ctx, "failed copy encryption "+
"config for %q: %v", cloneRbd, cryptErr)
return nil, err
@@ -1226,7 +1227,7 @@ func (cs *ControllerServer) doSnapshotClone(
if err != nil {
// update rbd image name for logging
rbdSnap.RbdImageName = cloneRbd.RbdImageName
-util.ErrorLog(ctx, "failed to create snapshot %s: %v", rbdSnap, err)
+log.ErrorLog(ctx, "failed to create snapshot %s: %v", rbdSnap, err)
return cloneRbd, err
}
@@ -1234,14 +1235,14 @@ func (cs *ControllerServer) doSnapshotClone(
err = cloneRbd.getImageID()
if err != nil {
-util.ErrorLog(ctx, "failed to get image id: %v", err)
+log.ErrorLog(ctx, "failed to get image id: %v", err)
return cloneRbd, err
}
// save image ID
j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
if err != nil {
-util.ErrorLog(ctx, "failed to connect to cluster: %v", err)
+log.ErrorLog(ctx, "failed to connect to cluster: %v", err)
return cloneRbd, err
}
@@ -1249,7 +1250,7 @@ func (cs *ControllerServer) doSnapshotClone(
err = j.StoreImageID(ctx, rbdSnap.JournalPool, rbdSnap.ReservedID, cloneRbd.ImageID)
if err != nil {
-util.ErrorLog(ctx, "failed to reserve volume id: %v", err)
+log.ErrorLog(ctx, "failed to reserve volume id: %v", err)
return cloneRbd, err
}
@@ -1269,7 +1270,7 @@ func (cs *ControllerServer) DeleteSnapshot(
req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest(
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
-util.ErrorLog(ctx, "invalid delete snapshot req: %v", protosanitizer.StripSecrets(req))
+log.ErrorLog(ctx, "invalid delete snapshot req: %v", protosanitizer.StripSecrets(req))
return nil, err
}
@@ -1286,7 +1287,7 @@ func (cs *ControllerServer) DeleteSnapshot(
}
if acquired := cs.SnapshotLocks.TryAcquire(snapshotID); !acquired {
-util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID)
+log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, snapshotID)
}
@@ -1294,7 +1295,7 @@ func (cs *ControllerServer) DeleteSnapshot(
// lock out snapshotID for restore operation
if err = cs.OperationLocks.GetDeleteLock(snapshotID); err != nil {
-util.ErrorLog(ctx, err.Error())
+log.ErrorLog(ctx, err.Error())
return nil, status.Error(codes.Aborted, err.Error())
}
@@ -1305,7 +1306,7 @@ func (cs *ControllerServer) DeleteSnapshot(
// if error is ErrPoolNotFound, the pool is already deleted we don't
// need to worry about deleting snapshot or omap data, return success
if errors.Is(err, util.ErrPoolNotFound) {
-util.WarningLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)
+log.WarningLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)
return &csi.DeleteSnapshotResponse{}, nil
}
@@ -1323,14 +1324,14 @@ func (cs *ControllerServer) DeleteSnapshot(
// safeguard against parallel create or delete requests against the same
// name
if acquired := cs.SnapshotLocks.TryAcquire(rbdSnap.RequestName); !acquired {
-util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, rbdSnap.RequestName)
+log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, rbdSnap.RequestName)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, rbdSnap.RequestName)
}
defer cs.SnapshotLocks.Release(rbdSnap.RequestName)
// Deleting snapshot and cloned volume
-util.DebugLog(ctx, "deleting cloned rbd volume %s", rbdSnap.RbdSnapName)
+log.DebugLog(ctx, "deleting cloned rbd volume %s", rbdSnap.RbdSnapName)
rbdVol := generateVolFromSnap(rbdSnap)
@@ -1343,7 +1344,7 @@ func (cs *ControllerServer) DeleteSnapshot(
err = rbdVol.getImageInfo()
if err != nil {
if !errors.Is(err, ErrImageNotFound) {
-util.ErrorLog(ctx, "failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, rbdVol.VolName, err)
+log.ErrorLog(ctx, "failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, rbdVol.VolName, err)
return nil, status.Error(codes.Internal, err.Error())
}
@@ -1353,14 +1354,14 @@ func (cs *ControllerServer) DeleteSnapshot(
rbdSnap.RbdImageName = rbdVol.RbdImageName
err = cleanUpSnapshot(ctx, rbdVol, rbdSnap, rbdVol, cr)
if err != nil {
-util.ErrorLog(ctx, "failed to delete image: %v", err)
+log.ErrorLog(ctx, "failed to delete image: %v", err)
return nil, status.Error(codes.Internal, err.Error())
}
}
err = undoSnapReservation(ctx, rbdSnap, cr)
if err != nil {
-util.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) on image (%s) (%s)",
+log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) on image (%s) (%s)",
rbdSnap.RequestName, rbdSnap.RbdSnapName, rbdSnap.RbdImageName, err)
return nil, status.Error(codes.Internal, err.Error())
@@ -1374,7 +1375,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
ctx context.Context,
req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_EXPAND_VOLUME); err != nil {
-util.ErrorLog(ctx, "invalid expand volume req: %v", protosanitizer.StripSecrets(req))
+log.ErrorLog(ctx, "invalid expand volume req: %v", protosanitizer.StripSecrets(req))
return nil, err
}
@@ -1391,7 +1392,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
// lock out parallel requests against the same volume ID
if acquired := cs.VolumeLocks.TryAcquire(volID); !acquired {
-util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
+log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
}
@@ -1410,7 +1411,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
case errors.Is(err, ErrImageNotFound):
err = status.Errorf(codes.NotFound, "volume ID %s not found", volID)
case errors.Is(err, util.ErrPoolNotFound):
-util.ErrorLog(ctx, "failed to get backend volume for %s: %v", volID, err)
+log.ErrorLog(ctx, "failed to get backend volume for %s: %v", volID, err)
err = status.Errorf(codes.NotFound, err.Error())
default:
err = status.Errorf(codes.Internal, err.Error())
@@ -1430,7 +1431,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
// lock out volumeID for clone and delete operation
if err = cs.OperationLocks.GetExpandLock(volID); err != nil {
-util.ErrorLog(ctx, err.Error())
+log.ErrorLog(ctx, err.Error())
return nil, status.Error(codes.Aborted, err.Error())
}
@@ -1441,10 +1442,10 @@ func (cs *ControllerServer) ControllerExpandVolume(
// resize volume if required
if rbdVol.VolSize < volSize {
-util.DebugLog(ctx, "rbd volume %s size is %v,resizing to %v", rbdVol, rbdVol.VolSize, volSize)
+log.DebugLog(ctx, "rbd volume %s size is %v,resizing to %v", rbdVol, rbdVol.VolSize, volSize)
err = rbdVol.resize(volSize)
if err != nil {
-util.ErrorLog(ctx, "failed to resize rbd image: %s with error: %v", rbdVol, err)
+log.ErrorLog(ctx, "failed to resize rbd image: %s with error: %v", rbdVol, err)
return nil, status.Error(codes.Internal, err.Error())
}

View File
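Note on the pattern in the ControllerServer hunks above (and repeated throughout this commit): the logging helpers move from the internal/util package to a dedicated internal/util/log package. The format-string signatures are unchanged; only the package qualifier flips from util.* to log.*. A minimal runnable sketch of the call style after the move — doOp and work are hypothetical stand-ins, not code from this commit:

package main

import (
	"context"
	"errors"

	"github.com/ceph/ceph-csi/internal/util/log"
)

// doOp shows the relocated helpers; ErrorLog/DebugLog keep the same
// (ctx, format, args...) shape the call sites above rely on.
func doOp(ctx context.Context) error {
	if err := work(); err != nil {
		log.ErrorLog(ctx, "operation failed: %v", err) // was util.ErrorLog
		return err
	}
	log.DebugLog(ctx, "operation succeeded") // was util.DebugLog
	return nil
}

// work is a placeholder for any fallible operation.
func work() error { return errors.New("example failure") }

func main() {
	_ = doOp(context.TODO())
}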

@@ -20,6 +20,7 @@ import (
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/journal"
"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
"github.com/container-storage-interface/spec/lib/go/csi"
mount "k8s.io/mount-utils"
@@ -107,7 +108,7 @@ func (r *Driver) Run(conf *util.Config) {
// Create ceph.conf for use with CLI commands
if err = util.WriteCephConfig(); err != nil {
-util.FatalLogMsg("failed to write ceph configuration file (%v)", err)
+log.FatalLogMsg("failed to write ceph configuration file (%v)", err)
}
// Use passed in instance ID, if provided for omap suffix naming
@@ -128,7 +129,7 @@ func (r *Driver) Run(conf *util.Config) {
// Initialize default library driver
r.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID)
if r.cd == nil {
-util.FatalLogMsg("Failed to initialize CSI Driver.")
+log.FatalLogMsg("Failed to initialize CSI Driver.")
}
if conf.IsControllerServer || !conf.IsNodeServer {
r.cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
@@ -155,11 +156,11 @@ func (r *Driver) Run(conf *util.Config) {
if conf.IsNodeServer {
topology, err = util.GetTopologyFromDomainLabels(conf.DomainLabels, conf.NodeID, conf.DriverName)
if err != nil {
-util.FatalLogMsg(err.Error())
+log.FatalLogMsg(err.Error())
}
r.ns, err = NewNodeServer(r.cd, conf.Vtype, topology)
if err != nil {
-util.FatalLogMsg("failed to start node server, err %v\n", err)
+log.FatalLogMsg("failed to start node server, err %v\n", err)
}
}
@@ -170,11 +171,11 @@ func (r *Driver) Run(conf *util.Config) {
if !conf.IsControllerServer && !conf.IsNodeServer {
topology, err = util.GetTopologyFromDomainLabels(conf.DomainLabels, conf.NodeID, conf.DriverName)
if err != nil {
-util.FatalLogMsg(err.Error())
+log.FatalLogMsg(err.Error())
}
r.ns, err = NewNodeServer(r.cd, conf.Vtype, topology)
if err != nil {
-util.FatalLogMsg("failed to start node server, err %v\n", err)
+log.FatalLogMsg("failed to start node server, err %v\n", err)
}
r.cs = NewControllerServer(r.cd)
}
@@ -190,14 +191,14 @@ func (r *Driver) Run(conf *util.Config) {
}
s.Start(conf.Endpoint, conf.HistogramOption, srv, conf.EnableGRPCMetrics)
if conf.EnableGRPCMetrics {
-util.WarningLogMsg("EnableGRPCMetrics is deprecated")
+log.WarningLogMsg("EnableGRPCMetrics is deprecated")
go util.StartMetricsServer(conf)
}
if conf.EnableProfiling {
if !conf.EnableGRPCMetrics {
go util.StartMetricsServer(conf)
}
-util.DebugLogMsg("Registering profiling handler")
+log.DebugLogMsg("Registering profiling handler")
go util.EnableProfiling()
}
if conf.IsNodeServer {
@@ -205,7 +206,7 @@ func (r *Driver) Run(conf *util.Config) {
// TODO: move the healer to csi-addons
err := runVolumeHealer(r.ns, conf)
if err != nil {
-util.ErrorLogMsg("healer had failures, err %v\n", err)
+log.ErrorLogMsg("healer had failures, err %v\n", err)
}
}()
}

View File
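The Driver.Run hunks above use the message-only variants (log.FatalLogMsg, log.WarningLogMsg, log.DebugLogMsg, log.ErrorLogMsg) rather than the context-taking ones, presumably because no request context exists during process start-up. A short sketch of how the two families split, going by the call sites in this diff:

package main

import (
	"context"

	"github.com/ceph/ceph-csi/internal/util/log"
)

func main() {
	// process-level message: no gRPC request context at start-up
	log.DebugLogMsg("driver starting up")

	// request-scoped message: the context carries request identifiers
	ctx := context.TODO()
	log.DebugLog(ctx, "handling request for volume %s", "vol-example")
}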

@@ -22,7 +22,9 @@ import (
"fmt"
"strings"
+kmsapi "github.com/ceph/ceph-csi/internal/kms"
"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
librbd "github.com/ceph/go-ceph/rbd"
)
@@ -65,17 +67,17 @@ const (
func (ri *rbdImage) checkRbdImageEncrypted(ctx context.Context) (rbdEncryptionState, error) {
value, err := ri.MigrateMetadata(oldEncryptionMetaKey, encryptionMetaKey, string(rbdImageEncryptionUnknown))
if errors.Is(err, librbd.ErrNotFound) {
-util.DebugLog(ctx, "image %s encrypted state not set", ri)
+log.DebugLog(ctx, "image %s encrypted state not set", ri)
return rbdImageEncryptionUnknown, nil
} else if err != nil {
-util.ErrorLog(ctx, "checking image %s encrypted state metadata failed: %s", ri, err)
+log.ErrorLog(ctx, "checking image %s encrypted state metadata failed: %s", ri, err)
return rbdImageEncryptionUnknown, err
}
encrypted := rbdEncryptionState(strings.TrimSpace(value))
-util.DebugLog(ctx, "image %s encrypted state metadata reports %q", ri, encrypted)
+log.DebugLog(ctx, "image %s encrypted state metadata reports %q", ri, encrypted)
return encrypted, nil
}
@@ -100,7 +102,7 @@ func (ri *rbdImage) isEncrypted() bool {
func (ri *rbdImage) setupEncryption(ctx context.Context) error {
err := ri.encryption.StoreNewCryptoPassphrase(ri.VolID)
if err != nil {
-util.ErrorLog(ctx, "failed to save encryption passphrase for "+
+log.ErrorLog(ctx, "failed to save encryption passphrase for "+
"image %s: %s", ri, err)
return err
@@ -108,7 +110,7 @@ func (ri *rbdImage) setupEncryption(ctx context.Context) error {
err = ri.ensureEncryptionMetadataSet(rbdImageEncryptionPrepared)
if err != nil {
-util.ErrorLog(ctx, "failed to save encryption status, deleting "+
+log.ErrorLog(ctx, "failed to save encryption status, deleting "+
"image %s: %s", ri, err)
return err
@@ -185,7 +187,7 @@ func (ri *rbdImage) repairEncryptionConfig(dest *rbdImage) error {
func (ri *rbdImage) encryptDevice(ctx context.Context, devicePath string) error {
passphrase, err := ri.encryption.GetCryptoPassphrase(ri.VolID)
if err != nil {
-util.ErrorLog(ctx, "failed to get crypto passphrase for %s: %v",
+log.ErrorLog(ctx, "failed to get crypto passphrase for %s: %v",
ri, err)
return err
@@ -193,14 +195,14 @@ func (ri *rbdImage) encryptDevice(ctx context.Context, devicePath string) error
if err = util.EncryptVolume(ctx, devicePath, passphrase); err != nil {
err = fmt.Errorf("failed to encrypt volume %s: %w", ri, err)
-util.ErrorLog(ctx, err.Error())
+log.ErrorLog(ctx, err.Error())
return err
}
err = ri.ensureEncryptionMetadataSet(rbdImageEncrypted)
if err != nil {
-util.ErrorLog(ctx, err.Error())
+log.ErrorLog(ctx, err.Error())
return err
}
@@ -211,7 +213,7 @@ func (ri *rbdImage) encryptDevice(ctx context.Context, devicePath string) error
func (rv *rbdVolume) openEncryptedDevice(ctx context.Context, devicePath string) (string, error) {
passphrase, err := rv.encryption.GetCryptoPassphrase(rv.VolID)
if err != nil {
-util.ErrorLog(ctx, "failed to get passphrase for encrypted device %s: %v",
+log.ErrorLog(ctx, "failed to get passphrase for encrypted device %s: %v",
rv, err)
return "", err
@@ -221,16 +223,16 @@ func (rv *rbdVolume) openEncryptedDevice(ctx context.Context, devicePath string)
isOpen, err := util.IsDeviceOpen(ctx, mapperFilePath)
if err != nil {
-util.ErrorLog(ctx, "failed to check device %s encryption status: %s", devicePath, err)
+log.ErrorLog(ctx, "failed to check device %s encryption status: %s", devicePath, err)
return devicePath, err
}
if isOpen {
-util.DebugLog(ctx, "encrypted device is already open at %s", mapperFilePath)
+log.DebugLog(ctx, "encrypted device is already open at %s", mapperFilePath)
} else {
err = util.OpenEncryptedVolume(ctx, devicePath, mapperFile, passphrase)
if err != nil {
-util.ErrorLog(ctx, "failed to open device %s: %v",
+log.ErrorLog(ctx, "failed to open device %s: %v",
rv, err)
return devicePath, err
@@ -270,7 +272,7 @@ func (ri *rbdImage) ParseEncryptionOpts(ctx context.Context, volOptions map[stri
// FIXME: this works only on Kubernetes, how do other CO supply metadata?
ri.Owner, ok = volOptions["csi.storage.k8s.io/pvc/namespace"]
if !ok {
-util.DebugLog(ctx, "could not detect owner for %s", ri)
+log.DebugLog(ctx, "could not detect owner for %s", ri)
}
encrypted, ok = volOptions["encrypted"]
@@ -288,7 +290,7 @@ func (ri *rbdImage) ParseEncryptionOpts(ctx context.Context, volOptions map[stri
// configureEncryption sets up the VolumeEncryption for this rbdImage. Once
// configured, use isEncrypted() to see if the volume supports encryption.
func (ri *rbdImage) configureEncryption(kmsID string, credentials map[string]string) error {
-kms, err := util.GetKMS(ri.Owner, kmsID, credentials)
+kms, err := kmsapi.GetKMS(ri.Owner, kmsID, credentials)
if err != nil {
return err
}

View File
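Besides the logging rename, the encryption hunks above relocate the KMS lookup: util.GetKMS becomes kmsapi.GetKMS, with internal/kms imported under the kmsapi alias. A minimal sketch of the relocated call, assuming the three-argument signature visible at the configureEncryption call site; the wrapper and its arguments are placeholders:

package main

import (
	"fmt"

	kmsapi "github.com/ceph/ceph-csi/internal/kms"
)

// lookupKMS is a hypothetical wrapper around the relocated GetKMS call.
func lookupKMS(owner, kmsID string, secrets map[string]string) error {
	kms, err := kmsapi.GetKMS(owner, kmsID, secrets) // was util.GetKMS
	if err != nil {
		return fmt.Errorf("failed to init KMS %q: %w", kmsID, err)
	}
	_ = kms // would be handed to the volume-encryption helper
	return nil
}

func main() {
	_ = lookupKMS("pvc-namespace", "vault-kms-id", map[string]string{})
}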

@@ -27,6 +27,7 @@ import (
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/journal"
"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
librbd "github.com/ceph/go-ceph/rbd"
"github.com/container-storage-interface/spec/lib/go/csi"
@@ -131,7 +132,7 @@ func isStaticVolume(parameters map[string]string) bool {
func healerStageTransaction(ctx context.Context, cr *util.Credentials, volOps *rbdVolume, metaDataPath string) error {
imgInfo, err := lookupRBDImageMetadataStash(metaDataPath)
if err != nil {
-util.ErrorLog(ctx, "failed to find image metadata, at stagingPath: %s, err: %v", metaDataPath, err)
+log.ErrorLog(ctx, "failed to find image metadata, at stagingPath: %s, err: %v", metaDataPath, err)
return err
}
@@ -143,7 +144,7 @@ func healerStageTransaction(ctx context.Context, cr *util.Credentials, volOps *r
if err != nil {
return err
}
-util.DebugLog(ctx, "rbd volID: %s was successfully attached to device: %s", volOps.VolID, devicePath)
+log.DebugLog(ctx, "rbd volID: %s was successfully attached to device: %s", volOps.VolID, devicePath)
return nil
}
@@ -175,7 +176,7 @@ func (ns *NodeServer) NodeStageVolume(
// MULTI_NODE_MULTI_WRITER is supported by default for Block access type volumes
if req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER {
if !isBlock {
-util.WarningLog(
+log.WarningLog(
ctx,
"MULTI_NODE_MULTI_WRITER currently only supported with volumes of access type `block`,"+
"invalid AccessMode for volume: %v",
@@ -200,7 +201,7 @@ func (ns *NodeServer) NodeStageVolume(
defer cr.DeleteCredentials()
if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
-util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
+log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
}
@@ -217,7 +218,7 @@ func (ns *NodeServer) NodeStageVolume(
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
} else if !isNotMnt {
-util.DebugLog(ctx, "rbd: volume %s is already mounted to %s, skipping", volID, stagingTargetPath)
+log.DebugLog(ctx, "rbd: volume %s is already mounted to %s, skipping", volID, stagingTargetPath)
return &csi.NodeStageVolumeResponse{}, nil
}
@@ -254,7 +255,7 @@ func (ns *NodeServer) NodeStageVolume(
j, connErr := volJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr)
if connErr != nil {
-util.ErrorLog(ctx, "failed to establish cluster connection: %v", connErr)
+log.ErrorLog(ctx, "failed to establish cluster connection: %v", connErr)
return nil, status.Error(codes.Internal, connErr.Error())
}
@@ -281,7 +282,7 @@ func (ns *NodeServer) NodeStageVolume(
err = volOptions.Connect(cr)
if err != nil {
-util.ErrorLog(ctx, "failed to connect to volume %s: %v", volOptions, err)
+log.ErrorLog(ctx, "failed to connect to volume %s: %v", volOptions, err)
return nil, status.Error(codes.Internal, err.Error())
}
@@ -316,7 +317,7 @@ func (ns *NodeServer) NodeStageVolume(
return nil, status.Error(codes.Internal, err.Error())
}
-util.DebugLog(
+log.DebugLog(
ctx,
"rbd: successfully mounted volume %s to stagingTargetPath %s",
volID,
@@ -346,7 +347,7 @@ func (ns *NodeServer) stageTransaction(
// Allow image to be mounted on multiple nodes if it is ROX
if req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY {
-util.ExtendedLog(ctx, "setting disableInUseChecks on rbd volume to: %v", req.GetVolumeId)
+log.ExtendedLog(ctx, "setting disableInUseChecks on rbd volume to: %v", req.GetVolumeId)
volOptions.DisableInUseChecks = true
volOptions.readOnly = true
}
@@ -382,7 +383,7 @@ func (ns *NodeServer) stageTransaction(
}
transaction.devicePath = devicePath
-util.DebugLog(ctx, "rbd image: %s/%s was successfully mapped at %s\n",
+log.DebugLog(ctx, "rbd image: %s/%s was successfully mapped at %s\n",
req.GetVolumeId(), volOptions.Pool, devicePath)
// userspace mounters like nbd need the device path as a reference while
@@ -440,7 +441,7 @@ func (ns *NodeServer) undoStagingTransaction(
if transaction.isMounted {
err = ns.mounter.Unmount(stagingTargetPath)
if err != nil {
-util.ErrorLog(ctx, "failed to unmount stagingtargetPath: %s with error: %v", stagingTargetPath, err)
+log.ErrorLog(ctx, "failed to unmount stagingtargetPath: %s with error: %v", stagingTargetPath, err)
return
}
@@ -450,7 +451,7 @@ func (ns *NodeServer) undoStagingTransaction(
if transaction.isStagePathCreated {
err = os.Remove(stagingTargetPath)
if err != nil {
-util.ErrorLog(ctx, "failed to remove stagingtargetPath: %s with error: %v", stagingTargetPath, err)
+log.ErrorLog(ctx, "failed to remove stagingtargetPath: %s with error: %v", stagingTargetPath, err)
// continue on failure to unmap the image, as leaving stale images causes more issues than a stale
// file/directory
}
@@ -462,7 +463,7 @@ func (ns *NodeServer) undoStagingTransaction(
if transaction.devicePath != "" {
err = detachRBDDevice(ctx, transaction.devicePath, volID, volOptions.UnmapOptions, transaction.isEncrypted)
if err != nil {
-util.ErrorLog(
+log.ErrorLog(
ctx,
"failed to unmap rbd device: %s for volume %s with error: %v",
transaction.devicePath,
@@ -475,7 +476,7 @@ func (ns *NodeServer) undoStagingTransaction(
// Cleanup the stashed image metadata
if err = cleanupRBDImageMetadataStash(req.GetStagingTargetPath()); err != nil {
-util.ErrorLog(ctx, "failed to cleanup image metadata stash (%v)", err)
+log.ErrorLog(ctx, "failed to cleanup image metadata stash (%v)", err)
return
}
@@ -486,12 +487,12 @@ func (ns *NodeServer) createStageMountPoint(ctx context.Context, mountPath strin
// #nosec:G304, intentionally creating file mountPath, not a security issue
pathFile, err := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0o600)
if err != nil {
-util.ErrorLog(ctx, "failed to create mountPath:%s with error: %v", mountPath, err)
+log.ErrorLog(ctx, "failed to create mountPath:%s with error: %v", mountPath, err)
return status.Error(codes.Internal, err.Error())
}
if err = pathFile.Close(); err != nil {
-util.ErrorLog(ctx, "failed to close mountPath:%s with error: %v", mountPath, err)
+log.ErrorLog(ctx, "failed to close mountPath:%s with error: %v", mountPath, err)
return status.Error(codes.Internal, err.Error())
}
@@ -502,7 +503,7 @@ func (ns *NodeServer) createStageMountPoint(ctx context.Context, mountPath strin
err := os.Mkdir(mountPath, 0o750)
if err != nil {
if !os.IsExist(err) {
-util.ErrorLog(ctx, "failed to create mountPath:%s with error: %v", mountPath, err)
+log.ErrorLog(ctx, "failed to create mountPath:%s with error: %v", mountPath, err)
return status.Error(codes.Internal, err.Error())
}
@@ -545,7 +546,7 @@ func (ns *NodeServer) NodePublishVolume(
return nil, err
}
-util.DebugLog(ctx, "rbd: successfully mounted stagingPath %s to targetPath %s", stagingPath, targetPath)
+log.DebugLog(ctx, "rbd: successfully mounted stagingPath %s to targetPath %s", stagingPath, targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
@@ -570,7 +571,7 @@ func (ns *NodeServer) mountVolumeToStagePath(
// the first time).
existingFormat, err := diskMounter.GetDiskFormat(devicePath)
if err != nil {
-util.ErrorLog(ctx, "failed to get disk format for path %s, error: %v", devicePath, err)
+log.ErrorLog(ctx, "failed to get disk format for path %s, error: %v", devicePath, err)
return readOnly, err
}
@@ -610,7 +611,7 @@ func (ns *NodeServer) mountVolumeToStagePath(
if len(args) > 0 {
cmdOut, cmdErr := diskMounter.Exec.Command("mkfs."+fsType, args...).CombinedOutput()
if cmdErr != nil {
-util.ErrorLog(ctx, "failed to run mkfs error: %v, output: %v", cmdErr, string(cmdOut))
+log.ErrorLog(ctx, "failed to run mkfs error: %v, output: %v", cmdErr, string(cmdOut))
return readOnly, cmdErr
}
@@ -624,7 +625,7 @@ func (ns *NodeServer) mountVolumeToStagePath(
err = diskMounter.FormatAndMount(devicePath, stagingPath, fsType, opt)
}
if err != nil {
-util.ErrorLog(ctx,
+log.ErrorLog(ctx,
"failed to mount device path (%s) to staging path (%s) for volume "+
"(%s) error: %s Check dmesg logs if required.",
devicePath,
@@ -646,7 +647,7 @@ func (ns *NodeServer) mountVolume(ctx context.Context, stagingPath string, req *
mountOptions = csicommon.ConstructMountOptions(mountOptions, req.GetVolumeCapability())
-util.DebugLog(ctx, "target %v\nisBlock %v\nfstype %v\nstagingPath %v\nreadonly %v\nmountflags %v\n",
+log.DebugLog(ctx, "target %v\nisBlock %v\nfstype %v\nstagingPath %v\nreadonly %v\nmountflags %v\n",
targetPath, isBlock, fsType, stagingPath, readOnly, mountOptions)
if readOnly {
@@ -672,12 +673,12 @@ func (ns *NodeServer) createTargetMountPath(ctx context.Context, mountPath strin
// #nosec
pathFile, e := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0o750)
if e != nil {
-util.DebugLog(ctx, "Failed to create mountPath:%s with error: %v", mountPath, err)
+log.DebugLog(ctx, "Failed to create mountPath:%s with error: %v", mountPath, err)
return notMnt, status.Error(codes.Internal, e.Error())
}
if err = pathFile.Close(); err != nil {
-util.DebugLog(ctx, "Failed to close mountPath:%s with error: %v", mountPath, err)
+log.DebugLog(ctx, "Failed to close mountPath:%s with error: %v", mountPath, err)
return notMnt, status.Error(codes.Internal, err.Error())
}
@@ -708,7 +709,7 @@ func (ns *NodeServer) NodeUnpublishVolume(
if err != nil {
if os.IsNotExist(err) {
// targetPath has already been deleted
-util.DebugLog(ctx, "targetPath: %s has already been deleted", targetPath)
+log.DebugLog(ctx, "targetPath: %s has already been deleted", targetPath)
return &csi.NodeUnpublishVolumeResponse{}, nil
}
@@ -731,7 +732,7 @@ func (ns *NodeServer) NodeUnpublishVolume(
return nil, status.Error(codes.Internal, err.Error())
}
-util.DebugLog(ctx, "rbd: successfully unbound volume %s from %s", req.GetVolumeId(), targetPath)
+log.DebugLog(ctx, "rbd: successfully unbound volume %s from %s", req.GetVolumeId(), targetPath)
return &csi.NodeUnpublishVolumeResponse{}, nil
}
@@ -761,7 +762,7 @@ func (ns *NodeServer) NodeUnstageVolume(
volID := req.GetVolumeId()
if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
-util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
+log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
}
@@ -782,11 +783,11 @@ func (ns *NodeServer) NodeUnstageVolume(
// Unmounting the image
err = ns.mounter.Unmount(stagingTargetPath)
if err != nil {
-util.ExtendedLog(ctx, "failed to unmount targetPath: %s with error: %v", stagingTargetPath, err)
+log.ExtendedLog(ctx, "failed to unmount targetPath: %s with error: %v", stagingTargetPath, err)
return nil, status.Error(codes.Internal, err.Error())
}
-util.DebugLog(ctx, "successfully unmounted volume (%s) from staging path (%s)",
+log.DebugLog(ctx, "successfully unmounted volume (%s) from staging path (%s)",
req.GetVolumeId(), stagingTargetPath)
}
@@ -795,7 +796,7 @@ func (ns *NodeServer) NodeUnstageVolume(
// keeps invoking Unstage. Hence any errors removing files within this path is a critical
// error
if !os.IsNotExist(err) {
-util.ErrorLog(ctx, "failed to remove staging target path (%s): (%v)", stagingTargetPath, err)
+log.ErrorLog(ctx, "failed to remove staging target path (%s): (%v)", stagingTargetPath, err)
return nil, status.Error(codes.Internal, err.Error())
}
@@ -803,7 +804,7 @@ func (ns *NodeServer) NodeUnstageVolume(
imgInfo, err := lookupRBDImageMetadataStash(stagingParentPath)
if err != nil {
-util.UsefulLog(ctx, "failed to find image metadata: %v", err)
+log.UsefulLog(ctx, "failed to find image metadata: %v", err)
// It is an error if it was mounted, as we should have found the image metadata file with
// no errors
if !notMnt {
@@ -833,7 +834,7 @@ func (ns *NodeServer) NodeUnstageVolume(
logDir: imgInfo.LogDir,
}
if err = detachRBDImageOrDeviceSpec(ctx, dArgs); err != nil {
-util.ErrorLog(
+log.ErrorLog(
ctx,
"error unmapping volume (%s) from staging path (%s): (%v)",
req.GetVolumeId(),
@@ -843,10 +844,10 @@ func (ns *NodeServer) NodeUnstageVolume(
return nil, status.Error(codes.Internal, err.Error())
}
-util.DebugLog(ctx, "successfully unmapped volume (%s)", req.GetVolumeId())
+log.DebugLog(ctx, "successfully unmapped volume (%s)", req.GetVolumeId())
if err = cleanupRBDImageMetadataStash(stagingParentPath); err != nil {
-util.ErrorLog(ctx, "failed to cleanup image metadata stash (%v)", err)
+log.ErrorLog(ctx, "failed to cleanup image metadata stash (%v)", err)
return nil, status.Error(codes.Internal, err.Error())
}
@@ -877,7 +878,7 @@ func (ns *NodeServer) NodeExpandVolume(
}
if acquired := ns.VolumeLocks.TryAcquire(volumeID); !acquired {
-util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)
+log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
}
@@ -885,7 +886,7 @@ func (ns *NodeServer) NodeExpandVolume(
imgInfo, err := lookupRBDImageMetadataStash(volumePath)
if err != nil {
-util.ErrorLog(ctx, "failed to find image metadata: %v", err)
+log.ErrorLog(ctx, "failed to find image metadata: %v", err)
}
devicePath, found := findDeviceMappingImage(
ctx,
@@ -903,7 +904,7 @@ func (ns *NodeServer) NodeExpandVolume(
// The volume is encrypted, resize an active mapping
err = util.ResizeEncryptedVolume(ctx, mapperFile)
if err != nil {
-util.ErrorLog(ctx, "failed to resize device %s, mapper %s: %w",
+log.ErrorLog(ctx, "failed to resize device %s, mapper %s: %w",
devicePath, mapperFile, err)
return nil, status.Errorf(codes.Internal,
@@ -966,7 +967,7 @@ func (ns *NodeServer) processEncryptedDevice(
imageSpec := volOptions.String()
encrypted, err := volOptions.checkRbdImageEncrypted(ctx)
if err != nil {
-util.ErrorLog(ctx, "failed to get encryption status for rbd image %s: %v",
+log.ErrorLog(ctx, "failed to get encryption status for rbd image %s: %v",
imageSpec, err)
return "", err
@@ -982,7 +983,7 @@ func (ns *NodeServer) processEncryptedDevice(
// continue with the common process to crypt-format the device.
err = volOptions.setupEncryption(ctx)
if err != nil {
-util.ErrorLog(ctx, "failed to setup encryption for rbd"+
+log.ErrorLog(ctx, "failed to setup encryption for rbd"+
"image %s: %v", imageSpec, err)
return "", err
@@ -1006,7 +1007,7 @@ func (ns *NodeServer) processEncryptedDevice(
return "", fmt.Errorf("failed to encrypt rbd image %s: %w", imageSpec, err)
}
case "crypt", "crypto_LUKS":
-util.WarningLog(ctx, "rbd image %s is encrypted, but encryption state was not updated",
+log.WarningLog(ctx, "rbd image %s is encrypted, but encryption state was not updated",
imageSpec)
err = volOptions.ensureEncryptionMetadataSet(rbdImageEncrypted)
if err != nil {
@@ -1092,7 +1093,7 @@ func blockNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.NodeG
lsblkSize, _, err := util.ExecCommand(ctx, "/bin/lsblk", args...)
if err != nil {
err = fmt.Errorf("lsblk %v returned an error: %w", args, err)
-util.ErrorLog(ctx, err.Error())
+log.ErrorLog(ctx, err.Error())
return nil, status.Error(codes.Internal, err.Error())
}
@@ -1100,7 +1101,7 @@ func blockNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.NodeG
size, err := strconv.ParseInt(strings.TrimSpace(lsblkSize), 10, 64)
if err != nil {
err = fmt.Errorf("failed to convert %q to bytes: %w", lsblkSize, err)
-util.ErrorLog(ctx, err.Error())
+log.ErrorLog(ctx, err.Error())
return nil, status.Error(codes.Internal, err.Error())
}

View File
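The blockNodeGetVolumeStats hunks near the end of the node-server section above size a raw block volume by running lsblk and parsing the byte count it prints. A standalone sketch of that probe; the exact lsblk flags are an assumption, since the diff elides the args slice:

package main

import (
	"fmt"
	"os/exec"
	"strconv"
	"strings"
)

// blockSizeBytes asks lsblk for just the SIZE column, in bytes, without
// headers, then parses the single line of output.
func blockSizeBytes(targetPath string) (int64, error) {
	out, err := exec.Command("/bin/lsblk", "--noheadings", "--bytes",
		"--output", "SIZE", targetPath).Output()
	if err != nil {
		return 0, fmt.Errorf("lsblk on %q returned an error: %w", targetPath, err)
	}
	return strconv.ParseInt(strings.TrimSpace(string(out)), 10, 64)
}

func main() {
	size, err := blockSizeBytes("/dev/rbd0")
	fmt.Println(size, err)
}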

@@ -27,6 +27,7 @@ import (
"time"
"github.com/ceph/ceph-csi/internal/util"
+"github.com/ceph/ceph-csi/internal/util/log"
"k8s.io/apimachinery/pkg/util/wait"
)
@@ -161,7 +162,7 @@ func findDeviceMappingImage(ctx context.Context, pool, namespace, image string,
rbdDeviceList, err := rbdGetDeviceList(ctx, accessType)
if err != nil {
-util.WarningLog(ctx, "failed to determine if image (%s) is mapped to a device (%v)", imageSpec, err)
+log.WarningLog(ctx, "failed to determine if image (%s) is mapped to a device (%v)", imageSpec, err)
return "", false
}
@@ -199,17 +200,17 @@ func checkRbdNbdTools() bool {
// try to load the module
_, _, err = util.ExecCommand(context.TODO(), "modprobe", moduleNbd)
if err != nil {
-util.ExtendedLogMsg("rbd-nbd: nbd modprobe failed with error %v", err)
+log.ExtendedLogMsg("rbd-nbd: nbd modprobe failed with error %v", err)
return false
}
}
if _, _, err := util.ExecCommand(context.TODO(), rbdTonbd, "--version"); err != nil {
-util.ExtendedLogMsg("rbd-nbd: running rbd-nbd --version failed with error %v", err)
+log.ExtendedLogMsg("rbd-nbd: running rbd-nbd --version failed with error %v", err)
return false
}
-util.ExtendedLogMsg("rbd-nbd tools were found.")
+log.ExtendedLogMsg("rbd-nbd tools were found.")
return true
}
@@ -305,7 +306,7 @@ func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util.
isNbd := false
imagePath := volOpt.String()
-util.TraceLog(ctx, "rbd: map mon %s", volOpt.Monitors)
+log.TraceLog(ctx, "rbd: map mon %s", volOpt.Monitors)
mapArgs := []string{
"--id", cr.ID,
@@ -321,7 +322,7 @@ func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util.
// check if the image should stay thick-provisioned
isThick, err := volOpt.isThickProvisioned()
if err != nil {
-util.WarningLog(ctx, "failed to detect if image %q is thick-provisioned: %v", volOpt, err)
+log.WarningLog(ctx, "failed to detect if image %q is thick-provisioned: %v", volOpt, err)
}
if isNbd {
@@ -347,7 +348,7 @@ func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util.
// Execute map
stdout, stderr, err := util.ExecCommand(ctx, cli, mapArgs...)
if err != nil {
-util.WarningLog(ctx, "rbd: map error %v, rbd output: %s", err, stderr)
+log.WarningLog(ctx, "rbd: map error %v, rbd output: %s", err, stderr)
// unmap rbd image if connection timeout
if strings.Contains(err.Error(), rbdMapConnectionTimeout) {
dArgs := detachRBDImageArgs{
@@ -361,7 +362,7 @@ func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util.
}
detErr := detachRBDImageOrDeviceSpec(ctx, dArgs)
if detErr != nil {
-util.WarningLog(ctx, "rbd: %s unmap error %v", imagePath, detErr)
+log.WarningLog(ctx, "rbd: %s unmap error %v", imagePath, detErr)
}
}
@@ -381,7 +382,7 @@ func waitForrbdImage(ctx context.Context, backoff wait.Backoff, volOptions *rbdV
return false, fmt.Errorf("fail to check rbd image status: (%w)", err)
}
if (volOptions.DisableInUseChecks) && (used) {
-util.UsefulLog(ctx, "valid multi-node attach requested, ignoring watcher in-use result")
+log.UsefulLog(ctx, "valid multi-node attach requested, ignoring watcher in-use result")
return used, nil
}
@@ -423,7 +424,7 @@ func detachRBDImageOrDeviceSpec(
mapperFile, mapperPath := util.VolumeMapper(dArgs.volumeID)
mappedDevice, mapper, err := util.DeviceEncryptionStatus(ctx, mapperPath)
if err != nil {
-util.ErrorLog(ctx, "error determining LUKS device on %s, %s: %s",
+log.ErrorLog(ctx, "error determining LUKS device on %s, %s: %s",
mapperPath, dArgs.imageOrDeviceSpec, err)
return err
@@ -432,7 +433,7 @@ func detachRBDImageOrDeviceSpec(
// mapper found, so it is open Luks device
err = util.CloseEncryptedVolume(ctx, mapperFile)
if err != nil {
-util.ErrorLog(ctx, "error closing LUKS device on %s, %s: %s",
+log.ErrorLog(ctx, "error closing LUKS device on %s, %s: %s",
mapperPath, dArgs.imageOrDeviceSpec, err)
return err
@@ -452,7 +453,7 @@ func detachRBDImageOrDeviceSpec(
(strings.Contains(stderr, fmt.Sprintf(rbdUnmapCmdkRbdMissingMap, dArgs.imageOrDeviceSpec)) ||
strings.Contains(stderr, fmt.Sprintf(rbdUnmapCmdNbdMissingMap, dArgs.imageOrDeviceSpec))) {
// Devices found not to be mapped are treated as a successful detach
-util.TraceLog(ctx, "image or device spec (%s) not mapped", dArgs.imageOrDeviceSpec)
+log.TraceLog(ctx, "image or device spec (%s) not mapped", dArgs.imageOrDeviceSpec)
return nil
}
@@ -462,7 +463,7 @@ func detachRBDImageOrDeviceSpec(
if dArgs.isNbd && dArgs.logDir != "" {
logFile := getCephClientLogFileName(dArgs.volumeID, dArgs.logDir, "rbd-nbd")
if err = os.Remove(logFile); err != nil {
-util.WarningLog(ctx, "failed to remove logfile: %s, error: %v",
+log.WarningLog(ctx, "failed to remove logfile: %s, error: %v",
logFile, err)
}
}

View File
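checkRbdNbdTools in the attach hunks above gates the rbd-nbd mounter on two probes: the nbd kernel module must load via modprobe, and the rbd-nbd binary must answer --version. A condensed, simplified stand-in (the real code goes through util.ExecCommand and the moduleNbd/rbdTonbd constants):

package main

import (
	"context"
	"fmt"
	"os/exec"
)

// nbdAvailable reports whether rbd-nbd can be offered as a mounter:
// both the kernel module and the userspace tool must be present.
func nbdAvailable(ctx context.Context) bool {
	if err := exec.CommandContext(ctx, "modprobe", "nbd").Run(); err != nil {
		return false // kernel module missing or not loadable
	}
	if err := exec.CommandContext(ctx, "rbd-nbd", "--version").Run(); err != nil {
		return false // userspace tool missing
	}
	return true
}

func main() {
	fmt.Println("rbd-nbd usable:", nbdAvailable(context.TODO()))
}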

@ -21,6 +21,8 @@ import (
"sync" "sync"
"github.com/ceph/ceph-csi/internal/util" "github.com/ceph/ceph-csi/internal/util"
kubeclient "github.com/ceph/ceph-csi/internal/util/k8s"
"github.com/ceph/ceph-csi/internal/util/log"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
@ -56,7 +58,7 @@ func getSecret(c *k8s.Clientset, ns, name string) (map[string]string, error) {
secret, err := c.CoreV1().Secrets(ns).Get(context.TODO(), name, metav1.GetOptions{}) secret, err := c.CoreV1().Secrets(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil { if err != nil {
util.ErrorLogMsg("get secret failed, err: %v", err) log.ErrorLogMsg("get secret failed, err: %v", err)
return nil, err return nil, err
} }
@ -74,14 +76,14 @@ func callNodeStageVolume(ns *NodeServer, c *k8s.Clientset, pv *v1.PersistentVolu
volID := pv.Spec.PersistentVolumeSource.CSI.VolumeHandle volID := pv.Spec.PersistentVolumeSource.CSI.VolumeHandle
stagingParentPath := stagingPath + pv.Name + "/globalmount" stagingParentPath := stagingPath + pv.Name + "/globalmount"
-	util.DefaultLog("sending nodeStageVolume for volID: %s, stagingPath: %s",
+	log.DefaultLog("sending nodeStageVolume for volID: %s, stagingPath: %s",
 		volID, stagingParentPath)
 	deviceSecret, err := getSecret(c,
 		pv.Spec.PersistentVolumeSource.CSI.NodeStageSecretRef.Namespace,
 		pv.Spec.PersistentVolumeSource.CSI.NodeStageSecretRef.Name)
 	if err != nil {
-		util.ErrorLogMsg("getSecret failed for volID: %s, err: %v", volID, err)
+		log.ErrorLogMsg("getSecret failed for volID: %s, err: %v", volID, err)
 		return err
 	}
@@ -116,7 +118,7 @@ func callNodeStageVolume(ns *NodeServer, c *k8s.Clientset, pv *v1.PersistentVolu
 	_, err = ns.NodeStageVolume(context.TODO(), req)
 	if err != nil {
-		util.ErrorLogMsg("nodeStageVolume request failed, volID: %s, stagingPath: %s, err: %v",
+		log.ErrorLogMsg("nodeStageVolume request failed, volID: %s, stagingPath: %s, err: %v",
 			volID, stagingParentPath, err)
 		return err
@@ -127,10 +129,10 @@ func callNodeStageVolume(ns *NodeServer, c *k8s.Clientset, pv *v1.PersistentVolu
 // runVolumeHealer heal the volumes attached on a node.
 func runVolumeHealer(ns *NodeServer, conf *util.Config) error {
-	c := util.NewK8sClient()
+	c := kubeclient.NewK8sClient()
 	val, err := c.StorageV1().VolumeAttachments().List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
-		util.ErrorLogMsg("list volumeAttachments failed, err: %v", err)
+		log.ErrorLogMsg("list volumeAttachments failed, err: %v", err)
 		return err
 	}
@@ -147,14 +149,13 @@ func runVolumeHealer(ns *NodeServer, conf *util.Config) error {
 		if err != nil {
 			// skip if volume doesn't exist
 			if !apierrors.IsNotFound(err) {
-				util.ErrorLogMsg("get persistentVolumes failed for pv: %s, err: %v", pvName, err)
+				log.ErrorLogMsg("get persistentVolumes failed for pv: %s, err: %v", pvName, err)
 			}
 			continue
 		}
-		// TODO: check with pv delete annotations, for eg: what happens when the pv is marked for delete
-		// skip this volumeattachment if its pv is not bound
-		if pv.Status.Phase != v1.VolumeBound {
+		// skip this volumeattachment if its pv is not bound or marked for deletion
+		if pv.Status.Phase != v1.VolumeBound || pv.DeletionTimestamp != nil {
 			continue
 		}
 		// skip if mounter is not rbd-nbd
@@ -167,7 +168,7 @@ func runVolumeHealer(ns *NodeServer, conf *util.Config) error {
 		if err != nil {
 			// skip if volume attachment doesn't exist
 			if !apierrors.IsNotFound(err) {
-				util.ErrorLogMsg("get volumeAttachments failed for volumeAttachment: %s, volID: %s, err: %v",
+				log.ErrorLogMsg("get volumeAttachments failed for volumeAttachment: %s, volID: %s, err: %v",
 					val.Items[i].Name, pv.Spec.PersistentVolumeSource.CSI.VolumeHandle, err)
 			}
@@ -192,7 +193,7 @@ func runVolumeHealer(ns *NodeServer, conf *util.Config) error {
 	for s := range channel {
 		if s != nil {
-			util.ErrorLogMsg("callNodeStageVolume failed, err: %v", s)
+			log.ErrorLogMsg("callNodeStageVolume failed, err: %v", s)
 		}
 	}
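Condensed, the healer loop these hunks modify filters VolumeAttachments before restaging. A paraphrase under the names shown above (not the verbatim function; the PV lookup via client-go is assumed from context):

// sketch of runVolumeHealer's filtering, assuming the client-go calls shown above
func listHealCandidates() error {
	c := kubeclient.NewK8sClient()
	vaList, err := c.StorageV1().VolumeAttachments().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range vaList.Items {
		pvName := vaList.Items[i].Spec.Source.PersistentVolumeName
		if pvName == nil {
			continue
		}
		pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), *pvName, metav1.GetOptions{})
		if err != nil {
			continue // NotFound is skipped silently, other errors were logged above
		}
		// the new guard: unbound PVs and PVs being deleted are no longer restaged
		if pv.Status.Phase != v1.VolumeBound || pv.DeletionTimestamp != nil {
			continue
		}
		// remaining candidates go through callNodeStageVolume; errors
		// drain via the channel shown in the last hunk
	}
	return nil
}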

View File

@@ -23,6 +23,7 @@ import (
 	"github.com/ceph/ceph-csi/internal/journal"
 	"github.com/ceph/ceph-csi/internal/util"
+	"github.com/ceph/ceph-csi/internal/util/log"
 )
 const (
@@ -167,7 +168,7 @@ func checkSnapCloneExists(
 			err = parentVol.deleteSnapshot(ctx, rbdSnap)
 			if err != nil {
 				if !errors.Is(err, ErrSnapNotFound) {
-					util.ErrorLog(ctx, "failed to delete snapshot %s: %v", rbdSnap, err)
+					log.ErrorLog(ctx, "failed to delete snapshot %s: %v", rbdSnap, err)
 					return false, err
 				}
@@ -198,7 +199,7 @@ func checkSnapCloneExists(
 		// create snapshot
 		sErr := vol.createSnapshot(ctx, rbdSnap)
 		if sErr != nil {
-			util.ErrorLog(ctx, "failed to create snapshot %s: %v", rbdSnap, sErr)
+			log.ErrorLog(ctx, "failed to create snapshot %s: %v", rbdSnap, sErr)
 			err = undoSnapshotCloning(ctx, parentVol, rbdSnap, vol, cr)
 			return false, err
@@ -211,21 +212,21 @@ func checkSnapCloneExists(
 	if vol.ImageID == "" {
 		sErr := vol.getImageID()
 		if sErr != nil {
-			util.ErrorLog(ctx, "failed to get image id %s: %v", vol, sErr)
+			log.ErrorLog(ctx, "failed to get image id %s: %v", vol, sErr)
 			err = undoSnapshotCloning(ctx, parentVol, rbdSnap, vol, cr)
 			return false, err
 		}
 		sErr = j.StoreImageID(ctx, vol.JournalPool, vol.ReservedID, vol.ImageID)
 		if sErr != nil {
-			util.ErrorLog(ctx, "failed to store volume id %s: %v", vol, sErr)
+			log.ErrorLog(ctx, "failed to store volume id %s: %v", vol, sErr)
 			err = undoSnapshotCloning(ctx, parentVol, rbdSnap, vol, cr)
 			return false, err
 		}
 	}
-	util.DebugLog(ctx, "found existing image (%s) with name (%s) for request (%s)",
+	log.DebugLog(ctx, "found existing image (%s) with name (%s) for request (%s)",
 		rbdSnap.VolID, rbdSnap.RbdSnapName, rbdSnap.RequestName)
 	return true, nil
@@ -335,13 +336,13 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er
 	if parentVol != nil && parentVol.isEncrypted() {
 		err = parentVol.copyEncryptionConfig(&rv.rbdImage)
 		if err != nil {
-			util.ErrorLog(ctx, err.Error())
+			log.ErrorLog(ctx, err.Error())
 			return false, err
 		}
 	}
-	util.DebugLog(ctx, "found existing volume (%s) with image name (%s) for request (%s)",
+	log.DebugLog(ctx, "found existing volume (%s) with image name (%s) for request (%s)",
 		rv.VolID, rv.RbdImageName, rv.RequestName)
 	return true, nil
@@ -357,13 +358,13 @@ func (rv *rbdVolume) repairImageID(ctx context.Context, j *journal.Connection) e
 	err := rv.getImageID()
 	if err != nil {
-		util.ErrorLog(ctx, "failed to get image id %s: %v", rv, err)
+		log.ErrorLog(ctx, "failed to get image id %s: %v", rv, err)
 		return err
 	}
 	err = j.StoreImageID(ctx, rv.JournalPool, rv.ReservedID, rv.ImageID)
 	if err != nil {
-		util.ErrorLog(ctx, "failed to store volume id %s: %v", rv, err)
+		log.ErrorLog(ctx, "failed to store volume id %s: %v", rv, err)
 		return err
 	}
@@ -405,7 +406,7 @@ func reserveSnap(ctx context.Context, rbdSnap *rbdSnapshot, rbdVol *rbdVolume, c
 		return err
 	}
-	util.DebugLog(ctx, "generated Volume ID (%s) and image name (%s) for request name (%s)",
+	log.DebugLog(ctx, "generated Volume ID (%s) and image name (%s) for request name (%s)",
 		rbdSnap.VolID, rbdSnap.RbdSnapName, rbdSnap.RequestName)
 	return nil
@@ -481,7 +482,7 @@ func reserveVol(ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, cr
 		return err
 	}
-	util.DebugLog(ctx, "generated Volume ID (%s) and image name (%s) for request name (%s)",
+	log.DebugLog(ctx, "generated Volume ID (%s) and image name (%s) for request name (%s)",
 		rbdVol.VolID, rbdVol.RbdImageName, rbdVol.RequestName)
 	return nil
@@ -522,6 +523,7 @@ func undoVolReservation(ctx context.Context, rbdVol *rbdVolume, cr *util.Credent
 // complete omap mapping between imageName and volumeID.
 // RegenerateJournal performs below operations
+// Extract clusterID, Mons after checkig clusterID mapping
 // Extract parameters journalPool, pool from volumeAttributes
 // Extract optional parameters volumeNamePrefix, kmsID, owner from volumeAttributes
 // Extract information from volumeID
@@ -537,7 +539,6 @@ func RegenerateJournal(
 	cr *util.Credentials) (string, error) {
 	ctx := context.Background()
 	var (
-		options map[string]string
 		vi      util.CSIIdentifier
 		rbdVol  *rbdVolume
 		kmsID   string
@@ -545,7 +546,6 @@ func RegenerateJournal(
 		ok      bool
 	)
-	options = make(map[string]string)
 	rbdVol = &rbdVolume{}
 	rbdVol.VolID = volumeID
@@ -560,14 +560,8 @@ func RegenerateJournal(
 		return "", err
 	}
-	// TODO check clusterID mapping exists
-	rbdVol.ClusterID = vi.ClusterID
-	options["clusterID"] = rbdVol.ClusterID
-	rbdVol.Monitors, _, err = util.GetMonsAndClusterID(options)
+	rbdVol.Monitors, rbdVol.ClusterID, err = util.FetchMappedClusterIDAndMons(ctx, vi.ClusterID)
 	if err != nil {
-		util.ErrorLog(ctx, "failed getting mons (%s)", err)
 		return "", err
 	}
@@ -634,7 +628,7 @@ func RegenerateJournal(
 			undoErr := j.UndoReservation(ctx, rbdVol.JournalPool, rbdVol.Pool,
 				rbdVol.RbdImageName, rbdVol.RequestName)
 			if undoErr != nil {
-				util.ErrorLog(ctx, "failed to undo reservation %s: %v", rbdVol, undoErr)
+				log.ErrorLog(ctx, "failed to undo reservation %s: %v", rbdVol, undoErr)
 			}
 		}
 	}()
@@ -644,7 +638,7 @@ func RegenerateJournal(
 		return "", err
 	}
-	util.DebugLog(ctx, "re-generated Volume ID (%s) and image name (%s) for request name (%s)",
+	log.DebugLog(ctx, "re-generated Volume ID (%s) and image name (%s) for request name (%s)",
 		rbdVol.VolID, rbdVol.RbdImageName, rbdVol.RequestName)
 	if rbdVol.ImageID == "" {
 		err = rbdVol.storeImageID(ctx, j)
@@ -660,13 +654,13 @@ func RegenerateJournal(
 func (rv *rbdVolume) storeImageID(ctx context.Context, j *journal.Connection) error {
 	err := rv.getImageID()
 	if err != nil {
-		util.ErrorLog(ctx, "failed to get image id %s: %v", rv, err)
+		log.ErrorLog(ctx, "failed to get image id %s: %v", rv, err)
 		return err
 	}
 	err = j.StoreImageID(ctx, rv.JournalPool, rv.ReservedID, rv.ImageID)
 	if err != nil {
-		util.ErrorLog(ctx, "failed to store volume id %s: %v", rv, err)
+		log.ErrorLog(ctx, "failed to store volume id %s: %v", rv, err)
 		return err
 	}

View File

@@ -29,6 +29,8 @@ import (
 	"time"
 	"github.com/ceph/ceph-csi/internal/util"
+	"github.com/ceph/ceph-csi/internal/util/k8s"
+	"github.com/ceph/ceph-csi/internal/util/log"
 	"github.com/ceph/go-ceph/rados"
 	librbd "github.com/ceph/go-ceph/rbd"
@@ -241,7 +243,7 @@ func createImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
 			return fmt.Errorf("failed to set data pool: %w", err)
 		}
 	}
-	util.DebugLog(ctx, logMsg,
+	log.DebugLog(ctx, logMsg,
 		pOpts, volSzMiB, pOpts.imageFeatureSet.Names(), pOpts.Monitors)
 	if pOpts.imageFeatureSet != 0 {
@@ -492,7 +494,7 @@ func isNotMountPoint(mounter mount.Interface, stagingTargetPath string) (bool, e
 func addRbdManagerTask(ctx context.Context, pOpts *rbdVolume, arg []string) (bool, error) {
 	args := []string{"rbd", "task", "add"}
 	args = append(args, arg...)
-	util.DebugLog(
+	log.DebugLog(
 		ctx,
 		"executing %v for image (%s) using mon %s, pool %s",
 		args,
@@ -505,17 +507,17 @@ func addRbdManagerTask(ctx context.Context, pOpts *rbdVolume, arg []string) (boo
 		switch {
 		case strings.Contains(stderr, rbdTaskRemoveCmdInvalidString1) &&
 			strings.Contains(stderr, rbdTaskRemoveCmdInvalidString2):
-			util.WarningLog(
+			log.WarningLog(
 				ctx,
 				"cluster with cluster ID (%s) does not support Ceph manager based rbd commands"+
 					"(minimum ceph version required is v14.2.3)",
 				pOpts.ClusterID)
 			supported = false
 		case strings.HasPrefix(stderr, rbdTaskRemoveCmdAccessDeniedMessage):
-			util.WarningLog(ctx, "access denied to Ceph MGR-based rbd commands on cluster ID (%s)", pOpts.ClusterID)
+			log.WarningLog(ctx, "access denied to Ceph MGR-based rbd commands on cluster ID (%s)", pOpts.ClusterID)
 			supported = false
 		default:
-			util.WarningLog(ctx, "uncaught error while scheduling a task (%v): %s", err, stderr)
+			log.WarningLog(ctx, "uncaught error while scheduling a task (%v): %s", err, stderr)
 		}
 	}
 	if err != nil {
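addRbdManagerTask prefixes its argument list with "rbd task add" and reports, through the boolean, whether the cluster's MGR supports task scheduling at all. A hedged call sketch (the flatten image spec is illustrative, not taken from this diff):

// queue a background flatten through ceph-mgr; on clusters older than
// v14.2.3, or without MGR access, supported comes back false and callers
// fall back to a client-side operation (see flattenRbdImage and deleteImage)
supported, err := addRbdManagerTask(ctx, pOpts, []string{"flatten", "pool/image"})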
@@ -539,7 +541,7 @@ func (rv *rbdVolume) getTrashPath() string {
 func deleteImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) error {
 	image := pOpts.RbdImageName
-	util.DebugLog(ctx, "rbd: delete %s using mon %s, pool %s", image, pOpts.Monitors, pOpts.Pool)
+	log.DebugLog(ctx, "rbd: delete %s using mon %s, pool %s", image, pOpts.Monitors, pOpts.Pool)
 	// Support deleting the older rbd images whose imageID is not stored in omap
 	err := pOpts.getImageID()
@@ -548,9 +550,9 @@ func deleteImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
 	}
 	if pOpts.isEncrypted() {
-		util.DebugLog(ctx, "rbd: going to remove DEK for %q", pOpts)
+		log.DebugLog(ctx, "rbd: going to remove DEK for %q", pOpts)
 		if err = pOpts.encryption.RemoveDEK(pOpts.VolID); err != nil {
-			util.WarningLog(ctx, "failed to clean the passphrase for volume %s: %s", pOpts.VolID, err)
+			log.WarningLog(ctx, "failed to clean the passphrase for volume %s: %s", pOpts.VolID, err)
 		}
 	}
@@ -562,7 +564,7 @@ func deleteImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
 	rbdImage := librbd.GetImage(pOpts.ioctx, image)
 	err = rbdImage.Trash(0)
 	if err != nil {
-		util.ErrorLog(ctx, "failed to delete rbd image: %s, error: %v", pOpts, err)
+		log.ErrorLog(ctx, "failed to delete rbd image: %s, error: %v", pOpts, err)
 		return err
 	}
@@ -578,7 +580,7 @@ func deleteImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
 	}
 	rbdCephMgrSupported, err := addRbdManagerTask(ctx, pOpts, args)
 	if rbdCephMgrSupported && err != nil {
-		util.ErrorLog(ctx, "failed to add task to delete rbd image: %s, %v", pOpts, err)
+		log.ErrorLog(ctx, "failed to add task to delete rbd image: %s, %v", pOpts, err)
 		return err
 	}
@@ -586,7 +588,7 @@ func deleteImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
 	if !rbdCephMgrSupported {
 		err = librbd.TrashRemove(pOpts.ioctx, pOpts.ImageID, true)
 		if err != nil {
-			util.ErrorLog(ctx, "failed to delete rbd image: %s, %v", pOpts, err)
+			log.ErrorLog(ctx, "failed to delete rbd image: %s, %v", pOpts, err)
 			return err
 		}
@@ -625,7 +627,7 @@ func (rv *rbdVolume) getCloneDepth(ctx context.Context) (uint, error) {
 			if errors.Is(err, ErrImageNotFound) {
 				return depth, nil
 			}
-			util.ErrorLog(ctx, "failed to check depth on image %s: %s", &vol, err)
+			log.ErrorLog(ctx, "failed to check depth on image %s: %s", &vol, err)
 			return depth, err
 		}
@@ -654,7 +656,7 @@ func flattenClonedRbdImages(
 	defer rv.Destroy()
 	err := rv.Connect(cr)
 	if err != nil {
-		util.ErrorLog(ctx, "failed to open connection %s; err %v", rv, err)
+		log.ErrorLog(ctx, "failed to open connection %s; err %v", rv, err)
 		return err
 	}
@@ -680,7 +682,7 @@ func flattenClonedRbdImages(
 		rv.RbdImageName = snapName.origSnapName
 		err = rv.flattenRbdImage(ctx, cr, true, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
 		if err != nil {
-			util.ErrorLog(ctx, "failed to flatten %s; err %v", rv, err)
+			log.ErrorLog(ctx, "failed to flatten %s; err %v", rv, err)
 			continue
 		}
@@ -703,7 +705,7 @@ func (rv *rbdVolume) flattenRbdImage(
 	if err != nil {
 		return err
 	}
-	util.ExtendedLog(
+	log.ExtendedLog(
 		ctx,
 		"clone depth is (%d), configured softlimit (%d) and hardlimit (%d) for %s",
 		depth,
@@ -724,7 +726,7 @@ func (rv *rbdVolume) flattenRbdImage(
 			if strings.Contains(err.Error(), rbdFlattenNoParent) {
 				return nil
 			}
-			util.ErrorLog(ctx, "failed to add task flatten for %s : %v", rv, err)
+			log.ErrorLog(ctx, "failed to add task flatten for %s : %v", rv, err)
 			return err
 		}
@@ -733,7 +735,7 @@ func (rv *rbdVolume) flattenRbdImage(
 		}
 	}
 	if !supported {
-		util.ErrorLog(
+		log.ErrorLog(
 			ctx,
 			"task manager does not support flatten,image will be flattened once hardlimit is reached: %v",
 			err)
@@ -744,7 +746,7 @@ func (rv *rbdVolume) flattenRbdImage(
 		}
 		err := rv.flatten()
 		if err != nil {
-			util.ErrorLog(ctx, "rbd failed to flatten image %s %s: %v", rv.Pool, rv.RbdImageName, err)
+			log.ErrorLog(ctx, "rbd failed to flatten image %s %s: %v", rv.Pool, rv.RbdImageName, err)
 			return err
 		}
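flattenRbdImage weighs the measured clone depth against two configured limits. Reduced to a hypothetical helper (names and exact comparisons are illustrative; the diff itself only shows the log line and the fallback path):

// shouldFlatten sketches the two-threshold policy: past the hard limit the
// image must be flattened before it can be used, past the soft limit a
// background ceph-mgr task suffices, below both nothing happens.
func shouldFlatten(depth, softLimit, hardLimit uint) (flattenNow, scheduleTask bool) {
	if depth >= hardLimit {
		return true, false
	}
	if depth >= softLimit {
		return false, true
	}
	return false, false
}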
@@ -826,7 +828,7 @@ func (rv *rbdVolume) checkImageChainHasFeature(ctx context.Context, feature uint
 		if errors.Is(err, ErrImageNotFound) {
 			return false, nil
 		}
-		util.ErrorLog(ctx, "failed to get image info for %s: %s", vol.String(), err)
+		log.ErrorLog(ctx, "failed to get image info for %s: %s", vol.String(), err)
 		return false, err
 	}
@@ -856,7 +858,7 @@ func genSnapFromSnapID(
 	err := vi.DecomposeCSIID(rbdSnap.VolID)
 	if err != nil {
-		util.ErrorLog(ctx, "error decoding snapshot ID (%s) (%s)", err, rbdSnap.VolID)
+		log.ErrorLog(ctx, "error decoding snapshot ID (%s) (%s)", err, rbdSnap.VolID)
 		return err
 	}
@@ -866,7 +868,7 @@ func genSnapFromSnapID(
 	rbdSnap.Monitors, _, err = util.GetMonsAndClusterID(options)
 	if err != nil {
-		util.ErrorLog(ctx, "failed getting mons (%s)", err)
+		log.ErrorLog(ctx, "failed getting mons (%s)", err)
 		return err
 	}
@@ -955,7 +957,7 @@ func generateVolumeFromVolumeID(
 	rbdVol.Monitors, _, err = util.GetMonsAndClusterID(options)
 	if err != nil {
-		util.ErrorLog(ctx, "failed getting mons (%s)", err)
+		log.ErrorLog(ctx, "failed getting mons (%s)", err)
 		return rbdVol, err
 	}
@@ -1062,7 +1064,7 @@ func genVolFromVolID(
 	// be the same in the PV.Spec.CSI.VolumeHandle. Check the PV annotation for
 	// the new volumeHandle. If the new volumeHandle is found, generate the RBD
 	// volume structure from the new volumeHandle.
-	c := util.NewK8sClient()
+	c := k8s.NewK8sClient()
 	listOpt := metav1.ListOptions{
 		LabelSelector: PVReplicatedLabelKey,
 	}
@@ -1073,7 +1075,7 @@ func genVolFromVolID(
 	for i := range pvlist.Items {
 		if pvlist.Items[i].Spec.CSI != nil && pvlist.Items[i].Spec.CSI.VolumeHandle == volumeID {
 			if v, ok := pvlist.Items[i].Annotations[PVVolumeHandleAnnotationKey]; ok {
-				util.UsefulLog(ctx, "found new volumeID %s for existing volumeID %s", v, volumeID)
+				log.UsefulLog(ctx, "found new volumeID %s for existing volumeID %s", v, volumeID)
 				err = vi.DecomposeCSIID(v)
 				if err != nil {
 					return vol, fmt.Errorf("%w: error decoding volume ID (%s) (%s)",
@@ -1103,12 +1105,12 @@ func generateVolumeFromMapping(
 	// extract clusterID mapping
 	for _, cm := range *mapping {
 		for key, val := range cm.ClusterIDMapping {
-			mappedClusterID := getMappedID(key, val, vi.ClusterID)
+			mappedClusterID := util.GetMappedID(key, val, vi.ClusterID)
 			if mappedClusterID == "" {
 				continue
 			}
-			util.DebugLog(ctx,
+			log.DebugLog(ctx,
 				"found new clusterID mapping %s for existing clusterID %s",
 				mappedClusterID,
 				vi.ClusterID)
@@ -1117,11 +1119,11 @@ func generateVolumeFromMapping(
 			poolID := fmt.Sprintf("%d", (vi.LocationID))
 			for _, pools := range cm.RBDpoolIDMappingInfo {
 				for key, val := range pools {
-					mappedPoolID := getMappedID(key, val, poolID)
+					mappedPoolID := util.GetMappedID(key, val, poolID)
 					if mappedPoolID == "" {
 						continue
 					}
-					util.DebugLog(ctx,
+					log.DebugLog(ctx,
 						"found new poolID mapping %s for existing pooID %s",
 						mappedPoolID,
 						poolID)
@@ -1144,20 +1146,6 @@ func generateVolumeFromMapping(
 	return vol, util.ErrPoolNotFound
 }
-// getMappedID check the input id is matching key or value.
-// If key==id the value will be returned.
-// If value==id the key will be returned.
-func getMappedID(key, value, id string) string {
-	if key == id {
-		return value
-	}
-	if value == id {
-		return key
-	}
-	return ""
-}
 func genVolFromVolumeOptions(
 	ctx context.Context,
 	volOptions, credentials map[string]string,
@@ -1181,7 +1169,7 @@ func genVolFromVolumeOptions(
 	rbdVol.Monitors, rbdVol.ClusterID, err = util.GetMonsAndClusterID(volOptions)
 	if err != nil {
-		util.ErrorLog(ctx, "failed getting mons (%s)", err)
+		log.ErrorLog(ctx, "failed getting mons (%s)", err)
 		return nil, err
 	}
@@ -1196,12 +1184,12 @@ func genVolFromVolumeOptions(
 	// if no image features is provided, it results in empty string
 	// which disable all RBD image features as we expected
 	if err = rbdVol.validateImageFeatures(volOptions["imageFeatures"]); err != nil {
-		util.ErrorLog(ctx, "failed to validate image features %v", err)
+		log.ErrorLog(ctx, "failed to validate image features %v", err)
 		return nil, err
 	}
-	util.ExtendedLog(
+	log.ExtendedLog(
 		ctx,
 		"setting disableInUseChecks: %t image features: %v mounter: %s",
 		disableInUseChecks,
@@ -1258,7 +1246,7 @@ func genSnapFromOptions(ctx context.Context, rbdVol *rbdVolume, snapOptions map[
 	rbdSnap.Monitors, rbdSnap.ClusterID, err = util.GetMonsAndClusterID(snapOptions)
 	if err != nil {
-		util.ErrorLog(ctx, "failed getting mons (%s)", err)
+		log.ErrorLog(ctx, "failed getting mons (%s)", err)
 		return nil, err
 	}
@@ -1276,7 +1264,7 @@ func (rv *rbdVolume) hasSnapshotFeature() bool {
 }
 func (rv *rbdVolume) createSnapshot(ctx context.Context, pOpts *rbdSnapshot) error {
-	util.DebugLog(ctx, "rbd: snap create %s using mon %s", pOpts, pOpts.Monitors)
+	log.DebugLog(ctx, "rbd: snap create %s using mon %s", pOpts, pOpts.Monitors)
 	image, err := rv.open()
 	if err != nil {
 		return err
@@ -1289,7 +1277,7 @@ func (rv *rbdVolume) createSnapshot(ctx context.Context, pOpts *rbdSnapshot) err
 }
 func (rv *rbdVolume) deleteSnapshot(ctx context.Context, pOpts *rbdSnapshot) error {
-	util.DebugLog(ctx, "rbd: snap rm %s using mon %s", pOpts, pOpts.Monitors)
+	log.DebugLog(ctx, "rbd: snap rm %s using mon %s", pOpts, pOpts.Monitors)
 	image, err := rv.open()
 	if err != nil {
 		return err
@@ -1335,7 +1323,7 @@ func (rv *rbdVolume) cloneRbdImageFromSnapshot(
 		}
 	}
-	util.DebugLog(ctx, logMsg,
+	log.DebugLog(ctx, logMsg,
 		pSnapOpts, rv, rv.imageFeatureSet.Names(), rv.Monitors)
 	if rv.imageFeatureSet != 0 {
@@ -1373,7 +1361,7 @@ func (rv *rbdVolume) cloneRbdImageFromSnapshot(
 		if deleteClone {
 			err = librbd.RemoveImage(rv.ioctx, rv.RbdImageName)
 			if err != nil {
-				util.ErrorLog(ctx, "failed to delete temporary image %q: %v", rv, err)
+				log.ErrorLog(ctx, "failed to delete temporary image %q: %v", rv, err)
 			}
 		}
 	}()

View File

@@ -138,58 +138,6 @@ func TestValidateImageFeatures(t *testing.T) {
 	}
 }
-func TestGetMappedID(t *testing.T) {
-	t.Parallel()
-	type args struct {
-		key   string
-		value string
-		id    string
-	}
-	tests := []struct {
-		name     string
-		args     args
-		expected string
-	}{
-		{
-			name: "test for matching key",
-			args: args{
-				key:   "cluster1",
-				value: "cluster2",
-				id:    "cluster1",
-			},
-			expected: "cluster2",
-		},
-		{
-			name: "test for matching value",
-			args: args{
-				key:   "cluster1",
-				value: "cluster2",
-				id:    "cluster2",
-			},
-			expected: "cluster1",
-		},
-		{
-			name: "test for invalid match",
-			args: args{
-				key:   "cluster1",
-				value: "cluster2",
-				id:    "cluster3",
-			},
-			expected: "",
-		},
-	}
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			t.Parallel()
-			val := getMappedID(tt.args.key, tt.args.value, tt.args.id)
-			if val != tt.expected {
-				t.Errorf("getMappedID() got = %v, expected %v", val, tt.expected)
-			}
-		})
-	}
-}
 func TestGetCephClientLogFileName(t *testing.T) {
 	t.Parallel()
 	type args struct {

View File

@@ -26,6 +26,7 @@ import (
 	"time"
 	"github.com/ceph/ceph-csi/internal/util"
+	"github.com/ceph/ceph-csi/internal/util/log"
 	librbd "github.com/ceph/go-ceph/rbd"
 	"github.com/ceph/go-ceph/rbd/admin"
@@ -91,7 +92,7 @@ type ReplicationServer struct {
 func getForceOption(ctx context.Context, parameters map[string]string) (bool, error) {
 	val, ok := parameters[forceKey]
 	if !ok {
-		util.WarningLog(ctx, "%s is not set in parameters, setting to default (%v)", forceKey, false)
+		log.WarningLog(ctx, "%s is not set in parameters, setting to default (%v)", forceKey, false)
 		return false, nil
 	}
@@ -108,7 +109,7 @@ func getForceOption(ctx context.Context, parameters map[string]string) (bool, er
 func getMirroringMode(ctx context.Context, parameters map[string]string) (librbd.ImageMirrorMode, error) {
 	val, ok := parameters[imageMirroringKey]
 	if !ok {
-		util.WarningLog(
+		log.WarningLog(
 			ctx,
 			"%s is not set in parameters, setting to mirroringMode to default (%s)",
 			imageMirroringKey,
@@ -206,7 +207,7 @@ func (rs *ReplicationServer) EnableVolumeReplication(ctx context.Context,
 	}
 	if acquired := rs.VolumeLocks.TryAcquire(volumeID); !acquired {
-		util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)
+		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)
 		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
 	}
@@ -234,7 +235,7 @@ func (rs *ReplicationServer) EnableVolumeReplication(ctx context.Context,
 	mirroringInfo, err := rbdVol.getImageMirroringInfo()
 	if err != nil {
-		util.ErrorLog(ctx, err.Error())
+		log.ErrorLog(ctx, err.Error())
 		return nil, status.Error(codes.Internal, err.Error())
 	}
@@ -242,7 +243,7 @@ func (rs *ReplicationServer) EnableVolumeReplication(ctx context.Context,
 	if mirroringInfo.State != librbd.MirrorImageEnabled {
 		err = rbdVol.enableImageMirroring(mirroringMode)
 		if err != nil {
-			util.ErrorLog(ctx, err.Error())
+			log.ErrorLog(ctx, err.Error())
 			return nil, status.Error(codes.Internal, err.Error())
 		}
@@ -253,7 +254,7 @@ func (rs *ReplicationServer) EnableVolumeReplication(ctx context.Context,
 		if err != nil {
 			return nil, err
 		}
-		util.DebugLog(
+		log.DebugLog(
 			ctx,
 			"Added scheduling at interval %s, start time %s for volume %s",
 			interval,
@@ -281,7 +282,7 @@ func (rs *ReplicationServer) DisableVolumeReplication(ctx context.Context,
 	defer cr.DeleteCredentials()
 	if acquired := rs.VolumeLocks.TryAcquire(volumeID); !acquired {
-		util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)
+		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)
 		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
 	}
@@ -309,7 +310,7 @@ func (rs *ReplicationServer) DisableVolumeReplication(ctx context.Context,
 	mirroringInfo, err := rbdVol.getImageMirroringInfo()
 	if err != nil {
-		util.ErrorLog(ctx, err.Error())
+		log.ErrorLog(ctx, err.Error())
 		return nil, status.Error(codes.Internal, err.Error())
 	}
@@ -394,7 +395,7 @@ func (rs *ReplicationServer) PromoteVolume(ctx context.Context,
 	defer cr.DeleteCredentials()
 	if acquired := rs.VolumeLocks.TryAcquire(volumeID); !acquired {
-		util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)
+		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)
 		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
 	}
@@ -417,7 +418,7 @@ func (rs *ReplicationServer) PromoteVolume(ctx context.Context,
 	mirroringInfo, err := rbdVol.getImageMirroringInfo()
 	if err != nil {
-		util.ErrorLog(ctx, err.Error())
+		log.ErrorLog(ctx, err.Error())
 		return nil, status.Error(codes.Internal, err.Error())
 	}
@@ -434,7 +435,7 @@ func (rs *ReplicationServer) PromoteVolume(ctx context.Context,
 	if !mirroringInfo.Primary {
 		err = rbdVol.promoteImage(req.Force)
 		if err != nil {
-			util.ErrorLog(ctx, err.Error())
+			log.ErrorLog(ctx, err.Error())
 			// In case of the DR the image on the primary site cannot be
 			// demoted as the cluster is down, during failover the image need
 			// to be force promoted. RBD returns `Device or resource busy`
@@ -470,7 +471,7 @@ func (rs *ReplicationServer) DemoteVolume(ctx context.Context,
 	defer cr.DeleteCredentials()
 	if acquired := rs.VolumeLocks.TryAcquire(volumeID); !acquired {
-		util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)
+		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)
 		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
 	}
@@ -492,7 +493,7 @@ func (rs *ReplicationServer) DemoteVolume(ctx context.Context,
 	}
 	mirroringInfo, err := rbdVol.getImageMirroringInfo()
 	if err != nil {
-		util.ErrorLog(ctx, err.Error())
+		log.ErrorLog(ctx, err.Error())
 		return nil, status.Error(codes.Internal, err.Error())
 	}
@@ -509,7 +510,7 @@ func (rs *ReplicationServer) DemoteVolume(ctx context.Context,
 	if mirroringInfo.Primary {
 		err = rbdVol.demoteImage()
 		if err != nil {
-			util.ErrorLog(ctx, err.Error())
+			log.ErrorLog(ctx, err.Error())
 			return nil, status.Error(codes.Internal, err.Error())
 		}
@@ -525,7 +526,7 @@ func checkRemoteSiteStatus(ctx context.Context, mirrorStatus *librbd.GlobalMirro
 	for _, s := range mirrorStatus.SiteStatuses {
 		if s.MirrorUUID != "" {
 			if imageMirroringState(s.State.String()) != unknown && !s.Up {
-				util.UsefulLog(
+				log.UsefulLog(
 					ctx,
 					"peer site mirrorUUID=%s, mirroring state=%s, description=%s and lastUpdate=%s",
 					s.MirrorUUID,
@@ -559,7 +560,7 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
 	defer cr.DeleteCredentials()
 	if acquired := rs.VolumeLocks.TryAcquire(volumeID); !acquired {
-		util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)
+		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)
 		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
 	}
@@ -583,7 +584,7 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
 	if err != nil {
 		// in case of Resync the image will get deleted and gets recreated and
 		// it takes time for this operation.
-		util.ErrorLog(ctx, err.Error())
+		log.ErrorLog(ctx, err.Error())
 		return nil, status.Error(codes.Aborted, err.Error())
 	}
@@ -608,7 +609,7 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
 			return resp, nil
 		}
-		util.ErrorLog(ctx, err.Error())
+		log.ErrorLog(ctx, err.Error())
 		return nil, status.Error(codes.Internal, err.Error())
 	}
@@ -616,7 +617,7 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
 	localStatus, err := mirrorStatus.LocalStatus()
 	if err != nil {
-		util.ErrorLog(ctx, err.Error())
+		log.ErrorLog(ctx, err.Error())
 		return nil, fmt.Errorf("failed to get local status: %w", err)
 	}
@@ -639,7 +640,7 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
 	if strings.Contains(localStatus.State.String(), string(errorState)) {
 		err = rbdVol.resyncImage()
 		if err != nil {
-			util.ErrorLog(ctx, err.Error())
+			log.ErrorLog(ctx, err.Error())
 			return nil, status.Error(codes.Internal, err.Error())
 		}
@@ -647,7 +648,7 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
 	// convert the last update time to UTC
 	lastUpdateTime := time.Unix(localStatus.LastUpdate, 0).UTC()
-	util.UsefulLog(
+	log.UsefulLog(
 		ctx,
 		"image mirroring state=%s, description=%s and lastUpdate=%s",
 		localStatus.State.String(),

View File

@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"github.com/ceph/ceph-csi/internal/util"
+	"github.com/ceph/ceph-csi/internal/util/log"
 )
 func createRBDClone(
@@ -31,7 +32,7 @@ func createRBDClone(
 	// create snapshot
 	err := parentVol.createSnapshot(ctx, snap)
 	if err != nil {
-		util.ErrorLog(ctx, "failed to create snapshot %s: %v", snap, err)
+		log.ErrorLog(ctx, "failed to create snapshot %s: %v", snap, err)
 		return err
 	}
@@ -40,7 +41,7 @@ func createRBDClone(
 	// create clone image and delete snapshot
 	err = cloneRbdVol.cloneRbdImageFromSnapshot(ctx, snap, parentVol)
 	if err != nil {
-		util.ErrorLog(
+		log.ErrorLog(
 			ctx,
 			"failed to clone rbd image %s from snapshot %s: %v",
 			cloneRbdVol.RbdImageName,
@@ -54,10 +55,10 @@ func createRBDClone(
 	}
 	errSnap := parentVol.deleteSnapshot(ctx, snap)
 	if errSnap != nil {
-		util.ErrorLog(ctx, "failed to delete snapshot: %v", errSnap)
+		log.ErrorLog(ctx, "failed to delete snapshot: %v", errSnap)
 		delErr := deleteImage(ctx, cloneRbdVol, cr)
 		if delErr != nil {
-			util.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", cloneRbdVol, delErr)
+			log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", cloneRbdVol, delErr)
 		}
 		return err
@@ -65,10 +66,10 @@ func createRBDClone(
 	err = cloneRbdVol.getImageInfo()
 	if err != nil {
-		util.ErrorLog(ctx, "failed to get rbd image: %s details with error: %v", cloneRbdVol, err)
+		log.ErrorLog(ctx, "failed to get rbd image: %s details with error: %v", cloneRbdVol, err)
 		delErr := deleteImage(ctx, cloneRbdVol, cr)
 		if delErr != nil {
-			util.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", cloneRbdVol, delErr)
+			log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", cloneRbdVol, delErr)
 		}
 		return err
@@ -88,7 +89,7 @@ func cleanUpSnapshot(
 	err := parentVol.deleteSnapshot(ctx, rbdSnap)
 	if err != nil {
 		if !errors.Is(err, ErrSnapNotFound) {
-			util.ErrorLog(ctx, "failed to delete snapshot %q: %v", rbdSnap, err)
+			log.ErrorLog(ctx, "failed to delete snapshot %q: %v", rbdSnap, err)
 			return err
 		}
@@ -98,7 +99,7 @@ func cleanUpSnapshot(
 		err := deleteImage(ctx, rbdVol, cr)
 		if err != nil {
 			if !errors.Is(err, ErrImageNotFound) {
-				util.ErrorLog(ctx, "failed to delete rbd image %q with error: %v", rbdVol, err)
+				log.ErrorLog(ctx, "failed to delete rbd image %q with error: %v", rbdVol, err)
 				return err
 			}
@@ -134,7 +135,7 @@ func undoSnapshotCloning(
 	cr *util.Credentials) error {
 	err := cleanUpSnapshot(ctx, parentVol, rbdSnap, cloneVol, cr)
 	if err != nil {
-		util.ErrorLog(ctx, "failed to clean up %s or %s: %v", cloneVol, rbdSnap, err)
+		log.ErrorLog(ctx, "failed to clean up %s or %s: %v", cloneVol, rbdSnap, err)
 		return err
 	}
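createRBDClone is a compensation-style sequence: snapshot the parent, clone from the snapshot, drop the intermediate snapshot, then verify the clone, deleting whatever was created on any failure. A heavily condensed paraphrase of the calls shown above (ordering approximated from the visible hunks, not a drop-in function):

// sketchCreateClone mirrors the flow above using the package-internal types
func sketchCreateClone(ctx context.Context, parentVol, cloneRbdVol *rbdVolume, snap *rbdSnapshot, cr *util.Credentials) error {
	if err := parentVol.createSnapshot(ctx, snap); err != nil {
		return err // nothing to undo yet
	}
	cloneErr := cloneRbdVol.cloneRbdImageFromSnapshot(ctx, snap, parentVol)
	// the intermediate snapshot is removed whether or not the clone worked
	if errSnap := parentVol.deleteSnapshot(ctx, snap); errSnap != nil || cloneErr != nil {
		_ = deleteImage(ctx, cloneRbdVol, cr) // best-effort compensation
		if cloneErr != nil {
			return cloneErr
		}
		return errSnap
	}
	return cloneRbdVol.getImageInfo()
}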

View File

@@ -23,6 +23,8 @@ import (
 	"fmt"
 	"os/exec"
+	"github.com/ceph/ceph-csi/internal/util/log"
+
 	"github.com/ceph/go-ceph/rados"
 )
@@ -50,14 +52,14 @@ func ExecCommand(ctx context.Context, program string, args ...string) (string, s
 	if err != nil {
 		err = fmt.Errorf("an error (%w) occurred while running %s args: %v", err, program, sanitizedArgs)
 		if ctx != context.TODO() {
-			UsefulLog(ctx, "%s", err)
+			log.UsefulLog(ctx, "%s", err)
 		}
 		return stdout, stderr, err
 	}
 	if ctx != context.TODO() {
-		UsefulLog(ctx, "command succeeded: %s %v", program, sanitizedArgs)
+		log.UsefulLog(ctx, "command succeeded: %s %v", program, sanitizedArgs)
 	}
 	return stdout, stderr, nil
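ExecCommand returns stdout and stderr separately from the wrapped error, and only logs when handed a request-scoped context. A hedged call-site sketch from another package (the rbd arguments are illustrative):

stdout, stderr, err := util.ExecCommand(ctx, "rbd", "ls", "--pool", "replicapool")
if err != nil {
	// for CLI failures the useful detail usually lives in stderr
	return fmt.Errorf("listing images failed: %w, stderr: %s", err, stderr)
}
fmt.Println(stdout)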
@@ -151,7 +153,7 @@ func CreateObject(ctx context.Context, monitors string, cr *Credentials, poolNam
 	if errors.Is(err, rados.ErrObjectExists) {
 		return JoinErrors(ErrObjectExists, err)
 	} else if err != nil {
-		ErrorLog(ctx, "failed creating omap (%s) in pool (%s): (%v)", objectName, poolName, err)
+		log.ErrorLog(ctx, "failed creating omap (%s) in pool (%s): (%v)", objectName, poolName, err)
 		return err
 	}
@@ -187,7 +189,7 @@ func RemoveObject(ctx context.Context, monitors string, cr *Credentials, poolNam
 	if errors.Is(err, rados.ErrNotFound) {
 		return JoinErrors(ErrObjectNotFound, err)
 	} else if err != nil {
-		ErrorLog(ctx, "failed removing omap (%s) in pool (%s): (%v)", oMapName, poolName, err)
+		log.ErrorLog(ctx, "failed removing omap (%s) in pool (%s): (%v)", oMapName, poolName, err)
 		return err
 	}

View File

@@ -17,11 +17,14 @@ limitations under the License.
 package util
 import (
+	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"io/ioutil"
 	"os"
+
+	"github.com/ceph/ceph-csi/internal/util/log"
 )
 // clusterMappingConfigFile is the location of the cluster mapping config file.
@@ -120,3 +123,67 @@ func getClusterMappingInfo(clusterID, filename string) (*[]ClusterMappingInfo, e
 func GetClusterMappingInfo(clusterID string) (*[]ClusterMappingInfo, error) {
 	return getClusterMappingInfo(clusterID, clusterMappingConfigFile)
 }
+// GetMappedID check the input id is matching key or value.
+// If key==id the value will be returned.
+// If value==id the key will be returned.
+func GetMappedID(key, value, id string) string {
+	if key == id {
+		return value
+	}
+	if value == id {
+		return key
+	}
+	return ""
+}
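Since the helper is now exported, a quick worked example of its bidirectional matching (mirroring the TestGetMappedID cases added further down):

// illustrative only; matches the test fixtures below
fmt.Println(GetMappedID("cluster1", "cluster2", "cluster1")) // "cluster2": id matched the key
fmt.Println(GetMappedID("cluster1", "cluster2", "cluster2")) // "cluster1": id matched the value
fmt.Println(GetMappedID("cluster1", "cluster2", "cluster3")) // "": id matches neither side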
+// fetchMappedClusterIDAndMons returns monitors and clusterID info after checking cluster mapping.
+func fetchMappedClusterIDAndMons(ctx context.Context,
+	clusterID, clusterMappingConfigFile, csiConfigFile string) (string, string, error) {
+	var mons string
+	clusterMappingInfo, err := getClusterMappingInfo(clusterID, clusterMappingConfigFile)
+	if err != nil {
+		return "", "", err
+	}
+	if clusterMappingInfo != nil {
+		for _, cm := range *clusterMappingInfo {
+			for key, val := range cm.ClusterIDMapping {
+				mappedClusterID := GetMappedID(key, val, clusterID)
+				if mappedClusterID == "" {
+					continue
+				}
+				log.DebugLog(ctx,
+					"found new clusterID mapping %q for existing clusterID %q",
+					mappedClusterID,
+					clusterID)
+				mons, err = Mons(csiConfigFile, mappedClusterID)
+				if err != nil {
+					log.DebugLog(ctx, "failed getting mons with mapped cluster id %q: %v",
+						mappedClusterID, err)
+					continue
+				}
+				return mons, mappedClusterID, nil
+			}
+		}
+	}
+	// check original clusterID for backward compatibility when cluster ids were expected to be same.
+	mons, err = Mons(csiConfigFile, clusterID)
+	if err != nil {
+		log.ErrorLog(ctx, "failed getting mons with cluster id %q: %v", clusterID, err)
+		return "", "", err
+	}
+	return mons, clusterID, err
+}
+// FetchMappedClusterIDAndMons returns monitors and clusterID info after checking cluster mapping.
+func FetchMappedClusterIDAndMons(ctx context.Context, clusterID string) (string, string, error) {
+	return fetchMappedClusterIDAndMons(ctx, clusterID, clusterMappingConfigFile, CsiConfigFile)
+}
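A hedged call-site sketch of the exported wrapper, using the fixture IDs from the new test below (the mapping file there relates "cluster-1" and "cluster-3"):

// assumes a cluster-mapping.json entry {"cluster-1": "cluster-3"} and a
// CSI config that only knows "cluster-1"
mons, clusterID, err := util.FetchMappedClusterIDAndMons(ctx, "cluster-3")
if err != nil {
	return err
}
// mons now holds the monitors of "cluster-1" and clusterID == "cluster-1",
// so a volume handle stamped with the peer ID keeps resolving after failover
_ = mons
_ = clusterID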

View File

@@ -17,10 +17,12 @@ limitations under the License.
 package util
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"reflect"
+	"strings"
 	"testing"
 )
@@ -239,3 +241,185 @@ func validateMapping(t *testing.T, clusterID, rbdPoolID, cephFSPoolID string, ma
 	return nil
 }
+func TestGetMappedID(t *testing.T) {
+	t.Parallel()
+	type args struct {
+		key   string
+		value string
+		id    string
+	}
+	tests := []struct {
+		name     string
+		args     args
+		expected string
+	}{
+		{
+			name: "test for matching key",
+			args: args{
+				key:   "cluster1",
+				value: "cluster2",
+				id:    "cluster1",
+			},
+			expected: "cluster2",
+		},
+		{
+			name: "test for matching value",
+			args: args{
+				key:   "cluster1",
+				value: "cluster2",
+				id:    "cluster2",
+			},
+			expected: "cluster1",
+		},
+		{
+			name: "test for invalid match",
+			args: args{
+				key:   "cluster1",
+				value: "cluster2",
+				id:    "cluster3",
+			},
+			expected: "",
+		},
+	}
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+			val := GetMappedID(tt.args.key, tt.args.value, tt.args.id)
+			if val != tt.expected {
+				t.Errorf("getMappedID() got = %v, expected %v", val, tt.expected)
+			}
+		})
+	}
+}
+func TestFetchMappedClusterIDAndMons(t *testing.T) {
+	t.Parallel()
+	ctx := context.TODO()
+	type args struct {
+		ctx       context.Context
+		clusterID string
+	}
+	mappingBasePath := t.TempDir()
+	csiConfigFile := mappingBasePath + "/config.json"
+	clusterMappingConfigFile := mappingBasePath + "/cluster-mapping.json"
+	csiConfig := []ClusterInfo{
+		{
+			ClusterID: "cluster-1",
+			Monitors:  []string{"ip-1", "ip-2"},
+		},
+		{
+			ClusterID: "cluster-2",
+			Monitors:  []string{"ip-3", "ip-4"},
+		},
+	}
+	csiConfigFileContent, err := json.Marshal(csiConfig)
+	if err != nil {
+		t.Errorf("failed to marshal csi config info %v", err)
+	}
+	err = ioutil.WriteFile(csiConfigFile, csiConfigFileContent, 0o600)
+	if err != nil {
+		t.Errorf("failed to write %s file content: %v", CsiConfigFile, err)
+	}
+	t.Run("cluster-mapping.json does not exist", func(t *testing.T) {
+		_, _, err = fetchMappedClusterIDAndMons(ctx, "cluster-2", clusterMappingConfigFile, csiConfigFile)
+		if err != nil {
+			t.Errorf("FetchMappedClusterIDAndMons() error = %v, wantErr %v", err, nil)
+		}
+	})
+	clusterMapping := []ClusterMappingInfo{
+		{
+			ClusterIDMapping: map[string]string{
+				"cluster-1": "cluster-3",
+			},
+		},
+		{
+			ClusterIDMapping: map[string]string{
+				"cluster-1": "cluster-4",
+			},
+		},
+		{
+			ClusterIDMapping: map[string]string{
+				"cluster-4": "cluster-3",
+			},
+		},
+	}
+	clusterMappingFileContent, err := json.Marshal(clusterMapping)
+	if err != nil {
+		t.Errorf("failed to marshal mapping info %v", err)
+	}
+	err = ioutil.WriteFile(clusterMappingConfigFile, clusterMappingFileContent, 0o600)
+	if err != nil {
+		t.Errorf("failed to write %s file content: %v", clusterMappingFileContent, err)
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    string
+		want1   string
+		wantErr bool
+	}{
+		{
+			name: "test cluster id=cluster-1",
+			args: args{
+				ctx:       ctx,
+				clusterID: "cluster-1",
+			},
+			want:    strings.Join(csiConfig[0].Monitors, ","),
+			want1:   "cluster-1",
+			wantErr: false,
+		},
+		{
+			name: "test cluster id=cluster-3",
+			args: args{
+				ctx:       ctx,
+				clusterID: "cluster-3",
+			},
+			want:    strings.Join(csiConfig[0].Monitors, ","),
+			want1:   "cluster-1",
+			wantErr: false,
+		},
+		{
+			name: "test cluster id=cluster-4",
+			args: args{
+				ctx:       ctx,
+				clusterID: "cluster-4",
+			},
+			want:    strings.Join(csiConfig[0].Monitors, ","),
+			want1:   "cluster-1",
+			wantErr: false,
+		},
+		{
+			name: "test missing cluster id=cluster-6",
+			args: args{
+				ctx:       ctx,
+				clusterID: "cluster-6",
+			},
+			want:    "",
+			want1:   "",
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+			got, got1, err := fetchMappedClusterIDAndMons(ctx, tt.args.clusterID, clusterMappingConfigFile, csiConfigFile)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("FetchMappedClusterIDAndMons() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if got != tt.want {
+				t.Errorf("FetchMappedClusterIDAndMons() got = %v, want %v", got, tt.want)
+			}
+			if got1 != tt.want1 {
+				t.Errorf("FetchMappedClusterIDAndMons() got1 = %v, want %v", got1, tt.want1)
+			}
+		})
+	}
+}

View File

@@ -25,15 +25,15 @@ import (
 	"path"
 	"strconv"
 	"strings"
+
+	"github.com/ceph/ceph-csi/internal/kms"
+	"github.com/ceph/ceph-csi/internal/util/log"
 )
 const (
 	mapperFilePrefix     = "luks-rbd-"
 	mapperFilePathPrefix = "/dev/mapper"
-	// kmsConfigPath is the location of the vault config file.
-	kmsConfigPath = "/etc/ceph-csi-encryption-kms-config/config.json"
 	// Passphrase size - 20 bytes is 160 bits to satisfy:
 	// https://tools.ietf.org/html/rfc6749#section-10.10
 	encryptionPassphraseSize = 20
@@ -52,11 +52,11 @@ var (
 )
 type VolumeEncryption struct {
-	KMS EncryptionKMS
+	KMS kms.EncryptionKMS
 	// dekStore that will be used, this can be the EncryptionKMS or a
 	// different object implementing the DEKStore interface.
-	dekStore DEKStore
+	dekStore kms.DEKStore
 	id string
 }
@@ -74,7 +74,7 @@ func FetchEncryptionKMSID(encrypted, kmsID string) (string, error) {
 	}
 	if kmsID == "" {
-		kmsID = defaultKMSType
+		kmsID = kms.DefaultKMSType
 	}
 	return kmsID, nil
@@ -86,24 +86,24 @@ func FetchEncryptionKMSID(encrypted, kmsID string) (string, error) {
 // Callers that receive a ErrDEKStoreNeeded error, should use
 // VolumeEncryption.SetDEKStore() to configure an alternative storage for the
 // DEKs.
-func NewVolumeEncryption(id string, kms EncryptionKMS) (*VolumeEncryption, error) {
+func NewVolumeEncryption(id string, ekms kms.EncryptionKMS) (*VolumeEncryption, error) {
 	kmsID := id
 	if kmsID == "" {
 		// if kmsID is not set, encryption is enabled, and the type is
 		// SecretsKMS
-		kmsID = defaultKMSType
+		kmsID = kms.DefaultKMSType
 	}
 	ve := &VolumeEncryption{
 		id:  kmsID,
-		KMS: kms,
+		KMS: ekms,
 	}
-	if kms.requiresDEKStore() == DEKStoreIntegrated {
-		dekStore, ok := kms.(DEKStore)
+	if ekms.RequiresDEKStore() == kms.DEKStoreIntegrated {
+		dekStore, ok := ekms.(kms.DEKStore)
 		if !ok {
 			return nil, fmt.Errorf("KMS %T does not implement the "+
-				"DEKStore interface", kms)
+				"DEKStore interface", ekms)
 		}
 		ve.dekStore = dekStore
@@ -116,7 +116,7 @@ func NewVolumeEncryption(id string, kms EncryptionKMS) (*VolumeEncryption, error
 // SetDEKStore sets the DEKStore for this VolumeEncryption instance. It will be
 // used when StoreNewCryptoPassphrase() or RemoveDEK() is called.
-func (ve *VolumeEncryption) SetDEKStore(dekStore DEKStore) {
+func (ve *VolumeEncryption) SetDEKStore(dekStore kms.DEKStore) {
 	ve.dekStore = dekStore
 }
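Per the comment above NewVolumeEncryption, callers recover from ErrDEKStoreNeeded themselves. A hedged sketch, assuming the constructor still returns a usable instance alongside that error (metadataStore is a hypothetical kms.DEKStore implementation):

ve, err := NewVolumeEncryption(kmsID, ekms)
if errors.Is(err, ErrDEKStoreNeeded) {
	// this KMS cannot hold DEKs itself, so wire in an external store
	ve.SetDEKStore(metadataStore) // metadataStore: hypothetical kms.DEKStore
	err = nil
}
if err != nil {
	return err
}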
@@ -139,72 +139,6 @@ func (ve *VolumeEncryption) GetID() string {
 	return ve.id
 }
-// EncryptionKMS provides external Key Management System for encryption
-// passphrases storage.
-type EncryptionKMS interface {
-	Destroy()
-	// requiresDEKStore returns the DEKStoreType that is needed to be
-	// configure for the KMS. Nothing needs to be done when this function
-	// returns DEKStoreIntegrated, otherwise you will need to configure an
-	// alternative storage for the DEKs.
-	requiresDEKStore() DEKStoreType
-	// EncryptDEK provides a way for a KMS to encrypt a DEK. In case the
-	// encryption is done transparently inside the KMS service, the
-	// function can return an unencrypted value.
-	EncryptDEK(volumeID, plainDEK string) (string, error)
-	// DecryptDEK provides a way for a KMS to decrypt a DEK. In case the
-	// encryption is done transparently inside the KMS service, the
-	// function does not need to do anything except return the encyptedDEK
-	// as it was received.
-	DecryptDEK(volumeID, encyptedDEK string) (string, error)
-}
-// DEKStoreType describes what DEKStore needs to be configured when using a
-// particular KMS. A KMS might support different DEKStores depending on its
-// configuration.
-type DEKStoreType string
-const (
-	// DEKStoreIntegrated indicates that the KMS itself supports storing
-	// DEKs.
-	DEKStoreIntegrated = DEKStoreType("")
-	// DEKStoreMetadata indicates that the KMS should be configured to
-	// store the DEK in the metadata of the volume.
-	DEKStoreMetadata = DEKStoreType("metadata")
-)
-// DEKStore allows KMS instances to implement a modular backend for DEK
-// storage. This can be used to store the DEK in a different location, in case
-// the KMS can not store passphrases for volumes.
-type DEKStore interface {
-	// StoreDEK saves the DEK in the configured store.
-	StoreDEK(volumeID string, dek string) error
-	// FetchDEK reads the DEK from the configured store and returns it.
-	FetchDEK(volumeID string) (string, error)
-	// RemoveDEK deletes the DEK from the configured store.
-	RemoveDEK(volumeID string) error
-}
-// integratedDEK is a DEKStore that can not be configured. Either the KMS does
-// not use a DEK, or the DEK is stored in the KMS without additional
-// configuration options.
-type integratedDEK struct{}
-func (i integratedDEK) requiresDEKStore() DEKStoreType {
-	return DEKStoreIntegrated
-}
-func (i integratedDEK) EncryptDEK(volumeID, plainDEK string) (string, error) {
-	return plainDEK, nil
-}
-func (i integratedDEK) DecryptDEK(volumeID, encyptedDEK string) (string, error) {
-	return encyptedDEK, nil
-}
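The interfaces removed here move into the internal/kms package, as the new imports show. For reference, a minimal sketch of satisfying that DEKStore contract (entirely illustrative; no such type exists in this commit, and a real store would persist to volume metadata rather than a map):

// memoryDEKStore satisfies the kms.DEKStore interface shown above.
type memoryDEKStore struct {
	deks map[string]string
}

func (m *memoryDEKStore) StoreDEK(volumeID string, dek string) error {
	m.deks[volumeID] = dek
	return nil
}

func (m *memoryDEKStore) FetchDEK(volumeID string) (string, error) {
	dek, ok := m.deks[volumeID]
	if !ok {
		return "", fmt.Errorf("no DEK stored for volume %q", volumeID)
	}
	return dek, nil
}

func (m *memoryDEKStore) RemoveDEK(volumeID string) error {
	delete(m.deks, volumeID)
	return nil
}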
// StoreCryptoPassphrase takes an unencrypted passphrase, encrypts it and saves // StoreCryptoPassphrase takes an unencrypted passphrase, encrypts it and saves
// it in the DEKStore. // it in the DEKStore.
func (ve *VolumeEncryption) StoreCryptoPassphrase(volumeID, passphrase string) error { func (ve *VolumeEncryption) StoreCryptoPassphrase(volumeID, passphrase string) error {
@@ -262,7 +196,7 @@ func VolumeMapper(volumeID string) (mapperFile, mapperFilePath string) {
// EncryptVolume encrypts provided device with LUKS.
func EncryptVolume(ctx context.Context, devicePath, passphrase string) error {
-	DebugLog(ctx, "Encrypting device %s with LUKS", devicePath)
+	log.DebugLog(ctx, "Encrypting device %s with LUKS", devicePath)
	if _, _, err := LuksFormat(devicePath, passphrase); err != nil {
		return fmt.Errorf("failed to encrypt device %s with LUKS: %w", devicePath, err)
	}
@@ -272,10 +206,10 @@ func EncryptVolume(ctx context.Context, devicePath, passphrase string) error {
// OpenEncryptedVolume opens volume so that it can be used by the client.
func OpenEncryptedVolume(ctx context.Context, devicePath, mapperFile, passphrase string) error {
-	DebugLog(ctx, "Opening device %s with LUKS on %s", devicePath, mapperFile)
+	log.DebugLog(ctx, "Opening device %s with LUKS on %s", devicePath, mapperFile)
	_, stderr, err := LuksOpen(devicePath, mapperFile, passphrase)
	if err != nil {
-		ErrorLog(ctx, "failed to open LUKS device %q: %s", devicePath, stderr)
+		log.ErrorLog(ctx, "failed to open LUKS device %q: %s", devicePath, stderr)
	}

	return err
@@ -283,10 +217,10 @@ func OpenEncryptedVolume(ctx context.Context, devicePath, mapperFile, passphrase
// ResizeEncryptedVolume resizes encrypted volume so that it can be used by the client.
func ResizeEncryptedVolume(ctx context.Context, mapperFile string) error {
-	DebugLog(ctx, "Resizing LUKS device %s", mapperFile)
+	log.DebugLog(ctx, "Resizing LUKS device %s", mapperFile)
	_, stderr, err := LuksResize(mapperFile)
	if err != nil {
-		ErrorLog(ctx, "failed to resize LUKS device %s: %s", mapperFile, stderr)
+		log.ErrorLog(ctx, "failed to resize LUKS device %s: %s", mapperFile, stderr)
	}

	return err
@@ -294,7 +228,7 @@ func ResizeEncryptedVolume(ctx context.Context, mapperFile string) error {
// CloseEncryptedVolume closes encrypted volume so it can be detached.
func CloseEncryptedVolume(ctx context.Context, mapperFile string) error {
-	DebugLog(ctx, "Closing LUKS device %s", mapperFile)
+	log.DebugLog(ctx, "Closing LUKS device %s", mapperFile)
	_, _, err := LuksClose(mapperFile)

	return err
@@ -317,7 +251,7 @@ func DeviceEncryptionStatus(ctx context.Context, devicePath string) (mappedDevic
	mapPath := strings.TrimPrefix(devicePath, mapperFilePathPrefix+"/")
	stdout, _, err := LuksStatus(mapPath)
	if err != nil {
-		DebugLog(ctx, "device %s is not an active LUKS device: %v", devicePath, err)
+		log.DebugLog(ctx, "device %s is not an active LUKS device: %v", devicePath, err)

		return devicePath, "", nil
	}
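
Together these helpers cover the node-side LUKS lifecycle. A minimal sketch of the call order (the volume ID and device path are illustrative, error handling trimmed):

// Sketch: prepare an encrypted device end to end with the helpers above.
func prepareEncryptedVolume(ctx context.Context, devicePath, passphrase string) (string, error) {
	mapperFile, mapperFilePath := VolumeMapper("volume-id") // illustrative ID
	if err := EncryptVolume(ctx, devicePath, passphrase); err != nil {
		return "", err
	}
	if err := OpenEncryptedVolume(ctx, devicePath, mapperFile, passphrase); err != nil {
		return "", err
	}

	// mapperFilePath is the device the caller formats and mounts;
	// CloseEncryptedVolume(ctx, mapperFile) undoes the open on detach.
	return mapperFilePath, nil
}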

View File

@@ -20,26 +20,12 @@ import (
	"encoding/base64"
	"testing"

+	"github.com/ceph/ceph-csi/internal/kms"
+
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

-func TestInitSecretsKMS(t *testing.T) {
-	t.Parallel()
-	secrets := map[string]string{}
-
-	// no passphrase in the secrets, should fail
-	kms, err := initSecretsKMS(secrets)
-	assert.Error(t, err)
-	assert.Nil(t, kms)
-
-	// set a passphrase and it should pass
-	secrets[encryptionPassphraseKey] = "plaintext encryption key"
-	kms, err = initSecretsKMS(secrets)
-	assert.NotNil(t, kms)
-	assert.NoError(t, err)
-}
-
func TestGenerateNewEncryptionPassphrase(t *testing.T) {
	t.Parallel()
	b64Passphrase, err := generateNewEncryptionPassphrase()
@@ -55,17 +41,18 @@ func TestGenerateNewEncryptionPassphrase(t *testing.T) {
func TestKMSWorkflow(t *testing.T) {
	t.Parallel()
	secrets := map[string]string{
-		encryptionPassphraseKey: "workflow test",
+		// FIXME: use encryptionPassphraseKey from SecretsKMS
+		"encryptionPassphrase": "workflow test",
	}

-	kms, err := GetKMS("tenant", defaultKMSType, secrets)
+	kmsProvider, err := kms.GetDefaultKMS(secrets)
	assert.NoError(t, err)
-	require.NotNil(t, kms)
+	require.NotNil(t, kmsProvider)

-	ve, err := NewVolumeEncryption("", kms)
+	ve, err := NewVolumeEncryption("", kmsProvider)
	assert.NoError(t, err)
	require.NotNil(t, ve)
-	assert.Equal(t, defaultKMSType, ve.GetID())
+	assert.Equal(t, kms.DefaultKMSType, ve.GetID())

	volumeID := "volume-id"
@@ -74,5 +61,5 @@ func TestKMSWorkflow(t *testing.T) {
	passphrase, err := ve.GetCryptoPassphrase(volumeID)
	assert.NoError(t, err)
-	assert.Equal(t, secrets[encryptionPassphraseKey], passphrase)
+	assert.Equal(t, secrets["encryptionPassphrase"], passphrase)
}

View File

@@ -8,6 +8,8 @@ import (
	runtime_pprof "runtime/pprof"
	"strconv"

+	"github.com/ceph/ceph-csi/internal/util/log"
+
	"github.com/prometheus/client_golang/prometheus/promhttp"
)
@@ -24,13 +26,13 @@ func StartMetricsServer(c *Config) {
	http.Handle(c.MetricsPath, promhttp.Handler())
	err := http.ListenAndServe(addr, nil)
	if err != nil {
-		FatalLogMsg("failed to listen on address %v: %s", addr, err)
+		log.FatalLogMsg("failed to listen on address %v: %s", addr, err)
	}
}

func addPath(name string, handler http.Handler) {
	http.Handle(name, handler)
-	DebugLogMsg("DEBUG: registered profiling handler on /debug/pprof/%s\n", name)
+	log.DebugLogMsg("DEBUG: registered profiling handler on /debug/pprof/%s\n", name)
}

// EnableProfiling enables golang profiling.
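
For reference, the pattern StartMetricsServer follows is the stock promhttp wiring; a self-contained sketch (port and path are illustrative, not the driver's defaults):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Expose the default Prometheus registry, as StartMetricsServer does
	// with c.MetricsPath and the configured address.
	http.Handle("/metrics", promhttp.Handler())
	if err := http.ListenAndServe(":8080", nil); err != nil {
		panic(err)
	}
}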

View File

@@ -17,6 +17,8 @@ import (
	"fmt"
	"sync"

+	"github.com/ceph/ceph-csi/internal/util/log"
+
	"k8s.io/apimachinery/pkg/util/sets"
)
@@ -240,6 +242,6 @@ func (ol *OperationLock) release(op operation, volumeID string) {
		}
	}
	default:
-		ErrorLogMsg("%v operation not supported", op)
+		log.ErrorLogMsg("%v operation not supported", op)
	}
}
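
Only the release path shows up in this hunk; the broader pattern is a per-volume counter guarded by a mutex. An illustrative sketch, not the actual OperationLock internals (all names here are made up):

type opLockSketch struct {
	mu    sync.Mutex
	locks map[string]int // volumeID -> active operations
}

func (l *opLockSketch) tryAcquire(volumeID string) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.locks[volumeID] > 0 {
		return false // another operation already holds this volume
	}
	l.locks[volumeID]++

	return true
}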

View File

@@ -14,35 +14,37 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

-package util
+package k8s

import (
	"os"

-	k8s "k8s.io/client-go/kubernetes"
+	"github.com/ceph/ceph-csi/internal/util/log"
+
+	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// NewK8sClient creates a kubernetes client.
-func NewK8sClient() *k8s.Clientset {
+func NewK8sClient() *kubernetes.Clientset {
	var cfg *rest.Config
	var err error
	cPath := os.Getenv("KUBERNETES_CONFIG_PATH")
	if cPath != "" {
		cfg, err = clientcmd.BuildConfigFromFlags("", cPath)
		if err != nil {
-			FatalLogMsg("Failed to get cluster config with error: %v\n", err)
+			log.FatalLogMsg("Failed to get cluster config with error: %v\n", err)
		}
	} else {
		cfg, err = rest.InClusterConfig()
		if err != nil {
-			FatalLogMsg("Failed to get cluster config with error: %v\n", err)
+			log.FatalLogMsg("Failed to get cluster config with error: %v\n", err)
		}
	}
-	client, err := k8s.NewForConfig(cfg)
+	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
-		FatalLogMsg("Failed to create client with error: %v\n", err)
+		log.FatalLogMsg("Failed to create client with error: %v\n", err)
	}

	return client
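
Callers in other packages now import the helper from internal/util/k8s; a sketch of typical use (assumes imports context, fmt, metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" and the relocated k8s package):

// List node names with the clientset; NewK8sClient exits fatally on
// config errors, so the return value can be used directly.
func listNodeNames() ([]string, error) {
	client := k8s.NewK8sClient()
	nodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to list nodes: %w", err)
	}
	names := make([]string, 0, len(nodes.Items))
	for _, node := range nodes.Items {
		names = append(names, node.Name)
	}

	return names, nil
}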

View File

@@ -11,7 +11,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

-package util
+package log

import (
	"context"

View File

@@ -22,6 +22,9 @@ import (
	"fmt"
	"strings"

+	"github.com/ceph/ceph-csi/internal/util/k8s"
+	"github.com/ceph/ceph-csi/internal/util/log"
+
	"github.com/container-storage-interface/spec/lib/go/csi"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -32,7 +35,7 @@ const (
)

func k8sGetNodeLabels(nodeName string) (map[string]string, error) {
-	client := NewK8sClient()
+	client := k8s.NewK8sClient()
	node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to get node %q information: %w", nodeName, err)
@@ -59,7 +62,7 @@ func GetTopologyFromDomainLabels(domainLabels, nodeName, driverName string) (map
	// Convert passed in labels to a map, and check for uniqueness
	labelsToRead := strings.SplitN(domainLabels, labelSeparator, -1)
-	DefaultLog("passed in node labels for processing: %+v", labelsToRead)
+	log.DefaultLog("passed in node labels for processing: %+v", labelsToRead)

	labelsIn := make(map[string]bool)
	labelCount := 0
@@ -106,7 +109,7 @@ func GetTopologyFromDomainLabels(domainLabels, nodeName, driverName string) (map
		return nil, fmt.Errorf("missing domain labels %v on node %q", missingLabels, nodeName)
	}

-	DefaultLog("list of domains processed: %+v", domainMap)
+	log.DefaultLog("list of domains processed: %+v", domainMap)

	topology := make(map[string]string)
	for domain, value := range domainMap {
View File

@@ -26,6 +26,8 @@ import (
	"strings"
	"time"

+	"github.com/ceph/ceph-csi/internal/util/log"
+
	"golang.org/x/sys/unix"
	"k8s.io/apimachinery/pkg/util/validation"
	"k8s.io/cloud-provider/volume/helpers"
@@ -216,7 +218,7 @@ func parseKernelRelease(release string) (int, int, int, int, error) {
func CheckKernelSupport(release string, supportedVersions []KernelVersion) bool {
	version, patchlevel, sublevel, extraversion, err := parseKernelRelease(release)
	if err != nil {
-		ErrorLogMsg("%v", err)
+		log.ErrorLogMsg("%v", err)

		return false
	}
@@ -242,7 +244,7 @@ func CheckKernelSupport(release string, supportedVersions []KernelVersion) bool
			}
		}
	}
-	ErrorLogMsg("kernel %s does not support required features", release)
+	log.ErrorLogMsg("kernel %s does not support required features", release)

	return false
}
@@ -342,20 +344,6 @@ func contains(s []string, key string) bool {
	return false
}

-// getKeys takes a map that uses strings for keys and returns a slice with the
-// keys.
-func getKeys(m map[string]interface{}) []string {
-	keys := make([]string, len(m))
-	i := 0
-	for k := range m {
-		keys[i] = k
-		i++
-	}
-
-	return keys
-}
-
// CallStack returns the stack of the calls in the current goroutine. Useful
// for debugging or reporting errors. This is a friendly alternative to
// assert() or panic().
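
CheckKernelSupport is driven with a parsed uname -r string; a short sketch of the call site (the slice contents are illustrative; KernelVersion is defined elsewhere in this file):

// Gate a feature on the running kernel.
var minimumVersions []KernelVersion // filled with the feature's minimum releases
release := "5.4.0-105-generic"      // e.g. obtained via unix.Uname
if !CheckKernelSupport(release, minimumVersions) {
	log.ErrorLogMsg("kernel %s lacks the required features", release)
}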

4
vendor/modules.txt vendored
View File

@@ -412,7 +412,7 @@ golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
# golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
golang.org/x/time/rate
-# gomodules.xyz/jsonpatch/v2 v2.2.0
+# gomodules.xyz/jsonpatch/v2 v2.2.0 => github.com/gomodules/jsonpatch/v2 v2.2.0
gomodules.xyz/jsonpatch/v2
# google.golang.org/appengine v1.6.7
google.golang.org/appengine/internal
@@ -1055,10 +1055,12 @@ sigs.k8s.io/structured-merge-diff/v4/typed
sigs.k8s.io/structured-merge-diff/v4/value
# sigs.k8s.io/yaml v1.2.0
sigs.k8s.io/yaml
+# code.cloudfoundry.org/gofileutils => github.com/cloudfoundry/gofileutils v0.0.0-20170111115228-4d0c80011a0f
# github.com/golang/protobuf => github.com/golang/protobuf v1.4.3
# github.com/hashicorp/vault/api => github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a
# github.com/hashicorp/vault/sdk => github.com/hashicorp/vault/sdk v0.1.14-0.20201116234512-b4d4137dfe8b
# github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3
+# gomodules.xyz/jsonpatch/v2 => github.com/gomodules/jsonpatch/v2 v2.2.0
# google.golang.org/grpc => google.golang.org/grpc v1.35.0
# k8s.io/api => k8s.io/api v0.22.0
# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.22.0
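
The annotated vendor lines mirror replace directives in go.mod; the one behind the jsonpatch entry above would look like this (standard go.mod syntax, shown for reference):

// go.mod
replace gomodules.xyz/jsonpatch/v2 => github.com/gomodules/jsonpatch/v2 v2.2.0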