Merge pull request #156 from ceph/devel

sync downstream devel with upstream devel
OpenShift Merge Robot authored 2023-02-07 02:36:00 -05:00, committed by GitHub
commit c83d2bd4d2
994 changed files with 80190 additions and 32668 deletions


@@ -37,6 +37,13 @@ jobs:
         body: |
           /test ci/centos/k8s-e2e-external-storage/1.25
+      - name: Add comment to trigger external storage tests for Kubernetes 1.26
+        uses: peter-evans/create-or-update-comment@v2
+        with:
+          issue-number: ${{ github.event.pull_request.number }}
+          body: |
+            /test ci/centos/k8s-e2e-external-storage/1.26
       - name: Add comment to trigger helm E2E tests for Kubernetes 1.23
         uses: peter-evans/create-or-update-comment@v2
         with:
@@ -58,6 +65,13 @@ jobs:
         body: |
           /test ci/centos/mini-e2e-helm/k8s-1.25
+      - name: Add comment to trigger helm E2E tests for Kubernetes 1.26
+        uses: peter-evans/create-or-update-comment@v2
+        with:
+          issue-number: ${{ github.event.pull_request.number }}
+          body: |
+            /test ci/centos/mini-e2e-helm/k8s-1.26
       - name: Add comment to trigger E2E tests for Kubernetes 1.23
         uses: peter-evans/create-or-update-comment@v2
         with:
@@ -79,6 +93,13 @@ jobs:
         body: |
           /test ci/centos/mini-e2e/k8s-1.25
+      - name: Add comment to trigger E2E tests for Kubernetes 1.26
+        uses: peter-evans/create-or-update-comment@v2
+        with:
+          issue-number: ${{ github.event.pull_request.number }}
+          body: |
+            /test ci/centos/mini-e2e/k8s-1.26
       - name: Add comment to trigger cephfs upgrade tests
         uses: peter-evans/create-or-update-comment@v2
         with:


@ -1,9 +1,6 @@
--- ---
defaults: defaults:
actions: actions:
# mergify.io has removed bot_account from its free open source plan.
# comment:
# bot_account: ceph-csi-bot # mergify[bot] will be commenting.
queue: queue:
# merge_bot_account: ceph-csi-bot #mergify[bot] will be merging prs. # merge_bot_account: ceph-csi-bot #mergify[bot] will be merging prs.
# update_bot_account: ceph-csi-bot #mergify will randomly pick and use # update_bot_account: ceph-csi-bot #mergify will randomly pick and use
@ -12,6 +9,13 @@ defaults:
method: rebase method: rebase
rebase_fallback: merge rebase_fallback: merge
update_method: rebase update_method: rebase
# For rebasing Mergify uses the author of the PR. Ideally we use the
# ceph-csi-bot for that, but this is a feature that needs a paid
# subscription.
#
# rebase:
# Use ceph-csi-bot for rebasing, not the account of the PR owner.
# bot_account: ceph-csi-bot
queue_rules: queue_rules:
- name: default - name: default
@ -59,6 +63,9 @@ pull_request_rules:
dismiss_reviews: dismiss_reviews:
approved: true approved: true
changes_requested: false changes_requested: false
label:
remove:
- ok-to-test
- name: ask to resolve conflict - name: ask to resolve conflict
conditions: conditions:


@@ -16,7 +16,7 @@ BASE_IMAGE=quay.io/ceph/ceph:v17
 CEPH_VERSION=quincy
 # standard Golang options
-GOLANG_VERSION=1.18.8
+GOLANG_VERSION=1.19.5
 GO111MODULE=on
 # commitlint version
@@ -38,12 +38,12 @@ SNAPSHOT_VERSION=v6.1.0
 HELM_VERSION=v3.10.1
 # minikube settings
-MINIKUBE_VERSION=v1.28.0
+MINIKUBE_VERSION=v1.29.0
 VM_DRIVER=none
 CHANGE_MINIKUBE_NONE_USER=true
 # Rook options
-ROOK_VERSION=v1.10.4
+ROOK_VERSION=v1.10.9
 # Provide ceph image path
 ROOK_CEPH_CLUSTER_IMAGE=quay.io/ceph/ceph:v17


@@ -2,7 +2,13 @@ ARG SRC_DIR="/go/src/github.com/ceph/ceph-csi/"
 ARG GO_ARCH
 ARG BASE_IMAGE
-FROM ${BASE_IMAGE} as builder
+FROM ${BASE_IMAGE} as updated_base
+
+RUN dnf -y update \
+    && dnf clean all \
+    && rm -rf /var/cache/yum
+
+FROM updated_base as builder
 LABEL stage="build"
@@ -28,8 +34,7 @@ RUN ${GOROOT}/bin/go version && ${GOROOT}/bin/go env
 RUN dnf config-manager --disable \
     tcmu-runner,tcmu-runner-source,tcmu-runner-noarch || true
-RUN dnf -y update \
-    && dnf -y install --nodocs \
+RUN dnf -y install --nodocs \
     librados-devel librbd-devel \
     /usr/bin/cc \
     make \
@@ -56,7 +61,7 @@ COPY . ${SRC_DIR}
 RUN make cephcsi
 #-- Final container
-FROM ${BASE_IMAGE}
+FROM updated_base
 ARG SRC_DIR


@@ -64,7 +64,7 @@ sh-4.4# ls /var/lib/kubelet/plugins/kubernetes.io/csi/cephfs.csi.ceph.com/bc0146
 ls: cannot access '/var/lib/kubelet/plugins/kubernetes.io/csi/cephfs.csi.ceph.com/bc0146ec2b5d9a9db62e698abbe0adcae19c0e01f5cf15d3d593ed33c7bc1a8d/globalmount': Permission denied
 ```
-### kernel clinet corruption detection
+### kernel client corruption detection
 A mountpoint is deemed corrupted if `stat()`-ing it returns one of the
 following errors:
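
For context, here is a minimal Go sketch of such a `stat()`-based corruption check. The concrete errno set (ENOTCONN, ESTALE, EIO) is an assumption for illustration; the authoritative list is the one this documentation file enumerates.

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

// isCorruptedMount reports whether stat()-ing path fails with one of the
// errors treated as mountpoint corruption. The errno set here is an
// assumed subset, not the full documented list.
func isCorruptedMount(path string) bool {
	_, err := os.Stat(path)
	if err == nil {
		return false
	}
	var errno syscall.Errno
	if errors.As(err, &errno) {
		switch errno {
		case syscall.ENOTCONN, syscall.ESTALE, syscall.EIO:
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isCorruptedMount("/var/lib/kubelet/plugins/kubernetes.io/csi/cephfs.csi.ceph.com"))
}
```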

(File diff suppressed because it is too large.)


@@ -30,7 +30,6 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )
 const (
@@ -106,7 +105,7 @@ func createCephfsStorageClass(
     return wait.PollImmediate(poll, timeout, func() (bool, error) {
         _, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
         if err != nil {
-            e2elog.Logf("error creating StorageClass %q: %v", sc.Name, err)
+            framework.Logf("error creating StorageClass %q: %v", sc.Name, err)
             if isRetryableAPIError(err) {
                 return false, nil
             }
@@ -141,13 +140,13 @@ func createCephfsSecret(f *framework.Framework, secretName, userName, userKey st
 func unmountCephFSVolume(f *framework.Framework, appName, pvcName string) error {
     pod, err := f.ClientSet.CoreV1().Pods(f.UniqueName).Get(context.TODO(), appName, metav1.GetOptions{})
     if err != nil {
-        e2elog.Logf("Error occurred getting pod %s in namespace %s", appName, f.UniqueName)
+        framework.Logf("Error occurred getting pod %s in namespace %s", appName, f.UniqueName)
         return fmt.Errorf("failed to get pod: %w", err)
     }
     pvc, err := getPersistentVolumeClaim(f.ClientSet, f.UniqueName, pvcName)
     if err != nil {
-        e2elog.Logf("Error occurred getting PVC %s in namespace %s", pvcName, f.UniqueName)
+        framework.Logf("Error occurred getting PVC %s in namespace %s", pvcName, f.UniqueName)
         return fmt.Errorf("failed to get pvc: %w", err)
     }
@@ -163,7 +162,7 @@ func unmountCephFSVolume(f *framework.Framework, appName, pvcName string) error
         cephFSContainerName,
         cephCSINamespace)
     if stdErr != "" {
-        e2elog.Logf("StdErr occurred: %s", stdErr)
+        framework.Logf("StdErr occurred: %s", stdErr)
     }
     return err
@@ -339,7 +338,7 @@ func getSnapName(snapNamespace, snapName string) (string, error) {
     snapIDRegex := regexp.MustCompile(`(\w+\-?){5}$`)
     snapID := snapIDRegex.FindString(*sc.Status.SnapshotHandle)
     snapshotName := fmt.Sprintf("csi-snap-%s", snapID)
-    e2elog.Logf("snapshotName= %s", snapshotName)
+    framework.Logf("snapshotName= %s", snapshotName)
     return snapshotName, nil
 }
@@ -464,7 +463,7 @@ func validateFscryptAndAppBinding(pvcPath, appPath string, kms kmsConfig, f *fra
     if !destroyed {
         return fmt.Errorf("passphrased was not destroyed: %s", msg)
     } else if msg != "" {
-        e2elog.Logf("passphrase destroyed, but message returned: %s", msg)
+        framework.Logf("passphrase destroyed, but message returned: %s", msg)
     }
 }
@@ -479,17 +478,17 @@ func validateFscryptClone(
 ) {
     pvc, err := loadPVC(pvcPath)
     if err != nil {
-        e2elog.Failf("failed to load PVC: %v", err)
+        framework.Failf("failed to load PVC: %v", err)
     }
     pvc.Namespace = f.UniqueName
     err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
     if err != nil {
-        e2elog.Failf("failed to create PVC: %v", err)
+        framework.Failf("failed to create PVC: %v", err)
     }
     app, err := loadApp(appPath)
     if err != nil {
-        e2elog.Failf("failed to load application: %v", err)
+        framework.Failf("failed to load application: %v", err)
     }
     label := make(map[string]string)
     label[appKey] = appLabel
@@ -501,18 +500,18 @@ func validateFscryptClone(
     }
     wErr := writeDataInPod(app, &opt, f)
     if wErr != nil {
-        e2elog.Failf("failed to write data from application %v", wErr)
+        framework.Failf("failed to write data from application %v", wErr)
     }
     pvcClone, err := loadPVC(pvcSmartClonePath)
     if err != nil {
-        e2elog.Failf("failed to load PVC: %v", err)
+        framework.Failf("failed to load PVC: %v", err)
     }
     pvcClone.Spec.DataSource.Name = pvc.Name
     pvcClone.Namespace = f.UniqueName
     appClone, err := loadApp(appSmartClonePath)
     if err != nil {
-        e2elog.Failf("failed to load application: %v", err)
+        framework.Failf("failed to load application: %v", err)
     }
     appClone.Namespace = f.UniqueName
     appClone.Labels = map[string]string{
@@ -521,50 +520,50 @@ func validateFscryptClone(
     err = createPVCAndApp(f.UniqueName, f, pvcClone, appClone, deployTimeout)
     if err != nil {
-        e2elog.Failf("failed to create PVC or application (%s): %v", f.UniqueName, err)
+        framework.Failf("failed to create PVC or application (%s): %v", f.UniqueName, err)
     }
     _, csiVolumeHandle, err := getInfoFromPVC(pvcClone.Namespace, pvcClone.Name, f)
     if err != nil {
-        e2elog.Failf("failed to get pvc info: %s", err)
+        framework.Failf("failed to get pvc info: %s", err)
     }
     if kms != noKMS && kms.canGetPassphrase() {
         // check new passphrase created
         stdOut, stdErr := kms.getPassphrase(f, csiVolumeHandle)
         if stdOut != "" {
-            e2elog.Logf("successfully read the passphrase from vault: %s", stdOut)
+            framework.Logf("successfully read the passphrase from vault: %s", stdOut)
         }
         if stdErr != "" {
-            e2elog.Failf("failed to read passphrase from vault: %s", stdErr)
+            framework.Failf("failed to read passphrase from vault: %s", stdErr)
         }
     }
     // delete parent pvc
     err = deletePVCAndApp("", f, pvc, app)
     if err != nil {
-        e2elog.Failf("failed to delete PVC or application: %v", err)
+        framework.Failf("failed to delete PVC or application: %v", err)
     }
     err = deletePVCAndApp(f.UniqueName, f, pvcClone, appClone)
     if err != nil {
-        e2elog.Failf("failed to delete PVC or application (%s): %v", f.UniqueName, err)
+        framework.Failf("failed to delete PVC or application (%s): %v", f.UniqueName, err)
     }
     if kms != noKMS && kms.canGetPassphrase() {
         // check passphrase deleted
         stdOut, _ := kms.getPassphrase(f, csiVolumeHandle)
         if stdOut != "" {
-            e2elog.Failf("passphrase found in vault while should be deleted: %s", stdOut)
+            framework.Failf("passphrase found in vault while should be deleted: %s", stdOut)
         }
     }
     if kms != noKMS && kms.canVerifyKeyDestroyed() {
         destroyed, msg := kms.verifyKeyDestroyed(f, csiVolumeHandle)
         if !destroyed {
-            e2elog.Failf("passphrased was not destroyed: %s", msg)
+            framework.Failf("passphrased was not destroyed: %s", msg)
         } else if msg != "" {
-            e2elog.Logf("passphrase destroyed, but message returned: %s", msg)
+            framework.Logf("passphrase destroyed, but message returned: %s", msg)
         }
     }
 }
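
Most of the e2e changes in this sync are this one mechanical migration: the `e2elog` import alias is dropped and logging goes through the `framework` package directly. A minimal sketch of the pattern, assuming only the `framework` import shown above:

```go
package e2e

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

// logStorageClassError shows the migrated call sites: framework.Logf and
// framework.Failf replace the former e2elog.Logf/e2elog.Failf aliases.
func logStorageClassError(name string, err error) {
	framework.Logf("error creating StorageClass %q: %v", name, err)
	if err != nil {
		// Failf logs the error and aborts the current test.
		framework.Failf("failed to create StorageClass %q: %v", name, err)
	}
}
```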


@@ -23,7 +23,6 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )
 func validateBiggerCloneFromPVC(f *framework.Framework,
@@ -60,14 +59,14 @@ func validateBiggerCloneFromPVC(f *framework.Framework,
     pvcClone, err := loadPVC(pvcClonePath)
     if err != nil {
-        e2elog.Failf("failed to load PVC: %v", err)
+        framework.Failf("failed to load PVC: %v", err)
     }
     pvcClone.Namespace = f.UniqueName
     pvcClone.Spec.DataSource.Name = pvc.Name
     pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(newSize)
     appClone, err := loadApp(appClonePath)
     if err != nil {
-        e2elog.Failf("failed to load application: %v", err)
+        framework.Failf("failed to load application: %v", err)
     }
     appClone.Namespace = f.UniqueName
     appClone.Labels = label


@@ -24,7 +24,7 @@ import (
     . "github.com/onsi/gomega" // nolint
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/kubernetes"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    "k8s.io/kubernetes/test/e2e/framework"
 )
 var (
@@ -69,7 +69,7 @@ func deleteVault() {
 func createORDeleteVault(action kubectlAction) {
     data, err := replaceNamespaceInTemplate(vaultExamplePath + vaultServicePath)
     if err != nil {
-        e2elog.Failf("failed to read content from %s %v", vaultExamplePath+vaultServicePath, err)
+        framework.Failf("failed to read content from %s %v", vaultExamplePath+vaultServicePath, err)
     }
     data = strings.ReplaceAll(data, "vault.default", "vault."+cephCSINamespace)
@@ -77,26 +77,26 @@ func createORDeleteVault(action kubectlAction) {
     data = strings.ReplaceAll(data, "value: default", "value: "+cephCSINamespace)
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
-        e2elog.Failf("failed to %s vault statefulset %v", action, err)
+        framework.Failf("failed to %s vault statefulset %v", action, err)
     }
     data, err = replaceNamespaceInTemplate(vaultExamplePath + vaultRBACPath)
     if err != nil {
-        e2elog.Failf("failed to read content from %s %v", vaultExamplePath+vaultRBACPath, err)
+        framework.Failf("failed to read content from %s %v", vaultExamplePath+vaultRBACPath, err)
     }
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
-        e2elog.Failf("failed to %s vault statefulset %v", action, err)
+        framework.Failf("failed to %s vault statefulset %v", action, err)
     }
     data, err = replaceNamespaceInTemplate(vaultExamplePath + vaultConfigPath)
     if err != nil {
-        e2elog.Failf("failed to read content from %s %v", vaultExamplePath+vaultConfigPath, err)
+        framework.Failf("failed to read content from %s %v", vaultExamplePath+vaultConfigPath, err)
     }
     data = strings.ReplaceAll(data, "default", cephCSINamespace)
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
-        e2elog.Failf("failed to %s vault configmap %v", action, err)
+        framework.Failf("failed to %s vault configmap %v", action, err)
     }
 }


@@ -32,7 +32,7 @@ import (
     "k8s.io/client-go/kubernetes"
     deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
 // execCommandInPodWithName run command in pod using podName.
@@ -44,7 +44,7 @@ func execCommandInPodWithName(
     nameSpace string,
 ) (string, string, error) {
     cmd := []string{"/bin/sh", "-c", cmdString}
-    podOpt := framework.ExecOptions{
+    podOpt := e2epod.ExecOptions{
         Command:   cmd,
         PodName:   podName,
         Namespace: nameSpace,
@@ -55,7 +55,7 @@ func execCommandInPodWithName(
         PreserveWhitespace: true,
     }
-    return f.ExecWithOptions(podOpt)
+    return e2epod.ExecWithOptions(f, podOpt)
 }
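
Besides logging, the exec helper moved: `ExecOptions` and `ExecWithOptions` now live in the framework's pod subpackage and take the framework instance as an argument instead of being methods on it. A sketch of the updated call, mirroring the options set in the hunk above:

```go
package e2e

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// execShellInPod runs a shell command in the named pod, using the
// relocated e2epod helpers rather than methods on framework.Framework.
func execShellInPod(f *framework.Framework, cmd, podName, ns string) (string, string, error) {
	podOpt := e2epod.ExecOptions{
		Command:            []string{"/bin/sh", "-c", cmd},
		PodName:            podName,
		Namespace:          ns,
		CaptureStdout:      true,
		CaptureStderr:      true,
		PreserveWhitespace: true, // keep command output verbatim
	}
	// Previously: f.ExecWithOptions(podOpt)
	return e2epod.ExecWithOptions(f, podOpt)
}
```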
 // loadAppDeployment loads the deployment app config and return deployment
@@ -92,7 +92,7 @@ func deleteDeploymentApp(clientSet kubernetes.Interface, name, ns string, deploy
         return fmt.Errorf("failed to delete deployment: %w", err)
     }
     start := time.Now()
-    e2elog.Logf("Waiting for deployment %q to be deleted", name)
+    framework.Logf("Waiting for deployment %q to be deleted", name)
     return wait.PollImmediate(poll, timeout, func() (bool, error) {
         _, err := clientSet.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
@@ -103,7 +103,7 @@ func deleteDeploymentApp(clientSet kubernetes.Interface, name, ns string, deploy
         if apierrs.IsNotFound(err) {
             return true, nil
         }
-        e2elog.Logf("%q deployment to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
+        framework.Logf("%q deployment to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
         return false, fmt.Errorf("failed to get deployment: %w", err)
     }
@@ -116,7 +116,7 @@ func deleteDeploymentApp(clientSet kubernetes.Interface, name, ns string, deploy
 func waitForDeploymentInAvailableState(clientSet kubernetes.Interface, name, ns string, deployTimeout int) error {
     timeout := time.Duration(deployTimeout) * time.Minute
     start := time.Now()
-    e2elog.Logf("Waiting up to %q to be in Available state", name)
+    framework.Logf("Waiting up to %q to be in Available state", name)
     return wait.PollImmediate(poll, timeout, func() (bool, error) {
         d, err := clientSet.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
@@ -127,7 +127,7 @@ func waitForDeploymentInAvailableState(clientSet kubernetes.Interface, name, ns
         if apierrs.IsNotFound(err) {
             return false, nil
         }
-        e2elog.Logf("%q deployment to be Available (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
+        framework.Logf("%q deployment to be Available (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
         return false, err
     }
@@ -154,7 +154,7 @@ func waitForDeploymentComplete(clientSet kubernetes.Interface, name, ns string,
         if apierrs.IsNotFound(err) {
             return false, nil
         }
-        e2elog.Logf("deployment error: %v", err)
+        framework.Logf("deployment error: %v", err)
         return false, err
     }
@@ -166,7 +166,7 @@ func waitForDeploymentComplete(clientSet kubernetes.Interface, name, ns string,
     if deployment.Status.Replicas == deployment.Status.ReadyReplicas {
         return true, nil
     }
-    e2elog.Logf(
+    framework.Logf(
         "deployment status: expected replica count %d running replica count %d",
         deployment.Status.Replicas,
         deployment.Status.ReadyReplicas)
@@ -286,7 +286,7 @@ func (rnr *rookNFSResource) Do(action kubectlAction) error {
             if err != nil {
                 // depending on the Ceph/Rook version, modules are
                 // enabled by default
-                e2elog.Logf("enabling module %q failed: %v", module, err)
+                framework.Logf("enabling module %q failed: %v", module, err)
             }
         }
@@ -295,7 +295,7 @@ func (rnr *rookNFSResource) Do(action kubectlAction) error {
         cmd := fmt.Sprintf("ceph orch set backend %s", rnr.orchBackend)
         _, _, err := execCommandInToolBoxPod(rnr.f, cmd, rookNamespace)
         if err != nil {
-            e2elog.Logf("setting orch backend %q failed: %v", rnr.orchBackend, err)
+            framework.Logf("setting orch backend %q failed: %v", rnr.orchBackend, err)
         }
     }
@@ -318,14 +318,14 @@ func waitForDeploymentUpdateScale(
         if isRetryableAPIError(upsErr) {
             return false, nil
         }
-        e2elog.Logf(
+        framework.Logf(
             "Deployment UpdateScale %s/%s has not completed yet (%d seconds elapsed)",
             ns, deploymentName, int(time.Since(start).Seconds()))
         return false, fmt.Errorf("error update scale deployment %s/%s: %w", ns, deploymentName, upsErr)
     }
     if scaleResult.Spec.Replicas != scale.Spec.Replicas {
-        e2elog.Logf("scale result not matching for deployment %s/%s, desired scale %d, got %d",
+        framework.Logf("scale result not matching for deployment %s/%s, desired scale %d, got %d",
             ns, deploymentName, scale.Spec.Replicas, scaleResult.Spec.Replicas)
         return false, fmt.Errorf("error scale not matching in deployment %s/%s: %w", ns, deploymentName, upsErr)
@@ -354,7 +354,7 @@ func waitForDeploymentUpdate(
         if isRetryableAPIError(upErr) {
             return false, nil
         }
-        e2elog.Logf(
+        framework.Logf(
             "Deployment Update %s/%s has not completed yet (%d seconds elapsed)",
             deployment.Namespace, deployment.Name, int(time.Since(start).Seconds()))
@@ -391,7 +391,7 @@ func waitForContainersArgsUpdate(
     containers []string,
     timeout int,
 ) error {
-    e2elog.Logf("waiting for deployment updates %s/%s", ns, deploymentName)
+    framework.Logf("waiting for deployment updates %s/%s", ns, deploymentName)
     // wait for the deployment to be available
     err := waitForDeploymentInAvailableState(c, deploymentName, ns, deployTimeout)
@@ -463,14 +463,14 @@ func waitForContainersArgsUpdate(
         if isRetryableAPIError(getErr) {
             return false, nil
         }
-        e2elog.Logf(
+        framework.Logf(
            "Deployment Get %s/%s has not completed yet (%d seconds elapsed)",
            ns, deploymentName, int(time.Since(start).Seconds()))
         return false, fmt.Errorf("error getting deployment %s/%s: %w", ns, deploymentName, getErr)
     }
     if deploy.Status.Replicas != count {
-        e2elog.Logf("Expected deployment %s/%s replicas %d, got %d", ns, deploymentName, count, deploy.Status.Replicas)
+        framework.Logf("Expected deployment %s/%s replicas %d, got %d", ns, deploymentName, count, deploy.Status.Replicas)
         return false, fmt.Errorf("error expected deployment %s/%s replicas %d, got %d",
             ns, deploymentName, count, deploy.Status.Replicas)


@@ -22,7 +22,7 @@ import (
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     clientset "k8s.io/client-go/kubernetes"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    "k8s.io/kubernetes/test/e2e/framework"
     frameworkPod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
@@ -32,7 +32,7 @@ func logsCSIPods(label string, c clientset.Interface) {
     }
     podList, err := c.CoreV1().Pods(cephCSINamespace).List(context.TODO(), opt)
     if err != nil {
-        e2elog.Logf("failed to list pods with selector %s %v", label, err)
+        framework.Logf("failed to list pods with selector %s %v", label, err)
         return
     }
@@ -50,11 +50,11 @@ func kubectlLogPod(c clientset.Interface, pod *v1.Pod) {
         if err != nil {
             logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container[i].Name)
             if err != nil {
-                e2elog.Logf("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container[i].Name, err)
+                framework.Logf("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container[i].Name, err)
             }
         }
-        e2elog.Logf("Logs of %v/%v:%v on node %v\n", pod.Namespace, pod.Name, container[i].Name, pod.Spec.NodeName)
-        e2elog.Logf("STARTLOG\n\n%s\n\nENDLOG for container %v:%v:%v", logs, pod.Namespace, pod.Name, container[i].Name)
+        framework.Logf("Logs of %v/%v:%v on node %v\n", pod.Namespace, pod.Name, container[i].Name, pod.Spec.NodeName)
+        framework.Logf("STARTLOG\n\n%s\n\nENDLOG for container %v:%v:%v", logs, pod.Namespace, pod.Name, container[i].Name)
     }
 }


@@ -28,7 +28,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/kubernetes"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    "k8s.io/kubernetes/test/e2e/framework"
 )
 func createNamespace(c kubernetes.Interface, name string) error {
@@ -46,7 +46,7 @@ func createNamespace(c kubernetes.Interface, name string) error {
     return wait.PollImmediate(poll, timeout, func() (bool, error) {
         _, err := c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
         if err != nil {
-            e2elog.Logf("Error getting namespace: '%s': %v", name, err)
+            framework.Logf("Error getting namespace: '%s': %v", name, err)
             if apierrs.IsNotFound(err) {
                 return false, nil
             }
@@ -74,7 +74,7 @@ func deleteNamespace(c kubernetes.Interface, name string) error {
         if apierrs.IsNotFound(err) {
             return true, nil
         }
-        e2elog.Logf("Error getting namespace: '%s': %v", name, err)
+        framework.Logf("Error getting namespace: '%s': %v", name, err)
         if isRetryableAPIError(err) {
             return false, nil
         }
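
The namespace helpers keep the usual poll-until-settled shape seen throughout these files. A condensed sketch of the deletion wait, with the ceph-csi-specific isRetryableAPIError check folded out so the example stays self-contained:

```go
package e2e

import (
	"context"
	"time"

	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForNamespaceGone polls until Get on the namespace returns NotFound,
// logging and propagating any other error.
func waitForNamespaceGone(c kubernetes.Interface, name string, poll, timeout time.Duration) error {
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err := c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
		if err == nil {
			return false, nil // still present, keep waiting
		}
		if apierrs.IsNotFound(err) {
			return true, nil // deleted
		}
		framework.Logf("Error getting namespace %q: %v", name, err)
		return false, err
	})
}
```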


@@ -31,7 +31,7 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
     "k8s.io/pod-security-admission/api"
 )
@@ -56,19 +56,19 @@ func deployNFSPlugin(f *framework.Framework) {
     err := deleteResource(nfsDirPath + nfsProvisionerRBAC)
     if err != nil {
-        e2elog.Failf("failed to delete provisioner rbac %s: %v", nfsDirPath+nfsProvisionerRBAC, err)
+        framework.Failf("failed to delete provisioner rbac %s: %v", nfsDirPath+nfsProvisionerRBAC, err)
     }
     err = deleteResource(nfsDirPath + nfsNodePluginRBAC)
     if err != nil {
-        e2elog.Failf("failed to delete nodeplugin rbac %s: %v", nfsDirPath+nfsNodePluginRBAC, err)
+        framework.Failf("failed to delete nodeplugin rbac %s: %v", nfsDirPath+nfsNodePluginRBAC, err)
     }
     // the pool should not be deleted, as it may contain configurations
     // from non-e2e related CephNFS objects
     err = createPool(f, nfsPoolName)
     if err != nil {
-        e2elog.Failf("failed to create pool for NFS config %q: %v", nfsPoolName, err)
+        framework.Failf("failed to create pool for NFS config %q: %v", nfsPoolName, err)
     }
     createORDeleteNFSResources(f, kubectlCreate)
@@ -125,7 +125,7 @@ func createORDeleteNFSResources(f *framework.Framework, action kubectlAction) {
     for _, r := range resources {
         err := r.Do(action)
         if err != nil {
-            e2elog.Failf("failed to %s resource: %v", action, err)
+            framework.Failf("failed to %s resource: %v", action, err)
         }
     }
 }
@@ -186,7 +186,7 @@ func createNFSStorageClass(
     return wait.PollImmediate(poll, timeout, func() (bool, error) {
         _, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
         if err != nil {
-            e2elog.Logf("error creating StorageClass %q: %v", sc.Name, err)
+            framework.Logf("error creating StorageClass %q: %v", sc.Name, err)
             if apierrs.IsAlreadyExists(err) {
                 return true, nil
             }
@@ -205,7 +205,7 @@ func createNFSStorageClass(
 func unmountNFSVolume(f *framework.Framework, appName, pvcName string) error {
     pod, err := f.ClientSet.CoreV1().Pods(f.UniqueName).Get(context.TODO(), appName, metav1.GetOptions{})
     if err != nil {
-        e2elog.Logf("Error occurred getting pod %s in namespace %s", appName, f.UniqueName)
+        framework.Logf("Error occurred getting pod %s in namespace %s", appName, f.UniqueName)
         return fmt.Errorf("failed to get pod: %w", err)
     }
@@ -213,7 +213,7 @@ func unmountNFSVolume(f *framework.Framework, appName, pvcName string) error {
         PersistentVolumeClaims(f.UniqueName).
         Get(context.TODO(), pvcName, metav1.GetOptions{})
     if err != nil {
-        e2elog.Logf("Error occurred getting PVC %s in namespace %s", pvcName, f.UniqueName)
+        framework.Logf("Error occurred getting PVC %s in namespace %s", pvcName, f.UniqueName)
         return fmt.Errorf("failed to get pvc: %w", err)
     }
@@ -229,7 +229,7 @@ func unmountNFSVolume(f *framework.Framework, appName, pvcName string) error {
         "csi-nfsplugin", // name of the container
         cephCSINamespace)
     if stdErr != "" {
-        e2elog.Logf("StdErr occurred: %s", stdErr)
+        framework.Logf("StdErr occurred: %s", stdErr)
     }
     return err
@@ -249,7 +249,7 @@ var _ = Describe("nfs", func() {
         if cephCSINamespace != defaultNs {
             err := createNamespace(c, cephCSINamespace)
             if err != nil {
-                e2elog.Failf("failed to create namespace %s: %v", cephCSINamespace, err)
+                framework.Failf("failed to create namespace %s: %v", cephCSINamespace, err)
             }
         }
         deployNFSPlugin(f)
@@ -259,25 +259,25 @@ var _ = Describe("nfs", func() {
         subvolumegroup = defaultSubvolumegroup
         err := createConfigMap(nfsDirPath, f.ClientSet, f)
         if err != nil {
-            e2elog.Failf("failed to create configmap: %v", err)
+            framework.Failf("failed to create configmap: %v", err)
         }
         // create nfs provisioner secret
         key, err := createCephUser(f, keyringCephFSProvisionerUsername, cephFSProvisionerCaps())
         if err != nil {
-            e2elog.Failf("failed to create user %s: %v", keyringCephFSProvisionerUsername, err)
+            framework.Failf("failed to create user %s: %v", keyringCephFSProvisionerUsername, err)
         }
         err = createCephfsSecret(f, cephFSProvisionerSecretName, keyringCephFSProvisionerUsername, key)
         if err != nil {
-            e2elog.Failf("failed to create provisioner secret: %v", err)
+            framework.Failf("failed to create provisioner secret: %v", err)
         }
         // create nfs plugin secret
         key, err = createCephUser(f, keyringCephFSNodePluginUsername, cephFSNodePluginCaps())
         if err != nil {
-            e2elog.Failf("failed to create user %s: %v", keyringCephFSNodePluginUsername, err)
+            framework.Failf("failed to create user %s: %v", keyringCephFSNodePluginUsername, err)
         }
         err = createCephfsSecret(f, cephFSNodePluginSecretName, keyringCephFSNodePluginUsername, key)
         if err != nil {
-            e2elog.Failf("failed to create node secret: %v", err)
+            framework.Failf("failed to create node secret: %v", err)
         }
     })
@@ -294,34 +294,34 @@ var _ = Describe("nfs", func() {
             logsCSIPods("app=csi-nfsplugin", c)
             // log all details from the namespace where Ceph-CSI is deployed
-            framework.DumpAllNamespaceInfo(c, cephCSINamespace)
+            e2edebug.DumpAllNamespaceInfo(c, cephCSINamespace)
         }
         err := deleteConfigMap(nfsDirPath)
         if err != nil {
-            e2elog.Failf("failed to delete configmap: %v", err)
+            framework.Failf("failed to delete configmap: %v", err)
         }
         err = c.CoreV1().
             Secrets(cephCSINamespace).
             Delete(context.TODO(), cephFSProvisionerSecretName, metav1.DeleteOptions{})
         if err != nil {
-            e2elog.Failf("failed to delete provisioner secret: %v", err)
+            framework.Failf("failed to delete provisioner secret: %v", err)
         }
         err = c.CoreV1().
             Secrets(cephCSINamespace).
             Delete(context.TODO(), cephFSNodePluginSecretName, metav1.DeleteOptions{})
         if err != nil {
-            e2elog.Failf("failed to delete node secret: %v", err)
+            framework.Failf("failed to delete node secret: %v", err)
         }
         err = deleteResource(nfsExamplePath + "storageclass.yaml")
         if err != nil {
-            e2elog.Failf("failed to delete storageclass: %v", err)
+            framework.Failf("failed to delete storageclass: %v", err)
         }
         if deployNFS {
             deleteNFSPlugin()
             if cephCSINamespace != defaultNs {
                 err := deleteNamespace(c, cephCSINamespace)
                 if err != nil {
-                    e2elog.Failf("failed to delete namespace %s: %v", cephCSINamespace, err)
+                    framework.Failf("failed to delete namespace %s: %v", cephCSINamespace, err)
                 }
             }
         }
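
One more relocation shows up in this hunk: DumpAllNamespaceInfo moved out of the framework package into its debug subpackage. A minimal sketch of the updated teardown call:

```go
package e2e

import (
	clientset "k8s.io/client-go/kubernetes"
	e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
)

// dumpCSINamespace collects namespace details (pods, events, and so on)
// on test failure, via the relocated e2edebug helper.
func dumpCSINamespace(c clientset.Interface, ns string) {
	// Previously: framework.DumpAllNamespaceInfo(c, ns)
	e2edebug.DumpAllNamespaceInfo(c, ns)
}
```
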
@@ -345,38 +345,38 @@ var _ = Describe("nfs", func() {
         metadataPool, getErr := getCephFSMetadataPoolName(f, fileSystemName)
         if getErr != nil {
-            e2elog.Failf("failed getting cephFS metadata pool name: %v", getErr)
+            framework.Failf("failed getting cephFS metadata pool name: %v", getErr)
         }
         By("checking provisioner deployment is running", func() {
             err := waitForDeploymentComplete(f.ClientSet, nfsDeploymentName, cephCSINamespace, deployTimeout)
             if err != nil {
-                e2elog.Failf("timeout waiting for deployment %s: %v", nfsDeploymentName, err)
+                framework.Failf("timeout waiting for deployment %s: %v", nfsDeploymentName, err)
             }
         })
         By("checking nodeplugin deamonset pods are running", func() {
             err := waitForDaemonSets(nfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
             if err != nil {
-                e2elog.Failf("timeout waiting for daemonset %s: %v", nfsDeamonSetName, err)
+                framework.Failf("timeout waiting for daemonset %s: %v", nfsDeamonSetName, err)
             }
         })
         By("verify RWOP volume support", func() {
             err := createNFSStorageClass(f.ClientSet, f, false, nil)
             if err != nil {
-                e2elog.Failf("failed to create NFS storageclass: %v", err)
+                framework.Failf("failed to create NFS storageclass: %v", err)
             }
             pvc, err := loadPVC(pvcRWOPPath)
             if err != nil {
-                e2elog.Failf("failed to load PVC: %v", err)
+                framework.Failf("failed to load PVC: %v", err)
             }
             pvc.Namespace = f.UniqueName
             // create application
             app, err := loadApp(appRWOPPath)
             if err != nil {
-                e2elog.Failf("failed to load application: %v", err)
+                framework.Failf("failed to load application: %v", err)
             }
             app.Namespace = f.UniqueName
             baseAppName := app.Name
@@ -384,59 +384,59 @@ var _ = Describe("nfs", func() {
             err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
             if err != nil {
                 if rwopMayFail(err) {
-                    e2elog.Logf("RWOP is not supported: %v", err)
+                    framework.Logf("RWOP is not supported: %v", err)
                     return
                 }
-                e2elog.Failf("failed to create PVC: %v", err)
+                framework.Failf("failed to create PVC: %v", err)
             }
             err = createApp(f.ClientSet, app, deployTimeout)
             if err != nil {
-                e2elog.Failf("failed to create application: %v", err)
+                framework.Failf("failed to create application: %v", err)
             }
             validateSubvolumeCount(f, 1, fileSystemName, defaultSubvolumegroup)
             err = validateRWOPPodCreation(f, pvc, app, baseAppName)
             if err != nil {
-                e2elog.Failf("failed to validate RWOP pod creation: %v", err)
+                framework.Failf("failed to validate RWOP pod creation: %v", err)
             }
             validateSubvolumeCount(f, 0, fileSystemName, defaultSubvolumegroup)
             err = deleteResource(nfsExamplePath + "storageclass.yaml")
             if err != nil {
-                e2elog.Failf("failed to delete NFS storageclass: %v", err)
+                framework.Failf("failed to delete NFS storageclass: %v", err)
             }
         })
         By("create a storageclass with pool and a PVC then bind it to an app", func() {
             err := createNFSStorageClass(f.ClientSet, f, false, nil)
             if err != nil {
-                e2elog.Failf("failed to create NFS storageclass: %v", err)
+                framework.Failf("failed to create NFS storageclass: %v", err)
             }
             err = validatePVCAndAppBinding(pvcPath, appPath, f)
             if err != nil {
-                e2elog.Failf("failed to validate NFS pvc and application binding: %v", err)
+                framework.Failf("failed to validate NFS pvc and application binding: %v", err)
             }
             err = deleteResource(nfsExamplePath + "storageclass.yaml")
             if err != nil {
-                e2elog.Failf("failed to delete NFS storageclass: %v", err)
+                framework.Failf("failed to delete NFS storageclass: %v", err)
             }
         })
         By("create a PVC and bind it to an app", func() {
             err := createNFSStorageClass(f.ClientSet, f, false, nil)
             if err != nil {
-                e2elog.Failf("failed to create NFS storageclass: %v", err)
+                framework.Failf("failed to create NFS storageclass: %v", err)
             }
             err = validatePVCAndAppBinding(pvcPath, appPath, f)
             if err != nil {
-                e2elog.Failf("failed to validate NFS pvc and application binding: %v", err)
+                framework.Failf("failed to validate NFS pvc and application binding: %v", err)
             }
         })
         By("create a PVC and bind it to an app with normal user", func() {
             err := validateNormalUserPVCAccess(pvcPath, f)
             if err != nil {
-                e2elog.Failf("failed to validate normal user NFS pvc and application binding: %v", err)
+                framework.Failf("failed to validate normal user NFS pvc and application binding: %v", err)
             }
         })
@@ -444,13 +444,13 @@ var _ = Describe("nfs", func() {
             totalCount := 2
             pvc, err := loadPVC(pvcPath)
             if err != nil {
-                e2elog.Failf("failed to load PVC: %v", err)
+                framework.Failf("failed to load PVC: %v", err)
             }
             pvc.Namespace = f.UniqueName
             app, err := loadApp(appPath)
             if err != nil {
-                e2elog.Failf("failed to load application: %v", err)
+                framework.Failf("failed to load application: %v", err)
             }
             app.Namespace = f.UniqueName
             // create PVC and app
@@ -458,11 +458,11 @@ var _ = Describe("nfs", func() {
                 name := fmt.Sprintf("%s%d", f.UniqueName, i)
                 err = createPVCAndApp(name, f, pvc, app, deployTimeout)
                 if err != nil {
-                    e2elog.Failf("failed to create PVC or application: %v", err)
+                    framework.Failf("failed to create PVC or application: %v", err)
                 }
                 err = validateSubvolumePath(f, pvc.Name, pvc.Namespace, fileSystemName, defaultSubvolumegroup)
                 if err != nil {
-                    e2elog.Failf("failed to validate subvolumePath: %v", err)
+                    framework.Failf("failed to validate subvolumePath: %v", err)
                 }
             }
@@ -472,7 +472,7 @@ var _ = Describe("nfs", func() {
                 name := fmt.Sprintf("%s%d", f.UniqueName, i)
                 err = deletePVCAndApp(name, f, pvc, app)
                 if err != nil {
-                    e2elog.Failf("failed to delete PVC or application: %v", err)
+                    framework.Failf("failed to delete PVC or application: %v", err)
                 }
             }
@@ -482,24 +482,24 @@ var _ = Describe("nfs", func() {
         By("check data persist after recreating pod", func() {
             err := checkDataPersist(pvcPath, appPath, f)
             if err != nil {
-                e2elog.Failf("failed to check data persist in pvc: %v", err)
+                framework.Failf("failed to check data persist in pvc: %v", err)
             }
         })
         By("Create PVC, bind it to an app, unmount volume and check app deletion", func() {
             pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout)
             if err != nil {
-                e2elog.Failf("failed to create PVC or application: %v", err)
+                framework.Failf("failed to create PVC or application: %v", err)
             }
             err = unmountNFSVolume(f, app.Name, pvc.Name)
             if err != nil {
-                e2elog.Failf("failed to unmount volume: %v", err)
+                framework.Failf("failed to unmount volume: %v", err)
             }
             err = deletePVCAndApp("", f, pvc, app)
             if err != nil {
-                e2elog.Failf("failed to delete PVC or application: %v", err)
+                framework.Failf("failed to delete PVC or application: %v", err)
             }
         })
@@ -507,13 +507,13 @@ var _ = Describe("nfs", func() {
             // create PVC and bind it to an app
             pvc, err := loadPVC(pvcPath)
             if err != nil {
-                e2elog.Failf("failed to load PVC: %v", err)
+                framework.Failf("failed to load PVC: %v", err)
             }
             pvc.Namespace = f.UniqueName
             app, err := loadApp(appPath)
             if err != nil {
-                e2elog.Failf("failed to load application: %v", err)
+                framework.Failf("failed to load application: %v", err)
             }
             app.Namespace = f.UniqueName
@@ -525,7 +525,7 @@ var _ = Describe("nfs", func() {
             app.Spec.Volumes[0].PersistentVolumeClaim.ReadOnly = true
             err = createPVCAndApp("", f, pvc, app, deployTimeout)
             if err != nil {
-                e2elog.Failf("failed to create PVC or application: %v", err)
+                framework.Failf("failed to create PVC or application: %v", err)
             }
             opt := metav1.ListOptions{
@@ -540,31 +540,31 @@ var _ = Describe("nfs", func() {
                 &opt)
             readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath)
             if !strings.Contains(stdErr, readOnlyErr) {
-                e2elog.Failf(stdErr)
+                framework.Failf(stdErr)
             }
             // delete PVC and app
             err = deletePVCAndApp("", f, pvc, app)
             if err != nil {
-                e2elog.Failf("failed to delete PVC or application: %v", err)
+                framework.Failf("failed to delete PVC or application: %v", err)
             }
         })
         // delete nfs provisioner secret
         err := deleteCephUser(f, keyringCephFSProvisionerUsername)
         if err != nil {
-            e2elog.Failf("failed to delete user %s: %v", keyringCephFSProvisionerUsername, err)
+            framework.Failf("failed to delete user %s: %v", keyringCephFSProvisionerUsername, err)
         }
         // delete nfs plugin secret
         err = deleteCephUser(f, keyringCephFSNodePluginUsername)
         if err != nil {
-            e2elog.Failf("failed to delete user %s: %v", keyringCephFSNodePluginUsername, err)
+            framework.Failf("failed to delete user %s: %v", keyringCephFSNodePluginUsername, err)
        }
         By("Resize PVC and check application directory size", func() {
             err := resizePVCAndValidateSize(pvcPath, appPath, f)
             if err != nil {
-                e2elog.Failf("failed to resize PVC: %v", err)
+                framework.Failf("failed to resize PVC: %v", err)
             }
         })
@@ -579,28 +579,28 @@ var _ = Describe("nfs", func() {
             wg.Add(totalCount)
             err := createNFSSnapshotClass(f)
             if err != nil {
-                e2elog.Failf("failed to delete NFS snapshotclass: %v", err)
+                framework.Failf("failed to delete NFS snapshotclass: %v", err)
             }
             defer func() {
                 err = deleteNFSSnapshotClass()
                 if err != nil {
-                    e2elog.Failf("failed to delete VolumeSnapshotClass: %v", err)
+                    framework.Failf("failed to delete VolumeSnapshotClass: %v", err)
                 }
             }()
             pvc, err := loadPVC(pvcPath)
             if err != nil {
-                e2elog.Failf("failed to load PVC: %v", err)
+                framework.Failf("failed to load PVC: %v", err)
             }
             pvc.Namespace = f.UniqueName
             err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
             if err != nil {
-                e2elog.Failf("failed to create PVC: %v", err)
+                framework.Failf("failed to create PVC: %v", err)
             }
             app, err := loadApp(appPath)
             if err != nil {
-                e2elog.Failf("failed to load application: %v", err)
+                framework.Failf("failed to load application: %v", err)
             }
             app.Namespace = f.UniqueName
@@ -613,12 +613,12 @@ var _ = Describe("nfs", func() {
             }
             checkSum, err := writeDataAndCalChecksum(app, &opt, f)
             if err != nil {
-                e2elog.Failf("failed to calculate checksum: %v", err)
+                framework.Failf("failed to calculate checksum: %v", err)
             }
             _, pv, err := getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace)
             if err != nil {
-                e2elog.Failf("failed to get PV object for %s: %v", pvc.Name, err)
+                framework.Failf("failed to get PV object for %s: %v", pvc.Name, err)
             }
             snap := getSnapshot(snapshotPath)
@@ -638,22 +638,22 @@ var _ = Describe("nfs", func() {
             for i, err := range wgErrs {
                 if err != nil {
                     // not using Failf() as it aborts the test and does not log other errors
-                    e2elog.Logf("failed to create snapshot (%s%d): %v", f.UniqueName, i, err)
+                    framework.Logf("failed to create snapshot (%s%d): %v", f.UniqueName, i, err)
                     failed++
                 }
             }
             if failed != 0 {
-                e2elog.Failf("creating snapshots failed, %d errors were logged", failed)
+                framework.Failf("creating snapshots failed, %d errors were logged", failed)
             }
             validateCephFSSnapshotCount(f, totalCount, defaultSubvolumegroup, pv)
             pvcClone, err := loadPVC(pvcClonePath)
             if err != nil {
-                e2elog.Failf("failed to load PVC: %v", err)
+                framework.Failf("failed to load PVC: %v", err)
             }
             appClone, err := loadApp(appClonePath)
             if err != nil {
-                e2elog.Failf("failed to load application: %v", err)
+                framework.Failf("failed to load application: %v", err)
             }
             pvcClone.Namespace = f.UniqueName
             appClone.Namespace = f.UniqueName
@@ -673,14 +673,14 @@ var _ = Describe("nfs", func() {
                 }
                 filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
                 var checkSumClone string
-                e2elog.Logf("Calculating checksum clone for filepath %s", filePath)
+                framework.Logf("Calculating checksum clone for filepath %s", filePath)
                 checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt)
-                e2elog.Logf("checksum for clone is %s", checkSumClone)
+                framework.Logf("checksum for clone is %s", checkSumClone)
                 if chErrs[n] != nil {
-                    e2elog.Logf("Failed calculating checksum clone %s", chErrs[n])
+                    framework.Logf("Failed calculating checksum clone %s", chErrs[n])
                 }
                 if checkSumClone != checkSum {
-                    e2elog.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone)
+                    framework.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone)
                 }
             }
             wg.Done()
@@ -691,23 +691,23 @@ var _ = Describe("nfs", func() {
             for i, err := range wgErrs {
                 if err != nil {
                     // not using Failf() as it aborts the test and does not log other errors
-                    e2elog.Logf("failed to create PVC and app (%s%d): %v", f.UniqueName, i, err)
+                    framework.Logf("failed to create PVC and app (%s%d): %v", f.UniqueName, i, err)
                     failed++
                 }
             }
             if failed != 0 {
-                e2elog.Failf("creating PVCs and apps failed, %d errors were logged", failed)
+                framework.Failf("creating PVCs and apps failed, %d errors were logged", failed)
             }
             for i, err := range chErrs {
                 if err != nil {
                     // not using Failf() as it aborts the test and does not log other errors
-                    e2elog.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
+                    framework.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
                     failed++
                 }
             }
             if failed != 0 {
-                e2elog.Failf("calculating checksum failed, %d errors were logged", failed)
+                framework.Failf("calculating checksum failed, %d errors were logged", failed)
             }
             validateSubvolumeCount(f, totalSubvolumes, fileSystemName, subvolumegroup)
@@ -729,12 +729,12 @@ var _ = Describe("nfs", func() {
             for i, err := range wgErrs {
                 if err != nil {
                     // not using Failf() as it aborts the test and does not log other errors
-                    e2elog.Logf("failed to delete PVC and app (%s%d): %v", f.UniqueName, i, err)
+                    framework.Logf("failed to delete PVC and app (%s%d): %v", f.UniqueName, i, err)
                     failed++
                 }
             }
             if failed != 0 {
-                e2elog.Failf("deleting PVCs and apps failed, %d errors were logged", failed)
+                framework.Failf("deleting PVCs and apps failed, %d errors were logged", failed)
             }
             parentPVCCount := totalSubvolumes - totalCount
@@ -755,14 +755,14 @@ var _ = Describe("nfs", func() {
                 }
                 filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
                 var checkSumClone string
-                e2elog.Logf("Calculating checksum clone for filepath %s", filePath)
+                framework.Logf("Calculating checksum clone for filepath %s", filePath)
                 checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt)
-                e2elog.Logf("checksum for clone is %s", checkSumClone)
+                framework.Logf("checksum for clone is %s", checkSumClone)
                 if chErrs[n] != nil {
-                    e2elog.Logf("Failed calculating checksum clone %s", chErrs[n])
+                    framework.Logf("Failed calculating checksum clone %s", chErrs[n])
                 }
                 if checkSumClone != checkSum {
-                    e2elog.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone)
+                    framework.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone)
                 }
             }
             wg.Done()
@@ -773,23 +773,23 @@ var _ = Describe("nfs", func() {
             for i, err := range wgErrs {
                 if err != nil {
                     // not using Failf() as it aborts the test and does not log other errors
-                    e2elog.Logf("failed to create PVC and app (%s%d): %v", f.UniqueName, i, err)
+                    framework.Logf("failed to create PVC and app (%s%d): %v", f.UniqueName, i, err)
                     failed++
                 }
             }
             if failed != 0 {
-                e2elog.Failf("creating PVCs and apps failed, %d errors were logged", failed)
+                framework.Failf("creating PVCs and apps failed, %d errors were logged", failed)
             }
             for i, err := range chErrs {
                 if err != nil {
                     // not using Failf() as it aborts the test and does not log other errors
-                    e2elog.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
+                    framework.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
failed++ failed++
} }
} }
if failed != 0 { if failed != 0 {
e2elog.Failf("calculating checksum failed, %d errors were logged", failed) framework.Failf("calculating checksum failed, %d errors were logged", failed)
} }
validateSubvolumeCount(f, totalSubvolumes, fileSystemName, subvolumegroup) validateSubvolumeCount(f, totalSubvolumes, fileSystemName, subvolumegroup)
@ -810,12 +810,12 @@ var _ = Describe("nfs", func() {
for i, err := range wgErrs { for i, err := range wgErrs {
if err != nil { if err != nil {
// not using Failf() as it aborts the test and does not log other errors // not using Failf() as it aborts the test and does not log other errors
e2elog.Logf("failed to delete snapshot (%s%d): %v", f.UniqueName, i, err) framework.Logf("failed to delete snapshot (%s%d): %v", f.UniqueName, i, err)
failed++ failed++
} }
} }
if failed != 0 { if failed != 0 {
e2elog.Failf("deleting snapshots failed, %d errors were logged", failed) framework.Failf("deleting snapshots failed, %d errors were logged", failed)
} }
validateCephFSSnapshotCount(f, 0, defaultSubvolumegroup, pv) validateCephFSSnapshotCount(f, 0, defaultSubvolumegroup, pv)
@ -835,12 +835,12 @@ var _ = Describe("nfs", func() {
for i, err := range wgErrs { for i, err := range wgErrs {
if err != nil { if err != nil {
// not using Failf() as it aborts the test and does not log other errors // not using Failf() as it aborts the test and does not log other errors
e2elog.Logf("failed to delete PVC and app (%s%d): %v", f.UniqueName, i, err) framework.Logf("failed to delete PVC and app (%s%d): %v", f.UniqueName, i, err)
failed++ failed++
} }
} }
if failed != 0 { if failed != 0 {
e2elog.Failf("deleting PVCs and apps failed, %d errors were logged", failed) framework.Failf("deleting PVCs and apps failed, %d errors were logged", failed)
} }
validateSubvolumeCount(f, parentPVCCount, fileSystemName, subvolumegroup) validateSubvolumeCount(f, parentPVCCount, fileSystemName, subvolumegroup)
@ -849,7 +849,7 @@ var _ = Describe("nfs", func() {
// delete parent pvc // delete parent pvc
err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to delete PVC or application: %v", err) framework.Failf("failed to delete PVC or application: %v", err)
} }
validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup) validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
@ -867,17 +867,17 @@ var _ = Describe("nfs", func() {
totalSubvolumes := totalCount + 1 totalSubvolumes := totalCount + 1
pvc, err := loadPVC(pvcPath) pvc, err := loadPVC(pvcPath)
if err != nil { if err != nil {
e2elog.Failf("failed to load PVC: %v", err) framework.Failf("failed to load PVC: %v", err)
} }
pvc.Namespace = f.UniqueName pvc.Namespace = f.UniqueName
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to create PVC: %v", err) framework.Failf("failed to create PVC: %v", err)
} }
app, err := loadApp(appPath) app, err := loadApp(appPath)
if err != nil { if err != nil {
e2elog.Failf("failed to load application: %v", err) framework.Failf("failed to load application: %v", err)
} }
app.Namespace = f.UniqueName app.Namespace = f.UniqueName
app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name
@ -889,18 +889,18 @@ var _ = Describe("nfs", func() {
} }
checkSum, err := writeDataAndCalChecksum(app, &opt, f) checkSum, err := writeDataAndCalChecksum(app, &opt, f)
if err != nil { if err != nil {
e2elog.Failf("failed to calculate checksum: %v", err) framework.Failf("failed to calculate checksum: %v", err)
} }
pvcClone, err := loadPVC(pvcSmartClonePath) pvcClone, err := loadPVC(pvcSmartClonePath)
if err != nil { if err != nil {
e2elog.Failf("failed to load PVC: %v", err) framework.Failf("failed to load PVC: %v", err)
} }
pvcClone.Spec.DataSource.Name = pvc.Name pvcClone.Spec.DataSource.Name = pvc.Name
pvcClone.Namespace = f.UniqueName pvcClone.Namespace = f.UniqueName
appClone, err := loadApp(appSmartClonePath) appClone, err := loadApp(appSmartClonePath)
if err != nil { if err != nil {
e2elog.Failf("failed to load application: %v", err) framework.Failf("failed to load application: %v", err)
} }
appClone.Namespace = f.UniqueName appClone.Namespace = f.UniqueName
appClone.Labels = label appClone.Labels = label
@ -913,14 +913,14 @@ var _ = Describe("nfs", func() {
if wgErrs[n] == nil { if wgErrs[n] == nil {
filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test" filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
var checkSumClone string var checkSumClone string
e2elog.Logf("Calculating checksum clone for filepath %s", filePath) framework.Logf("Calculating checksum clone for filepath %s", filePath)
checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt) checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt)
e2elog.Logf("checksum for clone is %s", checkSumClone) framework.Logf("checksum for clone is %s", checkSumClone)
if chErrs[n] != nil { if chErrs[n] != nil {
e2elog.Logf("Failed calculating checksum clone %s", chErrs[n]) framework.Logf("Failed calculating checksum clone %s", chErrs[n])
} }
if checkSumClone != checkSum { if checkSumClone != checkSum {
e2elog.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone) framework.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone)
} }
} }
wg.Done() wg.Done()
@ -932,23 +932,23 @@ var _ = Describe("nfs", func() {
for i, err := range wgErrs { for i, err := range wgErrs {
if err != nil { if err != nil {
// not using Failf() as it aborts the test and does not log other errors // not using Failf() as it aborts the test and does not log other errors
e2elog.Logf("failed to create PVC or application (%s%d): %v", f.UniqueName, i, err) framework.Logf("failed to create PVC or application (%s%d): %v", f.UniqueName, i, err)
failed++ failed++
} }
} }
if failed != 0 { if failed != 0 {
e2elog.Failf("deleting PVCs and apps failed, %d errors were logged", failed) framework.Failf("deleting PVCs and apps failed, %d errors were logged", failed)
} }
for i, err := range chErrs { for i, err := range chErrs {
if err != nil { if err != nil {
// not using Failf() as it aborts the test and does not log other errors // not using Failf() as it aborts the test and does not log other errors
e2elog.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err) framework.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
failed++ failed++
} }
} }
if failed != 0 { if failed != 0 {
e2elog.Failf("calculating checksum failed, %d errors were logged", failed) framework.Failf("calculating checksum failed, %d errors were logged", failed)
} }
validateSubvolumeCount(f, totalSubvolumes, fileSystemName, subvolumegroup) validateSubvolumeCount(f, totalSubvolumes, fileSystemName, subvolumegroup)
@ -957,7 +957,7 @@ var _ = Describe("nfs", func() {
// delete parent pvc // delete parent pvc
err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to delete PVC or application: %v", err) framework.Failf("failed to delete PVC or application: %v", err)
} }
wg.Add(totalCount) wg.Add(totalCount)
@ -975,12 +975,12 @@ var _ = Describe("nfs", func() {
for i, err := range wgErrs { for i, err := range wgErrs {
if err != nil { if err != nil {
// not using Failf() as it aborts the test and does not log other errors // not using Failf() as it aborts the test and does not log other errors
e2elog.Logf("failed to delete PVC or application (%s%d): %v", f.UniqueName, i, err) framework.Logf("failed to delete PVC or application (%s%d): %v", f.UniqueName, i, err)
failed++ failed++
} }
} }
if failed != 0 { if failed != 0 {
e2elog.Failf("deleting PVCs and apps failed, %d errors were logged", failed) framework.Failf("deleting PVCs and apps failed, %d errors were logged", failed)
} }
validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup) validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)

View File

@ -25,6 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
) )
func createNodeLabel(f *framework.Framework, labelKey, labelValue string) error { func createNodeLabel(f *framework.Framework, labelKey, labelValue string) error {
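Note: the node-label helpers did not change behavior, they only moved from the top-level framework package into the e2enode subpackage. A migrated call site, sketched under the assumption that k8s.io/kubernetes is vendored at a matching version:

package e2e

import (
	"k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// labelNode shows the new-style call; the signature is unchanged,
// only the package prefix moved from framework to e2enode.
func labelNode(c kubernetes.Interface, nodeName, key, value string) {
	e2enode.AddOrUpdateLabelOnNode(c, nodeName, key, value)
}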
@ -35,7 +36,7 @@ func createNodeLabel(f *framework.Framework, labelKey, labelValue string) error
return fmt.Errorf("failed to list node: %w", err) return fmt.Errorf("failed to list node: %w", err)
} }
for i := range nodes.Items { for i := range nodes.Items {
framework.AddOrUpdateLabelOnNode(f.ClientSet, nodes.Items[i].Name, labelKey, labelValue) e2enode.AddOrUpdateLabelOnNode(f.ClientSet, nodes.Items[i].Name, labelKey, labelValue)
} }
return nil return nil
@ -47,7 +48,7 @@ func deleteNodeLabel(c kubernetes.Interface, labelKey string) error {
return fmt.Errorf("failed to list node: %w", err) return fmt.Errorf("failed to list node: %w", err)
} }
for i := range nodes.Items { for i := range nodes.Items {
framework.RemoveLabelOffNode(c, nodes.Items[i].Name, labelKey) e2enode.RemoveLabelOffNode(c, nodes.Items[i].Name, labelKey)
} }
return nil return nil
@ -59,7 +60,7 @@ func checkNodeHasLabel(c kubernetes.Interface, labelKey, labelValue string) erro
return fmt.Errorf("failed to list node: %w", err) return fmt.Errorf("failed to list node: %w", err)
} }
for i := range nodes.Items { for i := range nodes.Items {
framework.ExpectNodeHasLabel(c, nodes.Items[i].Name, labelKey, labelValue) e2enode.ExpectNodeHasLabel(c, nodes.Items[i].Name, labelKey, labelValue)
} }
return nil return nil

View File

@ -30,7 +30,7 @@ import (
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/client/conditions" "k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
) )
const errRWOPConflict = "node has pod using PersistentVolumeClaim with the same name and ReadWriteOncePod access mode." const errRWOPConflict = "node has pod using PersistentVolumeClaim with the same name and ReadWriteOncePod access mode."
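Note: taken together, the import and call-site hunks in these files implement one mechanical mapping, reconstructed here from the diff itself (version labels are approximate):

// old e2e framework API                       new e2e framework API
// e2elog.Logf(...)                            framework.Logf(...)
// e2elog.Failf(...)                           framework.Failf(...)
// framework.ExecOptions{...}                  e2epod.ExecOptions{...}
// f.ExecWithOptions(opts)                     e2epod.ExecWithOptions(f, opts)
// f.PodClientNS(ns)                           e2epod.PodClientNS(f, ns)
// framework.AddOrUpdateLabelOnNode(...)       e2enode.AddOrUpdateLabelOnNode(...)
// framework.DumpAllNamespaceInfo(...)         e2edebug.DumpAllNamespaceInfo(...)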
@ -40,17 +40,17 @@ const errRWOPConflict = "node has pod using PersistentVolumeClaim with the same
func getDaemonSetLabelSelector(f *framework.Framework, ns, daemonSetName string) (string, error) { func getDaemonSetLabelSelector(f *framework.Framework, ns, daemonSetName string) (string, error) {
ds, err := f.ClientSet.AppsV1().DaemonSets(ns).Get(context.TODO(), daemonSetName, metav1.GetOptions{}) ds, err := f.ClientSet.AppsV1().DaemonSets(ns).Get(context.TODO(), daemonSetName, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Logf("Error getting daemonsets with name %s in namespace %s", daemonSetName, ns) framework.Logf("Error getting daemonsets with name %s in namespace %s", daemonSetName, ns)
return "", err return "", err
} }
s, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector) s, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
if err != nil { if err != nil {
e2elog.Logf("Error parsing %s daemonset selector in namespace %s", daemonSetName, ns) framework.Logf("Error parsing %s daemonset selector in namespace %s", daemonSetName, ns)
return "", err return "", err
} }
e2elog.Logf("LabelSelector for %s daemonsets in namespace %s: %s", daemonSetName, ns, s.String()) framework.Logf("LabelSelector for %s daemonsets in namespace %s: %s", daemonSetName, ns, s.String())
return s.String(), nil return s.String(), nil
} }
@ -58,12 +58,12 @@ func getDaemonSetLabelSelector(f *framework.Framework, ns, daemonSetName string)
func waitForDaemonSets(name, ns string, c kubernetes.Interface, t int) error { func waitForDaemonSets(name, ns string, c kubernetes.Interface, t int) error {
timeout := time.Duration(t) * time.Minute timeout := time.Duration(t) * time.Minute
start := time.Now() start := time.Now()
e2elog.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", timeout, ns) framework.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", timeout, ns)
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
ds, err := c.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) ds, err := c.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err) framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
if strings.Contains(err.Error(), "not found") { if strings.Contains(err.Error(), "not found") {
return false, nil return false, nil
} }
@ -75,7 +75,7 @@ func waitForDaemonSets(name, ns string, c kubernetes.Interface, t int) error {
} }
dNum := ds.Status.DesiredNumberScheduled dNum := ds.Status.DesiredNumberScheduled
ready := ds.Status.NumberReady ready := ds.Status.NumberReady
e2elog.Logf( framework.Logf(
"%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", "%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)",
ready, ready,
dNum, dNum,
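Note: waitForDaemonSets and most helpers below share one polling skeleton: wait.PollImmediate invokes the condition immediately and then every poll interval until the timeout expires; returning (false, nil) means "retry", returning an error aborts. A minimal sketch, assuming a package-level poll interval like the one these helpers reference:

package e2e

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// poll is assumed here; the real constant is defined elsewhere in e2e/.
const poll = 2 * time.Second

// waitUntil retries cond until it returns true, an error, or the timeout.
func waitUntil(minutes int, cond wait.ConditionFunc) error {
	timeout := time.Duration(minutes) * time.Minute
	if err := wait.PollImmediate(poll, timeout, cond); err != nil {
		return fmt.Errorf("condition not met within %v: %w", timeout, err)
	}
	return nil
}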
@ -98,7 +98,7 @@ func findPodAndContainerName(f *framework.Framework, ns, cn string, opt *metav1.
listErr error listErr error
) )
err := wait.PollImmediate(poll, timeout, func() (bool, error) { err := wait.PollImmediate(poll, timeout, func() (bool, error) {
podList, listErr = f.PodClientNS(ns).List(context.TODO(), *opt) podList, listErr = e2epod.PodClientNS(f, ns).List(context.TODO(), *opt)
if listErr != nil { if listErr != nil {
if isRetryableAPIError(listErr) { if isRetryableAPIError(listErr) {
return false, nil return false, nil
@ -137,14 +137,14 @@ func getCommandInPodOpts(
f *framework.Framework, f *framework.Framework,
c, ns, cn string, c, ns, cn string,
opt *metav1.ListOptions, opt *metav1.ListOptions,
) (framework.ExecOptions, error) { ) (e2epod.ExecOptions, error) {
cmd := []string{"/bin/sh", "-c", c} cmd := []string{"/bin/sh", "-c", c}
pName, cName, err := findPodAndContainerName(f, ns, cn, opt) pName, cName, err := findPodAndContainerName(f, ns, cn, opt)
if err != nil { if err != nil {
return framework.ExecOptions{}, err return e2epod.ExecOptions{}, err
} }
return framework.ExecOptions{ return e2epod.ExecOptions{
Command: cmd, Command: cmd,
PodName: pName, PodName: pName,
Namespace: ns, Namespace: ns,
@ -188,7 +188,7 @@ func execCommandInDaemonsetPod(
} }
cmd := []string{"/bin/sh", "-c", c} cmd := []string{"/bin/sh", "-c", c}
podOpt := framework.ExecOptions{ podOpt := e2epod.ExecOptions{
Command: cmd, Command: cmd,
Namespace: ns, Namespace: ns,
PodName: podName, PodName: podName,
@ -204,7 +204,7 @@ func execCommandInDaemonsetPod(
// listPods returns slice of pods matching given ListOptions and namespace. // listPods returns slice of pods matching given ListOptions and namespace.
func listPods(f *framework.Framework, ns string, opt *metav1.ListOptions) ([]v1.Pod, error) { func listPods(f *framework.Framework, ns string, opt *metav1.ListOptions) ([]v1.Pod, error) {
podList, err := f.PodClientNS(ns).List(context.TODO(), *opt) podList, err := e2epod.PodClientNS(f, ns).List(context.TODO(), *opt)
if len(podList.Items) == 0 { if len(podList.Items) == 0 {
return podList.Items, fmt.Errorf("podlist for label '%s' in namespace %s is empty", opt.LabelSelector, ns) return podList.Items, fmt.Errorf("podlist for label '%s' in namespace %s is empty", opt.LabelSelector, ns)
} }
@ -212,18 +212,18 @@ func listPods(f *framework.Framework, ns string, opt *metav1.ListOptions) ([]v1.
return podList.Items, err return podList.Items, err
} }
func execWithRetry(f *framework.Framework, opts *framework.ExecOptions) (string, string, error) { func execWithRetry(f *framework.Framework, opts *e2epod.ExecOptions) (string, string, error) {
timeout := time.Duration(deployTimeout) * time.Minute timeout := time.Duration(deployTimeout) * time.Minute
var stdOut, stdErr string var stdOut, stdErr string
err := wait.PollImmediate(poll, timeout, func() (bool, error) { err := wait.PollImmediate(poll, timeout, func() (bool, error) {
var execErr error var execErr error
stdOut, stdErr, execErr = f.ExecWithOptions(*opts) stdOut, stdErr, execErr = e2epod.ExecWithOptions(f, *opts)
if execErr != nil { if execErr != nil {
if isRetryableAPIError(execErr) { if isRetryableAPIError(execErr) {
return false, nil return false, nil
} }
e2elog.Logf("failed to execute command: %v", execErr) framework.Logf("failed to execute command: %v", execErr)
return false, fmt.Errorf("failed to execute command: %w", execErr) return false, fmt.Errorf("failed to execute command: %w", execErr)
} }
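Note: the exec plumbing keeps its shape through the migration: ExecOptions moved from framework to e2epod, and ExecWithOptions became a package function taking the Framework as its first argument. A hedged sketch of a migrated call site (Command/PodName/Namespace appear in the hunks above; the Capture fields are assumed from the upstream struct):

package e2e

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// runInPod executes a shell command in a pod container via the
// e2epod helpers (previously f.ExecWithOptions on the Framework).
func runInPod(f *framework.Framework, ns, pod, container, shellCmd string) (string, string, error) {
	opts := e2epod.ExecOptions{
		Command:       []string{"/bin/sh", "-c", shellCmd},
		PodName:       pod,
		Namespace:     ns,
		ContainerName: container,
		CaptureStdout: true,
		CaptureStderr: true,
	}
	// returns stdout, stderr, error
	return e2epod.ExecWithOptions(f, opts)
}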
@ -242,7 +242,7 @@ func execCommandInPod(f *framework.Framework, c, ns string, opt *metav1.ListOpti
stdOut, stdErr, err := execWithRetry(f, &podOpt) stdOut, stdErr, err := execWithRetry(f, &podOpt)
if stdErr != "" { if stdErr != "" {
e2elog.Logf("stdErr occurred: %v", stdErr) framework.Logf("stdErr occurred: %v", stdErr)
} }
return stdOut, stdErr, err return stdOut, stdErr, err
@ -258,7 +258,7 @@ func execCommandInContainer(
stdOut, stdErr, err := execWithRetry(f, &podOpt) stdOut, stdErr, err := execWithRetry(f, &podOpt)
if stdErr != "" { if stdErr != "" {
e2elog.Logf("stdErr occurred: %v", stdErr) framework.Logf("stdErr occurred: %v", stdErr)
} }
return stdOut, stdErr, err return stdOut, stdErr, err
@ -268,7 +268,7 @@ func execCommandInContainerByPodName(
f *framework.Framework, shellCmd, namespace, podName, containerName string, f *framework.Framework, shellCmd, namespace, podName, containerName string,
) (string, string, error) { ) (string, string, error) {
cmd := []string{"/bin/sh", "-c", shellCmd} cmd := []string{"/bin/sh", "-c", shellCmd}
execOpts := framework.ExecOptions{ execOpts := e2epod.ExecOptions{
Command: cmd, Command: cmd,
PodName: podName, PodName: podName,
Namespace: namespace, Namespace: namespace,
@ -281,7 +281,7 @@ func execCommandInContainerByPodName(
stdOut, stdErr, err := execWithRetry(f, &execOpts) stdOut, stdErr, err := execWithRetry(f, &execOpts)
if stdErr != "" { if stdErr != "" {
e2elog.Logf("stdErr occurred: %v", stdErr) framework.Logf("stdErr occurred: %v", stdErr)
} }
return stdOut, stdErr, err return stdOut, stdErr, err
@ -298,7 +298,7 @@ func execCommandInToolBoxPod(f *framework.Framework, c, ns string) (string, stri
stdOut, stdErr, err := execWithRetry(f, &podOpt) stdOut, stdErr, err := execWithRetry(f, &podOpt)
if stdErr != "" { if stdErr != "" {
e2elog.Logf("stdErr occurred: %v", stdErr) framework.Logf("stdErr occurred: %v", stdErr)
} }
return stdOut, stdErr, err return stdOut, stdErr, err
@ -312,7 +312,7 @@ func execCommandInPodAndAllowFail(f *framework.Framework, c, ns string, opt *met
stdOut, stdErr, err := execWithRetry(f, &podOpt) stdOut, stdErr, err := execWithRetry(f, &podOpt)
if err != nil { if err != nil {
e2elog.Logf("command %s failed: %v", c, err) framework.Logf("command %s failed: %v", c, err)
} }
return stdOut, stdErr return stdOut, stdErr
@ -351,7 +351,7 @@ func createAppErr(c kubernetes.Interface, app *v1.Pod, timeout int, errString st
func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int, expectedError string) error { func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int, expectedError string) error {
timeout := time.Duration(t) * time.Minute timeout := time.Duration(t) * time.Minute
start := time.Now() start := time.Now()
e2elog.Logf("Waiting up to %v to be in Running state", name) framework.Logf("Waiting up to %v to be in Running state", name)
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}) pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
@ -376,13 +376,13 @@ func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int, ex
return false, err return false, err
} }
if strings.Contains(events.String(), expectedError) { if strings.Contains(events.String(), expectedError) {
e2elog.Logf("Expected Error %q found successfully", expectedError) framework.Logf("Expected Error %q found successfully", expectedError)
return true, err return true, err
} }
} }
case v1.PodUnknown: case v1.PodUnknown:
e2elog.Logf( framework.Logf(
"%s app is in %s phase expected to be in Running state (%d seconds elapsed)", "%s app is in %s phase expected to be in Running state (%d seconds elapsed)",
name, name,
pod.Status.Phase, pod.Status.Phase,
@ -400,7 +400,7 @@ func deletePod(name, ns string, c kubernetes.Interface, t int) error {
return fmt.Errorf("failed to delete app: %w", err) return fmt.Errorf("failed to delete app: %w", err)
} }
start := time.Now() start := time.Now()
e2elog.Logf("Waiting for pod %v to be deleted", name) framework.Logf("Waiting for pod %v to be deleted", name)
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
_, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}) _, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
@ -411,7 +411,7 @@ func deletePod(name, ns string, c kubernetes.Interface, t int) error {
if apierrs.IsNotFound(err) { if apierrs.IsNotFound(err) {
return true, nil return true, nil
} }
e2elog.Logf("%s app to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds())) framework.Logf("%s app to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
return false, fmt.Errorf("failed to get app: %w", err) return false, fmt.Errorf("failed to get app: %w", err)
} }
@ -431,7 +431,7 @@ func deletePodWithLabel(label, ns string, skipNotFound bool) error {
label, label,
fmt.Sprintf("--ignore-not-found=%t", skipNotFound)) fmt.Sprintf("--ignore-not-found=%t", skipNotFound))
if err != nil { if err != nil {
e2elog.Logf("failed to delete pod %v", err) framework.Logf("failed to delete pod %v", err)
} }
return err return err
@ -449,7 +449,7 @@ func calculateSHA512sum(f *framework.Framework, app *v1.Pod, filePath string, op
} }
// extract checksum from sha512sum output. // extract checksum from sha512sum output.
checkSum := strings.Split(sha512sumOut, " ")[0] checkSum := strings.Split(sha512sumOut, " ")[0]
e2elog.Logf("Calculated checksum %s", checkSum) framework.Logf("Calculated checksum %s", checkSum)
return checkSum, nil return checkSum, nil
} }
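Note: sha512sum prints "<digest>  <file>", so the checksum is the first space-separated field, which is what the Split call above extracts. A standalone illustration with synthetic output:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// synthetic sha512sum output: digest, two spaces, file name
	out := "3c9909afec25354d551dae21590bb26e38d53f2173b8d3dc3eee4c047e7ab1c1eb8b85103e3be7ba613b31bb5c9c36214dc9f14a42fd7a2fdb84856bca5c44c2  /mnt/test\n"
	checkSum := strings.Split(out, " ")[0]
	fmt.Println(checkSum) // prints only the digest
}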

View File

@ -29,7 +29,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
) )
@ -57,13 +56,13 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
name := pvc.Name name := pvc.Name
namespace := pvc.Namespace namespace := pvc.Namespace
start := time.Now() start := time.Now()
e2elog.Logf("Waiting up to %v to be in Bound state", pvc) framework.Logf("Waiting up to %v to be in Bound state", pvc)
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
e2elog.Logf("waiting for PVC %s (%d seconds elapsed)", name, int(time.Since(start).Seconds())) framework.Logf("waiting for PVC %s (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
pvc, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), name, metav1.GetOptions{}) pvc, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Logf("Error getting pvc %q in namespace %q: %v", name, namespace, err) framework.Logf("Error getting pvc %q in namespace %q: %v", name, namespace, err)
if isRetryableAPIError(err) { if isRetryableAPIError(err) {
return false, nil return false, nil
} }
@ -132,7 +131,7 @@ func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v
pvcToDelete := pvc pvcToDelete := pvc
err = wait.PollImmediate(poll, timeout, func() (bool, error) { err = wait.PollImmediate(poll, timeout, func() (bool, error) {
// Check that the PVC is deleted. // Check that the PVC is deleted.
e2elog.Logf( framework.Logf(
"waiting for PVC %s in state %s to be deleted (%d seconds elapsed)", "waiting for PVC %s in state %s to be deleted (%d seconds elapsed)",
pvcToDelete.Name, pvcToDelete.Name,
pvcToDelete.Status.String(), pvcToDelete.Status.String(),
@ -144,7 +143,7 @@ func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v
if pvcToDelete.Status.Phase == "" { if pvcToDelete.Status.Phase == "" {
// this is unexpected, an empty Phase is not defined // this is unexpected, an empty Phase is not defined
// FIXME: see https://github.com/ceph/ceph-csi/issues/1874 // FIXME: see https://github.com/ceph/ceph-csi/issues/1874
e2elog.Logf("PVC %s is in a weird state: %s", pvcToDelete.Name, pvcToDelete.String()) framework.Logf("PVC %s is in a weird state: %s", pvcToDelete.Name, pvcToDelete.String())
} }
return false, nil return false, nil
@ -170,7 +169,7 @@ func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
// Check that the PV is deleted. // Check that the PV is deleted.
e2elog.Logf( framework.Logf(
"waiting for PV %s in state %s to be deleted (%d seconds elapsed)", "waiting for PV %s in state %s to be deleted (%d seconds elapsed)",
pvToDelete.Name, pvToDelete.Name,
pvToDelete.Status.String(), pvToDelete.Status.String(),
@ -200,7 +199,7 @@ func getPersistentVolumeClaim(c kubernetes.Interface, namespace, name string) (*
err = wait.PollImmediate(1*time.Second, timeout, func() (bool, error) { err = wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {
pvc, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), name, metav1.GetOptions{}) pvc, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Logf("Error getting pvc %q in namespace %q: %v", name, namespace, err) framework.Logf("Error getting pvc %q in namespace %q: %v", name, namespace, err)
if isRetryableAPIError(err) { if isRetryableAPIError(err) {
return false, nil return false, nil
} }
@ -223,7 +222,7 @@ func getPersistentVolume(c kubernetes.Interface, name string) (*v1.PersistentVol
err = wait.PollImmediate(1*time.Second, timeout, func() (bool, error) { err = wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {
pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), name, metav1.GetOptions{}) pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Logf("Error getting pv %q: %v", name, err) framework.Logf("Error getting pv %q: %v", name, err)
if isRetryableAPIError(err) { if isRetryableAPIError(err) {
return false, nil return false, nil
} }
@ -258,7 +257,7 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
nameSpace := pvc.Namespace nameSpace := pvc.Namespace
name := pvc.Name name := pvc.Name
var err error var err error
e2elog.Logf("Deleting PersistentVolumeClaim %v on namespace %v", name, nameSpace) framework.Logf("Deleting PersistentVolumeClaim %v on namespace %v", name, nameSpace)
pvc, err = getPersistentVolumeClaim(c, nameSpace, name) pvc, err = getPersistentVolumeClaim(c, nameSpace, name)
if err != nil { if err != nil {
@ -277,19 +276,19 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
// Check that the PVC is really deleted. // Check that the PVC is really deleted.
e2elog.Logf( framework.Logf(
"waiting for PVC %s in state %s to be deleted (%d seconds elapsed)", "waiting for PVC %s in state %s to be deleted (%d seconds elapsed)",
name, name,
pvc.Status.String(), pvc.Status.String(),
int(time.Since(start).Seconds())) int(time.Since(start).Seconds()))
pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{}) pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{})
if err == nil { if err == nil {
e2elog.Logf("PVC %s (status: %s) has not been deleted yet, rechecking...", name, pvc.Status) framework.Logf("PVC %s (status: %s) has not been deleted yet, rechecking...", name, pvc.Status)
return false, nil return false, nil
} }
if isRetryableAPIError(err) { if isRetryableAPIError(err) {
e2elog.Logf("failed to verify deletion of PVC %s (status: %s): %v", name, pvc.Status, err) framework.Logf("failed to verify deletion of PVC %s (status: %s): %v", name, pvc.Status, err)
return false, nil return false, nil
} }
@ -300,12 +299,12 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
// Examine the pv.ClaimRef and UID. Expect nil values. // Examine the pv.ClaimRef and UID. Expect nil values.
oldPV, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) oldPV, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
if err == nil { if err == nil {
e2elog.Logf("PV %s (status: %s) has not been deleted yet, rechecking...", pv.Name, oldPV.Status) framework.Logf("PV %s (status: %s) has not been deleted yet, rechecking...", pv.Name, oldPV.Status)
return false, nil return false, nil
} }
if isRetryableAPIError(err) { if isRetryableAPIError(err) {
e2elog.Logf("failed to verify deletion of PV %s (status: %s): %v", pv.Name, oldPV.Status, err) framework.Logf("failed to verify deletion of PV %s (status: %s): %v", pv.Name, oldPV.Status, err)
return false, nil return false, nil
} }
@ -387,12 +386,12 @@ func getMetricsForPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, t i
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace) stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
if err != nil { if err != nil {
e2elog.Logf("failed to get metrics for pvc %q (%v): %v", pvc.Name, err, stdErr) framework.Logf("failed to get metrics for pvc %q (%v): %v", pvc.Name, err, stdErr)
return false, nil return false, nil
} }
if stdOut == "" { if stdOut == "" {
e2elog.Logf("no metrics received from kubelet on IP %s", kubelet) framework.Logf("no metrics received from kubelet on IP %s", kubelet)
return false, nil return false, nil
} }
@ -406,13 +405,13 @@ func getMetricsForPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, t i
} }
if strings.Contains(line, namespace) && strings.Contains(line, name) { if strings.Contains(line, namespace) && strings.Contains(line, name) {
// TODO: validate metrics if possible // TODO: validate metrics if possible
e2elog.Logf("found metrics for pvc %s/%s: %s", pvc.Namespace, pvc.Name, line) framework.Logf("found metrics for pvc %s/%s: %s", pvc.Namespace, pvc.Name, line)
return true, nil return true, nil
} }
} }
e2elog.Logf("no metrics found for pvc %s/%s", pvc.Namespace, pvc.Name) framework.Logf("no metrics found for pvc %s/%s", pvc.Namespace, pvc.Name)
return false, nil return false, nil
}) })
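Note: the metrics check above greps the kubelet endpoint for lines mentioning the PVC's namespace and name; those are the kubelet per-PVC volume stats, which in the Prometheus exposition format look roughly like this (the series names are real kubelet metrics, the values illustrative):

kubelet_volume_stats_capacity_bytes{namespace="default",persistentvolumeclaim="rbd-pvc"} 1.073741824e+09
kubelet_volume_stats_used_bytes{namespace="default",persistentvolumeclaim="rbd-pvc"} 2.0971520e+07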

e2e/rbd.go (1211 changes)

File diff suppressed because it is too large.

View File

@ -35,7 +35,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
) )
// nolint:gomnd // numbers specify Kernel versions. // nolint:gomnd // numbers specify Kernel versions.
@ -168,7 +167,7 @@ func createRBDStorageClass(
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{}) _, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
if err != nil { if err != nil {
e2elog.Logf("error creating StorageClass %q: %v", sc.Name, err) framework.Logf("error creating StorageClass %q: %v", sc.Name, err)
if isRetryableAPIError(err) { if isRetryableAPIError(err) {
return false, nil return false, nil
} }
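Note: isRetryableAPIError is used throughout but defined elsewhere in the e2e package; a plausible shape, modeled on common upstream helpers (an assumption, not the repo's exact implementation):

package e2e

import (
	apierrs "k8s.io/apimachinery/pkg/api/errors"
)

// isRetryableAPIError reports whether an API error is transient enough
// that the surrounding poll loop should try again instead of failing.
func isRetryableAPIError(err error) bool {
	// all of these predicates exist in apimachinery's errors package
	return apierrs.IsInternalError(err) ||
		apierrs.IsTimeout(err) ||
		apierrs.IsServerTimeout(err) ||
		apierrs.IsTooManyRequests(err) ||
		apierrs.IsServiceUnavailable(err)
}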
@ -326,13 +325,13 @@ func validateImageOwner(pvcPath string, f *framework.Framework) error {
} }
if radosNamespace != "" { if radosNamespace != "" {
e2elog.Logf( framework.Logf(
"found image journal %s in pool %s namespace %s", "found image journal %s in pool %s namespace %s",
"csi.volume."+imageData.imageID, "csi.volume."+imageData.imageID,
defaultRBDPool, defaultRBDPool,
radosNamespace) radosNamespace)
} else { } else {
e2elog.Logf("found image journal %s in pool %s", "csi.volume."+imageData.imageID, defaultRBDPool) framework.Logf("found image journal %s in pool %s", "csi.volume."+imageData.imageID, defaultRBDPool)
} }
if !strings.Contains(stdOut, pvc.Namespace) { if !strings.Contains(stdOut, pvc.Namespace) {
@ -347,7 +346,7 @@ func logErrors(f *framework.Framework, msg string, wgErrs []error) int {
for i, err := range wgErrs { for i, err := range wgErrs {
if err != nil { if err != nil {
// not using Failf() as it aborts the test and does not log other errors // not using Failf() as it aborts the test and does not log other errors
e2elog.Logf("%s (%s%d): %v", msg, f.UniqueName, i, err) framework.Logf("%s (%s%d): %v", msg, f.UniqueName, i, err)
failures++ failures++
} }
} }
@ -526,7 +525,7 @@ func validateEncryptedPVCAndAppBinding(pvcPath, appPath string, kms kmsConfig, f
if !destroyed { if !destroyed {
return fmt.Errorf("passphrased was not destroyed: %s", msg) return fmt.Errorf("passphrased was not destroyed: %s", msg)
} else if msg != "" { } else if msg != "" {
e2elog.Logf("passphrase destroyed, but message returned: %s", msg) framework.Logf("passphrase destroyed, but message returned: %s", msg)
} }
} }
@ -575,7 +574,7 @@ func validateEncryptedFilesystemAndAppBinding(pvcPath, appPath string, kms kmsCo
if !destroyed { if !destroyed {
return fmt.Errorf("passphrased was not destroyed: %s", msg) return fmt.Errorf("passphrased was not destroyed: %s", msg)
} else if msg != "" { } else if msg != "" {
e2elog.Logf("passphrase destroyed, but message returned: %s", msg) framework.Logf("passphrase destroyed, but message returned: %s", msg)
} }
} }
@ -723,23 +722,26 @@ func deleteBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeClaim
return err return err
} }
//nolint:unused // required for reclaimspace e2e.
// rbdDuImage contains the disk-usage statistics of an RBD image. // rbdDuImage contains the disk-usage statistics of an RBD image.
//
//nolint:unused // required for reclaimspace e2e.
type rbdDuImage struct { type rbdDuImage struct {
Name string `json:"name"` Name string `json:"name"`
ProvisionedSize uint64 `json:"provisioned_size"` ProvisionedSize uint64 `json:"provisioned_size"`
UsedSize uint64 `json:"used_size"` UsedSize uint64 `json:"used_size"`
} }
//nolint:unused // required for reclaimspace e2e.
// rbdDuImageList contains the list of images returned by 'rbd du'. // rbdDuImageList contains the list of images returned by 'rbd du'.
//
//nolint:unused // required for reclaimspace e2e.
type rbdDuImageList struct { type rbdDuImageList struct {
Images []*rbdDuImage `json:"images"` Images []*rbdDuImage `json:"images"`
} }
//nolint:deadcode,unused // required for reclaimspace e2e.
// getRbdDu runs 'rbd du' on the RBD image and returns a rbdDuImage struct with // getRbdDu runs 'rbd du' on the RBD image and returns a rbdDuImage struct with
// the result. // the result.
//
//nolint:deadcode,unused // required for reclaimspace e2e.
func getRbdDu(f *framework.Framework, pvc *v1.PersistentVolumeClaim) (*rbdDuImage, error) { func getRbdDu(f *framework.Framework, pvc *v1.PersistentVolumeClaim) (*rbdDuImage, error) {
rdil := rbdDuImageList{} rdil := rbdDuImageList{}
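Note: the json tags on rbdDuImage/rbdDuImageList match the layout of 'rbd du --format=json' output. A self-contained decoding example with abridged, illustrative output:

package main

import (
	"encoding/json"
	"fmt"
)

type rbdDuImage struct {
	Name            string `json:"name"`
	ProvisionedSize uint64 `json:"provisioned_size"`
	UsedSize        uint64 `json:"used_size"`
}

type rbdDuImageList struct {
	Images []*rbdDuImage `json:"images"`
}

func main() {
	// abridged, illustrative `rbd du --format=json` output
	out := `{"images":[{"name":"csi-vol-0001","provisioned_size":1073741824,"used_size":20971520}]}`
	var rdil rbdDuImageList
	if err := json.Unmarshal([]byte(out), &rdil); err != nil {
		panic(err)
	}
	fmt.Println(rdil.Images[0].Name, rdil.Images[0].UsedSize) // csi-vol-0001 20971520
}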
@ -768,11 +770,12 @@ func getRbdDu(f *framework.Framework, pvc *v1.PersistentVolumeClaim) (*rbdDuImag
return nil, fmt.Errorf("image %s not found", imageData.imageName) return nil, fmt.Errorf("image %s not found", imageData.imageName)
} }
//nolint:deadcode,unused // required for reclaimspace e2e.
// sparsifyBackingRBDImage runs `rbd sparsify` on the RBD image. Once done, all // sparsifyBackingRBDImage runs `rbd sparsify` on the RBD image. Once done, all
// data blocks that contain zeros are discarded/trimmed/unmapped and do not // data blocks that contain zeros are discarded/trimmed/unmapped and do not
// take up any space anymore. This can be used to verify that an empty, but // take up any space anymore. This can be used to verify that an empty, but
// allocated (with zerofill) extents have been released. // allocated (with zerofill) extents have been released.
//
//nolint:deadcode,unused // required for reclaimspace e2e.
func sparsifyBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error { func sparsifyBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error {
imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
if err != nil { if err != nil {
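Note: the relocated //nolint lines in these hunks are a mechanical consequence of gofmt in Go 1.19, which normalizes doc comments and expects directive comments to sit at the end of the comment block, separated from the prose by a bare "//" line. The shape gofmt now enforces, schematically:

// sparsifyBackingRBDImage runs `rbd sparsify` on the RBD image.
//
//nolint:deadcode,unused // required for reclaimspace e2e.
func sparsifyBackingRBDImage() {}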
@ -849,9 +852,9 @@ func getPVCImageInfoInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim
} }
if radosNamespace != "" { if radosNamespace != "" {
e2elog.Logf("found image %s in pool %s namespace %s", imageData.imageName, pool, radosNamespace) framework.Logf("found image %s in pool %s namespace %s", imageData.imageName, pool, radosNamespace)
} else { } else {
e2elog.Logf("found image %s in pool %s", imageData.imageName, pool) framework.Logf("found image %s in pool %s", imageData.imageName, pool)
} }
return stdOut, nil return stdOut, nil
@ -896,13 +899,13 @@ func checkPVCImageJournalInPool(f *framework.Framework, pvc *v1.PersistentVolume
} }
if radosNamespace != "" { if radosNamespace != "" {
e2elog.Logf( framework.Logf(
"found image journal %s in pool %s namespace %s", "found image journal %s in pool %s namespace %s",
"csi.volume."+imageData.imageID, "csi.volume."+imageData.imageID,
pool, pool,
radosNamespace) radosNamespace)
} else { } else {
e2elog.Logf("found image journal %s in pool %s", "csi.volume."+imageData.imageID, pool) framework.Logf("found image journal %s in pool %s", "csi.volume."+imageData.imageID, pool)
} }
return nil return nil
@ -931,13 +934,13 @@ func checkPVCCSIJournalInPool(f *framework.Framework, pvc *v1.PersistentVolumeCl
} }
if radosNamespace != "" { if radosNamespace != "" {
e2elog.Logf( framework.Logf(
"found CSI journal entry %s in pool %s namespace %s", "found CSI journal entry %s in pool %s namespace %s",
"csi.volume."+imageData.pvName, "csi.volume."+imageData.pvName,
pool, pool,
radosNamespace) radosNamespace)
} else { } else {
e2elog.Logf("found CSI journal entry %s in pool %s", "csi.volume."+imageData.pvName, pool) framework.Logf("found CSI journal entry %s in pool %s", "csi.volume."+imageData.pvName, pool)
} }
return nil return nil
@ -1039,7 +1042,7 @@ func waitToRemoveImagesFromTrash(f *framework.Framework, poolName string, t int)
return true, nil return true, nil
} }
errReason = fmt.Errorf("found %d images in trash. Image details %v", len(imagesInTrash), imagesInTrash) errReason = fmt.Errorf("found %d images in trash. Image details %v", len(imagesInTrash), imagesInTrash)
e2elog.Logf(errReason.Error()) framework.Logf(errReason.Error())
return false, nil return false, nil
}) })

View File

@ -30,7 +30,6 @@ import (
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/cloud-provider/volume/helpers" "k8s.io/cloud-provider/volume/helpers"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
) )
func expandPVCSize(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size string, t int) error { func expandPVCSize(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size string, t int) error {
@ -49,15 +48,15 @@ func expandPVCSize(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size s
Expect(err).Should(BeNil()) Expect(err).Should(BeNil())
start := time.Now() start := time.Now()
e2elog.Logf("Waiting up to %v to be in Resized state", pvc) framework.Logf("Waiting up to %v to be in Resized state", pvc)
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
e2elog.Logf("waiting for PVC %s (%d seconds elapsed)", pvcName, int(time.Since(start).Seconds())) framework.Logf("waiting for PVC %s (%d seconds elapsed)", pvcName, int(time.Since(start).Seconds()))
updatedPVC, err = c.CoreV1(). updatedPVC, err = c.CoreV1().
PersistentVolumeClaims(pvcNamespace). PersistentVolumeClaims(pvcNamespace).
Get(context.TODO(), pvcName, metav1.GetOptions{}) Get(context.TODO(), pvcName, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Logf("Error getting pvc in namespace: '%s': %v", pvcNamespace, err) framework.Logf("Error getting pvc in namespace: '%s': %v", pvcNamespace, err)
if isRetryableAPIError(err) { if isRetryableAPIError(err) {
return false, nil return false, nil
} }
@ -66,7 +65,7 @@ func expandPVCSize(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size s
} }
pvcConditions := updatedPVC.Status.Conditions pvcConditions := updatedPVC.Status.Conditions
if len(pvcConditions) > 0 { if len(pvcConditions) > 0 {
e2elog.Logf("pvc state %v", pvcConditions[0].Type) framework.Logf("pvc state %v", pvcConditions[0].Type)
if pvcConditions[0].Type == v1.PersistentVolumeClaimResizing || if pvcConditions[0].Type == v1.PersistentVolumeClaimResizing ||
pvcConditions[0].Type == v1.PersistentVolumeClaimFileSystemResizePending { pvcConditions[0].Type == v1.PersistentVolumeClaimFileSystemResizePending {
return false, nil return false, nil
@ -74,7 +73,7 @@ func expandPVCSize(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, size s
} }
if !updatedPVC.Status.Capacity[v1.ResourceStorage].Equal(resource.MustParse(size)) { if !updatedPVC.Status.Capacity[v1.ResourceStorage].Equal(resource.MustParse(size)) {
e2elog.Logf( framework.Logf(
"current size in status %v,expected size %v", "current size in status %v,expected size %v",
updatedPVC.Status.Capacity[v1.ResourceStorage], updatedPVC.Status.Capacity[v1.ResourceStorage],
resource.MustParse(size)) resource.MustParse(size))
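Note: the capacity check compares resource.Quantity values, not strings, so "1Gi" and its plain byte count are equal. A tiny demonstration:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	a := resource.MustParse("1Gi")
	b := resource.MustParse("1073741824") // the same size written in bytes
	fmt.Println(a.Equal(b))               // true: quantities compare by value
}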
@ -187,13 +186,13 @@ func checkAppMntSize(f *framework.Framework, opt *metav1.ListOptions, size, cmd,
start := time.Now() start := time.Now()
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
e2elog.Logf("executing cmd %s (%d seconds elapsed)", cmd, int(time.Since(start).Seconds())) framework.Logf("executing cmd %s (%d seconds elapsed)", cmd, int(time.Since(start).Seconds()))
output, stdErr, err := execCommandInPod(f, cmd, ns, opt) output, stdErr, err := execCommandInPod(f, cmd, ns, opt)
if err != nil { if err != nil {
return false, err return false, err
} }
if stdErr != "" { if stdErr != "" {
e2elog.Logf("failed to execute command in app pod %v", stdErr) framework.Logf("failed to execute command in app pod %v", stdErr)
return false, nil return false, nil
} }
@ -208,7 +207,7 @@ func checkAppMntSize(f *framework.Framework, opt *metav1.ListOptions, size, cmd,
return false, err return false, err
} }
if actualSize != expectedSize { if actualSize != expectedSize {
e2elog.Logf("expected size %s found %s information", size, output) framework.Logf("expected size %s found %s information", size, output)
return false, nil return false, nil
} }

View File

@ -30,7 +30,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
) )
func getSnapshotClass(path string) snapapi.VolumeSnapshotClass { func getSnapshotClass(path string) snapapi.VolumeSnapshotClass {
@ -74,20 +73,20 @@ func createSnapshot(snap *snapapi.VolumeSnapshot, t int) error {
if err != nil { if err != nil {
return fmt.Errorf("failed to create volumesnapshot: %w", err) return fmt.Errorf("failed to create volumesnapshot: %w", err)
} }
e2elog.Logf("snapshot with name %v created in %v namespace", snap.Name, snap.Namespace) framework.Logf("snapshot with name %v created in %v namespace", snap.Name, snap.Namespace)
timeout := time.Duration(t) * time.Minute timeout := time.Duration(t) * time.Minute
name := snap.Name name := snap.Name
start := time.Now() start := time.Now()
e2elog.Logf("waiting for %v to be in ready state", snap) framework.Logf("waiting for %v to be in ready state", snap)
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
e2elog.Logf("waiting for snapshot %s (%d seconds elapsed)", snap.Name, int(time.Since(start).Seconds())) framework.Logf("waiting for snapshot %s (%d seconds elapsed)", snap.Name, int(time.Since(start).Seconds()))
snaps, err := sclient. snaps, err := sclient.
VolumeSnapshots(snap.Namespace). VolumeSnapshots(snap.Namespace).
Get(context.TODO(), name, metav1.GetOptions{}) Get(context.TODO(), name, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Logf("Error getting snapshot in namespace: '%s': %v", snap.Namespace, err) framework.Logf("Error getting snapshot in namespace: '%s': %v", snap.Namespace, err)
if isRetryableAPIError(err) { if isRetryableAPIError(err) {
return false, nil return false, nil
} }
@ -103,7 +102,7 @@ func createSnapshot(snap *snapapi.VolumeSnapshot, t int) error {
if *snaps.Status.ReadyToUse { if *snaps.Status.ReadyToUse {
return true, nil return true, nil
} }
e2elog.Logf("snapshot %s in %v state", snap.Name, *snaps.Status.ReadyToUse) framework.Logf("snapshot %s in %v state", snap.Name, *snaps.Status.ReadyToUse)
return false, nil return false, nil
}) })
@ -125,10 +124,10 @@ func deleteSnapshot(snap *snapapi.VolumeSnapshot, t int) error {
timeout := time.Duration(t) * time.Minute timeout := time.Duration(t) * time.Minute
name := snap.Name name := snap.Name
start := time.Now() start := time.Now()
e2elog.Logf("Waiting up to %v to be deleted", snap) framework.Logf("Waiting up to %v to be deleted", snap)
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
e2elog.Logf("deleting snapshot %s (%d seconds elapsed)", name, int(time.Since(start).Seconds())) framework.Logf("deleting snapshot %s (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
_, err := sclient. _, err := sclient.
VolumeSnapshots(snap.Namespace). VolumeSnapshots(snap.Namespace).
Get(context.TODO(), name, metav1.GetOptions{}) Get(context.TODO(), name, metav1.GetOptions{})
@ -227,7 +226,7 @@ func createNFSSnapshotClass(f *framework.Framework) error {
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
_, err = sclient.VolumeSnapshotClasses().Create(context.TODO(), &sc, metav1.CreateOptions{}) _, err = sclient.VolumeSnapshotClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
if err != nil { if err != nil {
e2elog.Logf("error creating SnapshotClass %q: %v", sc.Name, err) framework.Logf("error creating SnapshotClass %q: %v", sc.Name, err)
if apierrs.IsAlreadyExists(err) { if apierrs.IsAlreadyExists(err) {
return true, nil return true, nil
} }
@ -256,7 +255,7 @@ func deleteNFSSnapshotClass() error {
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
err = sclient.VolumeSnapshotClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{}) err = sclient.VolumeSnapshotClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{})
if err != nil { if err != nil {
e2elog.Logf("error deleting SnapshotClass %q: %v", sc.Name, err) framework.Logf("error deleting SnapshotClass %q: %v", sc.Name, err)
if apierrs.IsNotFound(err) { if apierrs.IsNotFound(err) {
return true, nil return true, nil
} }
@ -341,14 +340,14 @@ func validateBiggerPVCFromSnapshot(f *framework.Framework,
} }
pvcClone, err := loadPVC(pvcClonePath) pvcClone, err := loadPVC(pvcClonePath)
if err != nil { if err != nil {
e2elog.Failf("failed to load PVC: %v", err) framework.Failf("failed to load PVC: %v", err)
} }
pvcClone.Namespace = f.UniqueName pvcClone.Namespace = f.UniqueName
pvcClone.Spec.DataSource.Name = snap.Name pvcClone.Spec.DataSource.Name = snap.Name
pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(newSize) pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(newSize)
appClone, err := loadApp(appClonePath) appClone, err := loadApp(appClonePath)
if err != nil { if err != nil {
e2elog.Failf("failed to load application: %v", err) framework.Failf("failed to load application: %v", err)
} }
appClone.Namespace = f.UniqueName appClone.Namespace = f.UniqueName
appClone.Labels = label appClone.Labels = label
@ -384,28 +383,28 @@ func validateBiggerPVCFromSnapshot(f *framework.Framework,
) )
imageList, err = listRBDImages(f, defaultRBDPool) imageList, err = listRBDImages(f, defaultRBDPool)
if err != nil { if err != nil {
e2elog.Failf("failed to list rbd images: %v", err) framework.Failf("failed to list rbd images: %v", err)
} }
e2elog.Logf("list of rbd images: %v", imageList) framework.Logf("list of rbd images: %v", imageList)
volSnapName, stdErr, err = execCommandInToolBoxPod(f, volSnapName, stdErr, err = execCommandInToolBoxPod(f,
formatImageMetaGetCmd(defaultRBDPool, imageList[0], volSnapNameKey), formatImageMetaGetCmd(defaultRBDPool, imageList[0], volSnapNameKey),
rookNamespace) rookNamespace)
if checkGetKeyError(err, stdErr) { if checkGetKeyError(err, stdErr) {
e2elog.Failf("found volume snapshot name %s/%s %s=%s: err=%v stdErr=%q", framework.Failf("found volume snapshot name %s/%s %s=%s: err=%v stdErr=%q",
rbdOptions(defaultRBDPool), imageList[0], volSnapNameKey, volSnapName, err, stdErr) rbdOptions(defaultRBDPool), imageList[0], volSnapNameKey, volSnapName, err, stdErr)
} }
volSnapNamespace, stdErr, err = execCommandInToolBoxPod(f, volSnapNamespace, stdErr, err = execCommandInToolBoxPod(f,
formatImageMetaGetCmd(defaultRBDPool, imageList[0], volSnapNamespaceKey), formatImageMetaGetCmd(defaultRBDPool, imageList[0], volSnapNamespaceKey),
rookNamespace) rookNamespace)
if checkGetKeyError(err, stdErr) { if checkGetKeyError(err, stdErr) {
e2elog.Failf("found volume snapshot namespace %s/%s %s=%s: err=%v stdErr=%q", framework.Failf("found volume snapshot namespace %s/%s %s=%s: err=%v stdErr=%q",
rbdOptions(defaultRBDPool), imageList[0], volSnapNamespaceKey, volSnapNamespace, err, stdErr) rbdOptions(defaultRBDPool), imageList[0], volSnapNamespaceKey, volSnapNamespace, err, stdErr)
} }
volSnapContentName, stdErr, err = execCommandInToolBoxPod(f, volSnapContentName, stdErr, err = execCommandInToolBoxPod(f,
formatImageMetaGetCmd(defaultRBDPool, imageList[0], volSnapContentNameKey), formatImageMetaGetCmd(defaultRBDPool, imageList[0], volSnapContentNameKey),
rookNamespace) rookNamespace)
if checkGetKeyError(err, stdErr) { if checkGetKeyError(err, stdErr) {
e2elog.Failf("found snapshotcontent name %s/%s %s=%s: err=%v stdErr=%q", framework.Failf("found snapshotcontent name %s/%s %s=%s: err=%v stdErr=%q",
rbdOptions(defaultRBDPool), imageList[0], volSnapContentNameKey, rbdOptions(defaultRBDPool), imageList[0], volSnapContentNameKey,
volSnapContentName, err, stdErr) volSnapContentName, err, stdErr)
} }

View File

@ -29,7 +29,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
"k8s.io/pod-security-admission/api" "k8s.io/pod-security-admission/api"
) )
@ -64,7 +64,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
if cephCSINamespace != defaultNs { if cephCSINamespace != defaultNs {
err = createNamespace(c, cephCSINamespace) err = createNamespace(c, cephCSINamespace)
if err != nil { if err != nil {
e2elog.Failf("failed to create namespace: %v", err) framework.Failf("failed to create namespace: %v", err)
} }
} }
@ -72,44 +72,44 @@ var _ = Describe("CephFS Upgrade Testing", func() {
// when we are done upgrading. // when we are done upgrading.
cwd, err = os.Getwd() cwd, err = os.Getwd()
if err != nil { if err != nil {
e2elog.Failf("failed to getwd: %v", err) framework.Failf("failed to getwd: %v", err)
} }
deployVault(f.ClientSet, deployTimeout) deployVault(f.ClientSet, deployTimeout)
err = upgradeAndDeployCSI(upgradeVersion, "cephfs") err = upgradeAndDeployCSI(upgradeVersion, "cephfs")
if err != nil { if err != nil {
e2elog.Failf("failed to upgrade csi: %v", err) framework.Failf("failed to upgrade csi: %v", err)
} }
err = createConfigMap(cephFSDirPath, f.ClientSet, f) err = createConfigMap(cephFSDirPath, f.ClientSet, f)
if err != nil { if err != nil {
e2elog.Failf("failed to create configmap: %v", err) framework.Failf("failed to create configmap: %v", err)
} }
var key string var key string
// create cephFS provisioner secret // create cephFS provisioner secret
key, err = createCephUser(f, keyringCephFSProvisionerUsername, cephFSProvisionerCaps()) key, err = createCephUser(f, keyringCephFSProvisionerUsername, cephFSProvisionerCaps())
if err != nil { if err != nil {
e2elog.Failf("failed to create user %s: %v", keyringCephFSProvisionerUsername, err) framework.Failf("failed to create user %s: %v", keyringCephFSProvisionerUsername, err)
} }
err = createCephfsSecret(f, cephFSProvisionerSecretName, keyringCephFSProvisionerUsername, key) err = createCephfsSecret(f, cephFSProvisionerSecretName, keyringCephFSProvisionerUsername, key)
if err != nil { if err != nil {
e2elog.Failf("failed to create provisioner secret: %v", err) framework.Failf("failed to create provisioner secret: %v", err)
} }
// create cephFS plugin secret // create cephFS plugin secret
key, err = createCephUser(f, keyringCephFSNodePluginUsername, cephFSNodePluginCaps()) key, err = createCephUser(f, keyringCephFSNodePluginUsername, cephFSNodePluginCaps())
if err != nil { if err != nil {
e2elog.Failf("failed to create user %s: %v", keyringCephFSNodePluginUsername, err) framework.Failf("failed to create user %s: %v", keyringCephFSNodePluginUsername, err)
} }
err = createCephfsSecret(f, cephFSNodePluginSecretName, keyringCephFSNodePluginUsername, key) err = createCephfsSecret(f, cephFSNodePluginSecretName, keyringCephFSNodePluginUsername, key)
if err != nil { if err != nil {
e2elog.Failf("failed to create node secret: %v", err) framework.Failf("failed to create node secret: %v", err)
} }
err = createCephFSSnapshotClass(f) err = createCephFSSnapshotClass(f)
if err != nil { if err != nil {
e2elog.Failf("failed to create snapshotclass: %v", err) framework.Failf("failed to create snapshotclass: %v", err)
} }
err = createCephfsStorageClass(f.ClientSet, f, true, nil) err = createCephfsStorageClass(f.ClientSet, f, true, nil)
if err != nil { if err != nil {
e2elog.Failf("failed to create storageclass: %v", err) framework.Failf("failed to create storageclass: %v", err)
} }
}) })
AfterEach(func() { AfterEach(func() {
@ -125,31 +125,31 @@ var _ = Describe("CephFS Upgrade Testing", func() {
logsCSIPods("app=csi-cephfsplugin", c) logsCSIPods("app=csi-cephfsplugin", c)
// log all details from the namespace where Ceph-CSI is deployed // log all details from the namespace where Ceph-CSI is deployed
framework.DumpAllNamespaceInfo(c, cephCSINamespace) e2edebug.DumpAllNamespaceInfo(c, cephCSINamespace)
} }
err = deleteConfigMap(cephFSDirPath) err = deleteConfigMap(cephFSDirPath)
if err != nil { if err != nil {
e2elog.Failf("failed to delete configmap: %v", err) framework.Failf("failed to delete configmap: %v", err)
} }
err = c.CoreV1(). err = c.CoreV1().
Secrets(cephCSINamespace). Secrets(cephCSINamespace).
Delete(context.TODO(), cephFSProvisionerSecretName, metav1.DeleteOptions{}) Delete(context.TODO(), cephFSProvisionerSecretName, metav1.DeleteOptions{})
if err != nil { if err != nil {
e2elog.Failf("failed to delete provisioner secret: %v", err) framework.Failf("failed to delete provisioner secret: %v", err)
} }
err = c.CoreV1(). err = c.CoreV1().
Secrets(cephCSINamespace). Secrets(cephCSINamespace).
Delete(context.TODO(), cephFSNodePluginSecretName, metav1.DeleteOptions{}) Delete(context.TODO(), cephFSNodePluginSecretName, metav1.DeleteOptions{})
if err != nil { if err != nil {
e2elog.Failf("failed to delete node secret: %v", err) framework.Failf("failed to delete node secret: %v", err)
} }
err = deleteResource(cephFSExamplePath + "storageclass.yaml") err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err) framework.Failf("failed to delete storageclass: %v", err)
} }
err = deleteResource(cephFSExamplePath + "snapshotclass.yaml") err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
if err != nil { if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err) framework.Failf("failed to delete storageclass: %v", err)
} }
deleteVault() deleteVault()
if deployCephFS { if deployCephFS {
@ -158,7 +158,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
err = deleteNamespace(c, cephCSINamespace) err = deleteNamespace(c, cephCSINamespace)
if err != nil { if err != nil {
e2elog.Failf("failed to delete namespace: %v", err) framework.Failf("failed to delete namespace: %v", err)
} }
} }
@ -174,13 +174,13 @@ var _ = Describe("CephFS Upgrade Testing", func() {
By("checking provisioner deployment is running", func() { By("checking provisioner deployment is running", func() {
err = waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout) err = waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("timeout waiting for deployment %s: %v", cephFSDeploymentName, err) framework.Failf("timeout waiting for deployment %s: %v", cephFSDeploymentName, err)
} }
}) })
By("checking nodeplugin deamonset pods are running", func() { By("checking nodeplugin deamonset pods are running", func() {
err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout) err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("timeout waiting for daemonset %s: %v", cephFSDeamonSetName, err) framework.Failf("timeout waiting for daemonset %s: %v", cephFSDeamonSetName, err)
} }
}) })
@ -193,13 +193,13 @@ var _ = Describe("CephFS Upgrade Testing", func() {
pvc, err = loadPVC(pvcPath) pvc, err = loadPVC(pvcPath)
if err != nil { if err != nil {
e2elog.Failf("failed to load pvc: %v", err) framework.Failf("failed to load pvc: %v", err)
} }
pvc.Namespace = f.UniqueName pvc.Namespace = f.UniqueName
app, err = loadApp(appPath) app, err = loadApp(appPath)
if err != nil { if err != nil {
e2elog.Failf("failed to load application: %v", err) framework.Failf("failed to load application: %v", err)
} }
label[appKey] = appLabel label[appKey] = appLabel
app.Namespace = f.UniqueName app.Namespace = f.UniqueName
@ -208,12 +208,12 @@ var _ = Describe("CephFS Upgrade Testing", func() {
pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize) pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
err = createPVCAndApp("", f, pvc, app, deployTimeout) err = createPVCAndApp("", f, pvc, app, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to create pvc and application: %v", err) framework.Failf("failed to create pvc and application: %v", err)
} }
var pv *v1.PersistentVolume var pv *v1.PersistentVolume
_, pv, err = getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace) _, pv, err = getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace)
if err != nil { if err != nil {
e2elog.Failf("failed to get PV object for %s: %v", pvc.Name, err) framework.Failf("failed to get PV object for %s: %v", pvc.Name, err)
} }
opt := metav1.ListOptions{ opt := metav1.ListOptions{
@ -230,19 +230,19 @@ var _ = Describe("CephFS Upgrade Testing", func() {
app.Namespace, app.Namespace,
&opt) &opt)
if stdErr != "" { if stdErr != "" {
e2elog.Failf("failed to write data to a file %s", stdErr) framework.Failf("failed to write data to a file %s", stdErr)
} }
// force an immediate write of all cached data to disk. // force an immediate write of all cached data to disk.
_, stdErr = execCommandInPodAndAllowFail(f, fmt.Sprintf("sync %s", filePath), app.Namespace, &opt) _, stdErr = execCommandInPodAndAllowFail(f, fmt.Sprintf("sync %s", filePath), app.Namespace, &opt)
if stdErr != "" { if stdErr != "" {
e2elog.Failf("failed to sync data to a disk %s", stdErr) framework.Failf("failed to sync data to a disk %s", stdErr)
} }
e2elog.Logf("Calculating checksum of %s", filePath) framework.Logf("Calculating checksum of %s", filePath)
checkSum, err = calculateSHA512sum(f, app, filePath, &opt) checkSum, err = calculateSHA512sum(f, app, filePath, &opt)
if err != nil { if err != nil {
e2elog.Failf("failed to calculate checksum: %v", err) framework.Failf("failed to calculate checksum: %v", err)
} }
// Create snapshot of the pvc // Create snapshot of the pvc
snapshotPath := cephFSExamplePath + "snapshot.yaml" snapshotPath := cephFSExamplePath + "snapshot.yaml"
@ -252,31 +252,31 @@ var _ = Describe("CephFS Upgrade Testing", func() {
snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
err = createSnapshot(&snap, deployTimeout) err = createSnapshot(&snap, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to create snapshot %v", err) framework.Failf("failed to create snapshot %v", err)
} }
validateCephFSSnapshotCount(f, 1, defaultSubvolumegroup, pv) validateCephFSSnapshotCount(f, 1, defaultSubvolumegroup, pv)
err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout) err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to delete application: %v", err) framework.Failf("failed to delete application: %v", err)
} }
deleteCephfsPlugin() deleteCephfsPlugin()
// switch back to current changes. // switch back to current changes.
err = os.Chdir(cwd) err = os.Chdir(cwd)
if err != nil { if err != nil {
e2elog.Failf("failed to d chdir: %v", err) framework.Failf("failed to d chdir: %v", err)
} }
deployCephfsPlugin() deployCephfsPlugin()
err = waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout) err = waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("timeout waiting for upgraded deployment %s: %v", cephFSDeploymentName, err) framework.Failf("timeout waiting for upgraded deployment %s: %v", cephFSDeploymentName, err)
} }
err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout) err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("timeout waiting for upgraded daemonset %s: %v", cephFSDeamonSetName, err) framework.Failf("timeout waiting for upgraded daemonset %s: %v", cephFSDeamonSetName, err)
} }
app.Labels = label app.Labels = label
@ -284,7 +284,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
// an earlier release. // an earlier release.
err = createApp(f.ClientSet, app, deployTimeout) err = createApp(f.ClientSet, app, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to create application: %v", err) framework.Failf("failed to create application: %v", err)
} }
}) })
@ -294,13 +294,13 @@ var _ = Describe("CephFS Upgrade Testing", func() {
label := make(map[string]string) label := make(map[string]string)
pvcClone, err = loadPVC(pvcClonePath) pvcClone, err = loadPVC(pvcClonePath)
if err != nil { if err != nil {
e2elog.Failf("failed to load pvc: %v", err) framework.Failf("failed to load pvc: %v", err)
} }
pvcClone.Namespace = f.UniqueName pvcClone.Namespace = f.UniqueName
pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize) pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
appClone, err = loadApp(appClonePath) appClone, err = loadApp(appClonePath)
if err != nil { if err != nil {
e2elog.Failf("failed to load application: %v", err) framework.Failf("failed to load application: %v", err)
} }
label[appKey] = "validate-snap-cephfs" label[appKey] = "validate-snap-cephfs"
appClone.Namespace = f.UniqueName appClone.Namespace = f.UniqueName
@ -308,12 +308,12 @@ var _ = Describe("CephFS Upgrade Testing", func() {
appClone.Labels = label appClone.Labels = label
err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout) err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to create pvc and application: %v", err) framework.Failf("failed to create pvc and application: %v", err)
} }
var pv *v1.PersistentVolume var pv *v1.PersistentVolume
_, pv, err = getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace) _, pv, err = getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace)
if err != nil { if err != nil {
e2elog.Failf("failed to get PV object for %s: %v", pvc.Name, err) framework.Failf("failed to get PV object for %s: %v", pvc.Name, err)
} }
opt := metav1.ListOptions{ opt := metav1.ListOptions{
@ -323,15 +323,15 @@ var _ = Describe("CephFS Upgrade Testing", func() {
testFilePath := filepath.Join(mountPath, "testClone") testFilePath := filepath.Join(mountPath, "testClone")
newCheckSum, err = calculateSHA512sum(f, appClone, testFilePath, &opt) newCheckSum, err = calculateSHA512sum(f, appClone, testFilePath, &opt)
if err != nil { if err != nil {
e2elog.Failf("failed to calculate checksum: %v", err) framework.Failf("failed to calculate checksum: %v", err)
} }
if strings.Compare(newCheckSum, checkSum) != 0 { if strings.Compare(newCheckSum, checkSum) != 0 {
e2elog.Failf( framework.Failf(
"The checksum of files did not match, expected %s received %s ", "The checksum of files did not match, expected %s received %s ",
checkSum, checkSum,
newCheckSum) newCheckSum)
} }
e2elog.Logf("The checksum of files matched") framework.Logf("The checksum of files matched")
// delete cloned pvc and pod // delete cloned pvc and pod
err = deletePVCAndApp("", f, pvcClone, appClone) err = deletePVCAndApp("", f, pvcClone, appClone)
@ -347,7 +347,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
err = deleteSnapshot(&snap, deployTimeout) err = deleteSnapshot(&snap, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to delete snapshot %v", err) framework.Failf("failed to delete snapshot %v", err)
} }
validateCephFSSnapshotCount(f, 0, defaultSubvolumegroup, pv) validateCephFSSnapshotCount(f, 0, defaultSubvolumegroup, pv)
}) })
@ -359,14 +359,14 @@ var _ = Describe("CephFS Upgrade Testing", func() {
pvcClone, err = loadPVC(pvcSmartClonePath) pvcClone, err = loadPVC(pvcSmartClonePath)
if err != nil { if err != nil {
e2elog.Failf("failed to load pvc: %v", err) framework.Failf("failed to load pvc: %v", err)
} }
pvcClone.Spec.DataSource.Name = pvc.Name pvcClone.Spec.DataSource.Name = pvc.Name
pvcClone.Namespace = f.UniqueName pvcClone.Namespace = f.UniqueName
pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize) pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
appClone, err = loadApp(appSmartClonePath) appClone, err = loadApp(appSmartClonePath)
if err != nil { if err != nil {
e2elog.Failf("failed to load application: %v", err) framework.Failf("failed to load application: %v", err)
} }
label[appKey] = "validate-snap-cephfs" label[appKey] = "validate-snap-cephfs"
appClone.Namespace = f.UniqueName appClone.Namespace = f.UniqueName
@ -374,7 +374,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
appClone.Labels = label appClone.Labels = label
err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout) err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to create pvc and application: %v", err) framework.Failf("failed to create pvc and application: %v", err)
} }
opt := metav1.ListOptions{ opt := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]), LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]),
@ -383,21 +383,21 @@ var _ = Describe("CephFS Upgrade Testing", func() {
testFilePath := filepath.Join(mountPath, "testClone") testFilePath := filepath.Join(mountPath, "testClone")
newCheckSum, err = calculateSHA512sum(f, appClone, testFilePath, &opt) newCheckSum, err = calculateSHA512sum(f, appClone, testFilePath, &opt)
if err != nil { if err != nil {
e2elog.Failf("failed to calculate checksum: %v", err) framework.Failf("failed to calculate checksum: %v", err)
} }
if strings.Compare(newCheckSum, checkSum) != 0 { if strings.Compare(newCheckSum, checkSum) != 0 {
e2elog.Failf( framework.Failf(
"The checksum of files did not match, expected %s received %s", "The checksum of files did not match, expected %s received %s",
checkSum, checkSum,
newCheckSum) newCheckSum)
} }
e2elog.Logf("The checksum of files matched") framework.Logf("The checksum of files matched")
// delete cloned pvc and pod // delete cloned pvc and pod
err = deletePVCAndApp("", f, pvcClone, appClone) err = deletePVCAndApp("", f, pvcClone, appClone)
if err != nil { if err != nil {
e2elog.Failf("failed to delete pvc and application: %v", err) framework.Failf("failed to delete pvc and application: %v", err)
} }
}) })
@ -411,40 +411,40 @@ var _ = Describe("CephFS Upgrade Testing", func() {
} }
pvc, err = getPersistentVolumeClaim(f.ClientSet, pvc.Namespace, pvc.Name) pvc, err = getPersistentVolumeClaim(f.ClientSet, pvc.Namespace, pvc.Name)
if err != nil { if err != nil {
e2elog.Failf("failed to get pvc: %v", err) framework.Failf("failed to get pvc: %v", err)
} }
// resize PVC // resize PVC
err = expandPVCSize(f.ClientSet, pvc, pvcExpandSize, deployTimeout) err = expandPVCSize(f.ClientSet, pvc, pvcExpandSize, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to expand pvc: %v", err) framework.Failf("failed to expand pvc: %v", err)
} }
// wait for application pod to come up after resize // wait for application pod to come up after resize
err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout, noError) err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout, noError)
if err != nil { if err != nil {
e2elog.Failf("timeout waiting for pod to be in running state: %v", err) framework.Failf("timeout waiting for pod to be in running state: %v", err)
} }
// validate if resize is successful. // validate if resize is successful.
err = checkDirSize(app, f, &opt, pvcExpandSize) err = checkDirSize(app, f, &opt, pvcExpandSize)
if err != nil { if err != nil {
e2elog.Failf("failed to check directory size: %v", err) framework.Failf("failed to check directory size: %v", err)
} }
}) })
By("delete pvc and app") By("delete pvc and app")
err = deletePVCAndApp("", f, pvc, app) err = deletePVCAndApp("", f, pvc, app)
if err != nil { if err != nil {
e2elog.Failf("failed to delete pvc and application: %v", err) framework.Failf("failed to delete pvc and application: %v", err)
} }
// delete cephFS provisioner secret // delete cephFS provisioner secret
err = deleteCephUser(f, keyringCephFSProvisionerUsername) err = deleteCephUser(f, keyringCephFSProvisionerUsername)
if err != nil { if err != nil {
e2elog.Failf("failed to delete user %s: %v", keyringCephFSProvisionerUsername, err) framework.Failf("failed to delete user %s: %v", keyringCephFSProvisionerUsername, err)
} }
// delete cephFS plugin secret // delete cephFS plugin secret
err = deleteCephUser(f, keyringCephFSNodePluginUsername) err = deleteCephUser(f, keyringCephFSNodePluginUsername)
if err != nil { if err != nil {
e2elog.Failf("failed to delete user %s: %v", keyringCephFSNodePluginUsername, err) framework.Failf("failed to delete user %s: %v", keyringCephFSNodePluginUsername, err)
} }
}) })
}) })

View File

@ -29,7 +29,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
"k8s.io/pod-security-admission/api" "k8s.io/pod-security-admission/api"
) )
@ -60,7 +60,7 @@ var _ = Describe("RBD Upgrade Testing", func() {
if cephCSINamespace != defaultNs { if cephCSINamespace != defaultNs {
err := createNamespace(c, cephCSINamespace) err := createNamespace(c, cephCSINamespace)
if err != nil { if err != nil {
e2elog.Failf("failed to create namespace: %v", err) framework.Failf("failed to create namespace: %v", err)
} }
} }
@ -69,52 +69,52 @@ var _ = Describe("RBD Upgrade Testing", func() {
var err error var err error
cwd, err = os.Getwd() cwd, err = os.Getwd()
if err != nil { if err != nil {
e2elog.Failf("failed to do getwd: %v", err) framework.Failf("failed to do getwd: %v", err)
} }
deployVault(f.ClientSet, deployTimeout) deployVault(f.ClientSet, deployTimeout)
err = upgradeAndDeployCSI(upgradeVersion, "rbd") err = upgradeAndDeployCSI(upgradeVersion, "rbd")
if err != nil { if err != nil {
e2elog.Failf("failed to upgrade and deploy CSI: %v", err) framework.Failf("failed to upgrade and deploy CSI: %v", err)
} }
err = createConfigMap(rbdDirPath, f.ClientSet, f) err = createConfigMap(rbdDirPath, f.ClientSet, f)
if err != nil { if err != nil {
e2elog.Failf("failed to create configmap: %v", err) framework.Failf("failed to create configmap: %v", err)
} }
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy)
if err != nil { if err != nil {
e2elog.Failf("failed to create storageclass: %v", err) framework.Failf("failed to create storageclass: %v", err)
} }
// create rbd provisioner secret // create rbd provisioner secret
key, err := createCephUser(f, keyringRBDProvisionerUsername, rbdProvisionerCaps("", "")) key, err := createCephUser(f, keyringRBDProvisionerUsername, rbdProvisionerCaps("", ""))
if err != nil { if err != nil {
e2elog.Failf("failed to create user %s: %v", keyringRBDProvisionerUsername, err) framework.Failf("failed to create user %s: %v", keyringRBDProvisionerUsername, err)
} }
err = createRBDSecret(f, rbdProvisionerSecretName, keyringRBDProvisionerUsername, key) err = createRBDSecret(f, rbdProvisionerSecretName, keyringRBDProvisionerUsername, key)
if err != nil { if err != nil {
e2elog.Failf("failed to create provisioner secret: %v", err) framework.Failf("failed to create provisioner secret: %v", err)
} }
// create rbd plugin secret // create rbd plugin secret
key, err = createCephUser(f, keyringRBDNodePluginUsername, rbdNodePluginCaps("", "")) key, err = createCephUser(f, keyringRBDNodePluginUsername, rbdNodePluginCaps("", ""))
if err != nil { if err != nil {
e2elog.Failf("failed to create user %s: %v", keyringRBDNodePluginUsername, err) framework.Failf("failed to create user %s: %v", keyringRBDNodePluginUsername, err)
} }
err = createRBDSecret(f, rbdNodePluginSecretName, keyringRBDNodePluginUsername, key) err = createRBDSecret(f, rbdNodePluginSecretName, keyringRBDNodePluginUsername, key)
if err != nil { if err != nil {
e2elog.Failf("failed to create node secret: %v", err) framework.Failf("failed to create node secret: %v", err)
} }
err = createRBDSnapshotClass(f) err = createRBDSnapshotClass(f)
if err != nil { if err != nil {
e2elog.Failf("failed to create snapshotclass: %v", err) framework.Failf("failed to create snapshotclass: %v", err)
} }
err = createNodeLabel(f, nodeRegionLabel, regionValue) err = createNodeLabel(f, nodeRegionLabel, regionValue)
if err != nil { if err != nil {
e2elog.Failf("failed to create node label: %v", err) framework.Failf("failed to create node label: %v", err)
} }
err = createNodeLabel(f, nodeZoneLabel, zoneValue) err = createNodeLabel(f, nodeZoneLabel, zoneValue)
if err != nil { if err != nil {
e2elog.Failf("failed to create node label: %v", err) framework.Failf("failed to create node label: %v", err)
} }
}) })
AfterEach(func() { AfterEach(func() {
@ -130,32 +130,32 @@ var _ = Describe("RBD Upgrade Testing", func() {
logsCSIPods("app=csi-rbdplugin", c) logsCSIPods("app=csi-rbdplugin", c)
// log all details from the namespace where Ceph-CSI is deployed // log all details from the namespace where Ceph-CSI is deployed
framework.DumpAllNamespaceInfo(c, cephCSINamespace) e2edebug.DumpAllNamespaceInfo(c, cephCSINamespace)
} }
err := deleteConfigMap(rbdDirPath) err := deleteConfigMap(rbdDirPath)
if err != nil { if err != nil {
e2elog.Failf("failed to delete configmap: %v", err) framework.Failf("failed to delete configmap: %v", err)
} }
err = c.CoreV1(). err = c.CoreV1().
Secrets(cephCSINamespace). Secrets(cephCSINamespace).
Delete(context.TODO(), rbdProvisionerSecretName, metav1.DeleteOptions{}) Delete(context.TODO(), rbdProvisionerSecretName, metav1.DeleteOptions{})
if err != nil { if err != nil {
e2elog.Failf("failed to delete provisioner secret: %v", err) framework.Failf("failed to delete provisioner secret: %v", err)
} }
err = c.CoreV1(). err = c.CoreV1().
Secrets(cephCSINamespace). Secrets(cephCSINamespace).
Delete(context.TODO(), rbdNodePluginSecretName, metav1.DeleteOptions{}) Delete(context.TODO(), rbdNodePluginSecretName, metav1.DeleteOptions{})
if err != nil { if err != nil {
e2elog.Failf("failed to delete node secret: %v", err) framework.Failf("failed to delete node secret: %v", err)
} }
err = deleteResource(rbdExamplePath + "storageclass.yaml") err = deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err) framework.Failf("failed to delete storageclass: %v", err)
} }
err = deleteResource(rbdExamplePath + "snapshotclass.yaml") err = deleteResource(rbdExamplePath + "snapshotclass.yaml")
if err != nil { if err != nil {
e2elog.Failf("failed to delete snapshotclass: %v", err) framework.Failf("failed to delete snapshotclass: %v", err)
} }
deleteVault() deleteVault()
if deployRBD { if deployRBD {
@ -163,17 +163,17 @@ var _ = Describe("RBD Upgrade Testing", func() {
if cephCSINamespace != defaultNs { if cephCSINamespace != defaultNs {
err = deleteNamespace(c, cephCSINamespace) err = deleteNamespace(c, cephCSINamespace)
if err != nil { if err != nil {
e2elog.Failf("failed to delete namespace: %v", err) framework.Failf("failed to delete namespace: %v", err)
} }
} }
} }
err = deleteNodeLabel(c, nodeRegionLabel) err = deleteNodeLabel(c, nodeRegionLabel)
if err != nil { if err != nil {
e2elog.Failf("failed to delete node label: %v", err) framework.Failf("failed to delete node label: %v", err)
} }
err = deleteNodeLabel(c, nodeZoneLabel) err = deleteNodeLabel(c, nodeZoneLabel)
if err != nil { if err != nil {
e2elog.Failf("failed to delete node label: %v", err) framework.Failf("failed to delete node label: %v", err)
} }
}) })
@ -189,14 +189,14 @@ var _ = Describe("RBD Upgrade Testing", func() {
By("checking provisioner deployment is running", func() { By("checking provisioner deployment is running", func() {
err := waitForDeploymentComplete(f.ClientSet, rbdDeploymentName, cephCSINamespace, deployTimeout) err := waitForDeploymentComplete(f.ClientSet, rbdDeploymentName, cephCSINamespace, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("timeout waiting for deployment %s: %v", rbdDeploymentName, err) framework.Failf("timeout waiting for deployment %s: %v", rbdDeploymentName, err)
} }
}) })
By("checking nodeplugin deamonset pods are running", func() { By("checking nodeplugin deamonset pods are running", func() {
err := waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) err := waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("timeout waiting for daemonset %s: %v", rbdDaemonsetName, err) framework.Failf("timeout waiting for daemonset %s: %v", rbdDaemonsetName, err)
} }
}) })
@ -208,13 +208,13 @@ var _ = Describe("RBD Upgrade Testing", func() {
pvc, err = loadPVC(pvcPath) pvc, err = loadPVC(pvcPath)
if err != nil { if err != nil {
e2elog.Failf("failed to load pvc: %v", err) framework.Failf("failed to load pvc: %v", err)
} }
pvc.Namespace = f.UniqueName pvc.Namespace = f.UniqueName
app, err = loadApp(appPath) app, err = loadApp(appPath)
if err != nil { if err != nil {
e2elog.Failf("failed to load application: %v", err) framework.Failf("failed to load application: %v", err)
} }
label[appKey] = appLabel label[appKey] = appLabel
app.Namespace = f.UniqueName app.Namespace = f.UniqueName
@ -222,7 +222,7 @@ var _ = Describe("RBD Upgrade Testing", func() {
pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize) pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
err = createPVCAndApp("", f, pvc, app, deployTimeout) err = createPVCAndApp("", f, pvc, app, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to create pvc: %v", err) framework.Failf("failed to create pvc: %v", err)
} }
opt := metav1.ListOptions{ opt := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]), LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]),
@ -238,22 +238,22 @@ var _ = Describe("RBD Upgrade Testing", func() {
app.Namespace, app.Namespace,
&opt) &opt)
if stdErr != "" { if stdErr != "" {
e2elog.Failf("failed to write data to a file %s", stdErr) framework.Failf("failed to write data to a file %s", stdErr)
} }
// force an immediate write of all cached data to disk. // force an immediate write of all cached data to disk.
_, stdErr = execCommandInPodAndAllowFail(f, fmt.Sprintf("sync %s", filePath), app.Namespace, &opt) _, stdErr = execCommandInPodAndAllowFail(f, fmt.Sprintf("sync %s", filePath), app.Namespace, &opt)
if stdErr != "" { if stdErr != "" {
e2elog.Failf("failed to sync data to a disk %s", stdErr) framework.Failf("failed to sync data to a disk %s", stdErr)
} }
opt = metav1.ListOptions{ opt = metav1.ListOptions{
LabelSelector: fmt.Sprintf("app=%s", appLabel), LabelSelector: fmt.Sprintf("app=%s", appLabel),
} }
e2elog.Logf("Calculating checksum of %s", filePath) framework.Logf("Calculating checksum of %s", filePath)
checkSum, err = calculateSHA512sum(f, app, filePath, &opt) checkSum, err = calculateSHA512sum(f, app, filePath, &opt)
if err != nil { if err != nil {
e2elog.Failf("failed to calculate checksum: %v", err) framework.Failf("failed to calculate checksum: %v", err)
} }
// Create snapshot of the pvc // Create snapshot of the pvc
@ -264,30 +264,30 @@ var _ = Describe("RBD Upgrade Testing", func() {
snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
err = createSnapshot(&snap, deployTimeout) err = createSnapshot(&snap, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to create snapshot %v", err) framework.Failf("failed to create snapshot %v", err)
} }
err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout) err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to delete application: %v", err) framework.Failf("failed to delete application: %v", err)
} }
deleteRBDPlugin() deleteRBDPlugin()
err = os.Chdir(cwd) err = os.Chdir(cwd)
if err != nil { if err != nil {
e2elog.Failf("failed to change directory: %v", err) framework.Failf("failed to change directory: %v", err)
} }
deployRBDPlugin() deployRBDPlugin()
err = waitForDeploymentComplete(f.ClientSet, rbdDeploymentName, cephCSINamespace, deployTimeout) err = waitForDeploymentComplete(f.ClientSet, rbdDeploymentName, cephCSINamespace, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("timeout waiting for upgraded deployment %s: %v", rbdDeploymentName, err) framework.Failf("timeout waiting for upgraded deployment %s: %v", rbdDeploymentName, err)
} }
err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("timeout waiting for upgraded daemonset %s: %v", rbdDaemonsetName, err) framework.Failf("timeout waiting for upgraded daemonset %s: %v", rbdDaemonsetName, err)
} }
// validate if the app gets bound to a pvc created by // validate if the app gets bound to a pvc created by
@ -295,7 +295,7 @@ var _ = Describe("RBD Upgrade Testing", func() {
app.Labels = label app.Labels = label
err = createApp(f.ClientSet, app, deployTimeout) err = createApp(f.ClientSet, app, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to create application: %v", err) framework.Failf("failed to create application: %v", err)
} }
}) })
@ -305,14 +305,14 @@ var _ = Describe("RBD Upgrade Testing", func() {
label := make(map[string]string) label := make(map[string]string)
pvcClone, err := loadPVC(pvcClonePath) pvcClone, err := loadPVC(pvcClonePath)
if err != nil { if err != nil {
e2elog.Failf("failed to load pvc: %v", err) framework.Failf("failed to load pvc: %v", err)
} }
pvcClone.Namespace = f.UniqueName pvcClone.Namespace = f.UniqueName
pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize) pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
pvcClone.Spec.DataSource.Name = "rbd-pvc-snapshot" pvcClone.Spec.DataSource.Name = "rbd-pvc-snapshot"
appClone, err := loadApp(appClonePath) appClone, err := loadApp(appClonePath)
if err != nil { if err != nil {
e2elog.Failf("failed to load application: %v", err) framework.Failf("failed to load application: %v", err)
} }
label[appKey] = "validate-snap-clone" label[appKey] = "validate-snap-clone"
appClone.Namespace = f.UniqueName appClone.Namespace = f.UniqueName
@ -320,7 +320,7 @@ var _ = Describe("RBD Upgrade Testing", func() {
appClone.Labels = label appClone.Labels = label
err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout) err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to create pvc: %v", err) framework.Failf("failed to create pvc: %v", err)
} }
opt := metav1.ListOptions{ opt := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]), LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]),
@ -329,20 +329,20 @@ var _ = Describe("RBD Upgrade Testing", func() {
testFilePath := filepath.Join(mountPath, "testClone") testFilePath := filepath.Join(mountPath, "testClone")
newCheckSum, err := calculateSHA512sum(f, appClone, testFilePath, &opt) newCheckSum, err := calculateSHA512sum(f, appClone, testFilePath, &opt)
if err != nil { if err != nil {
e2elog.Failf("failed to calculate checksum: %v", err) framework.Failf("failed to calculate checksum: %v", err)
} }
if strings.Compare(newCheckSum, checkSum) != 0 { if strings.Compare(newCheckSum, checkSum) != 0 {
e2elog.Failf( framework.Failf(
"The checksum of files did not match, expected %s received %s", "The checksum of files did not match, expected %s received %s",
checkSum, checkSum,
newCheckSum) newCheckSum)
} }
e2elog.Logf("The checksum of files matched") framework.Logf("The checksum of files matched")
// delete cloned pvc and pod // delete cloned pvc and pod
err = deletePVCAndApp("", f, pvcClone, appClone) err = deletePVCAndApp("", f, pvcClone, appClone)
if err != nil { if err != nil {
e2elog.Failf("failed to delete pvc and application: %v", err) framework.Failf("failed to delete pvc and application: %v", err)
} }
}) })
@ -353,14 +353,14 @@ var _ = Describe("RBD Upgrade Testing", func() {
pvcClone, err := loadPVC(pvcSmartClonePath) pvcClone, err := loadPVC(pvcSmartClonePath)
if err != nil { if err != nil {
e2elog.Failf("failed to load pvc: %v", err) framework.Failf("failed to load pvc: %v", err)
} }
pvcClone.Spec.DataSource.Name = pvc.Name pvcClone.Spec.DataSource.Name = pvc.Name
pvcClone.Namespace = f.UniqueName pvcClone.Namespace = f.UniqueName
pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize) pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
appClone, err := loadApp(appSmartClonePath) appClone, err := loadApp(appSmartClonePath)
if err != nil { if err != nil {
e2elog.Failf("failed to load application: %v", err) framework.Failf("failed to load application: %v", err)
} }
label[appKey] = "validate-clone" label[appKey] = "validate-clone"
appClone.Namespace = f.UniqueName appClone.Namespace = f.UniqueName
@ -368,7 +368,7 @@ var _ = Describe("RBD Upgrade Testing", func() {
appClone.Labels = label appClone.Labels = label
err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout) err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to create pvc: %v", err) framework.Failf("failed to create pvc: %v", err)
} }
opt := metav1.ListOptions{ opt := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]), LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]),
@ -377,20 +377,20 @@ var _ = Describe("RBD Upgrade Testing", func() {
testFilePath := filepath.Join(mountPath, "testClone") testFilePath := filepath.Join(mountPath, "testClone")
newCheckSum, err := calculateSHA512sum(f, appClone, testFilePath, &opt) newCheckSum, err := calculateSHA512sum(f, appClone, testFilePath, &opt)
if err != nil { if err != nil {
e2elog.Failf("failed to calculate checksum: %v", err) framework.Failf("failed to calculate checksum: %v", err)
} }
if strings.Compare(newCheckSum, checkSum) != 0 { if strings.Compare(newCheckSum, checkSum) != 0 {
e2elog.Failf( framework.Failf(
"The checksum of files did not match, expected %s received %s", "The checksum of files did not match, expected %s received %s",
checkSum, checkSum,
newCheckSum) newCheckSum)
} }
e2elog.Logf("The checksum of files matched") framework.Logf("The checksum of files matched")
// delete cloned pvc and pod // delete cloned pvc and pod
err = deletePVCAndApp("", f, pvcClone, appClone) err = deletePVCAndApp("", f, pvcClone, appClone)
if err != nil { if err != nil {
e2elog.Failf("failed to delete pvc and application: %v", err) framework.Failf("failed to delete pvc and application: %v", err)
} }
}) })
@ -405,41 +405,41 @@ var _ = Describe("RBD Upgrade Testing", func() {
var err error var err error
pvc, err = getPersistentVolumeClaim(f.ClientSet, pvc.Namespace, pvc.Name) pvc, err = getPersistentVolumeClaim(f.ClientSet, pvc.Namespace, pvc.Name)
if err != nil { if err != nil {
e2elog.Failf("failed to get pvc: %v", err) framework.Failf("failed to get pvc: %v", err)
} }
// resize PVC // resize PVC
err = expandPVCSize(f.ClientSet, pvc, pvcExpandSize, deployTimeout) err = expandPVCSize(f.ClientSet, pvc, pvcExpandSize, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to expand pvc: %v", err) framework.Failf("failed to expand pvc: %v", err)
} }
// wait for application pod to come up after resize // wait for application pod to come up after resize
err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout, noError) err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout, noError)
if err != nil { if err != nil {
e2elog.Failf("timeout waiting for pod to be in running state: %v", err) framework.Failf("timeout waiting for pod to be in running state: %v", err)
} }
// validate if resize is successful. // validate if resize is successful.
err = checkDirSize(app, f, &opt, pvcExpandSize) err = checkDirSize(app, f, &opt, pvcExpandSize)
if err != nil { if err != nil {
e2elog.Failf("failed to check directory size: %v", err) framework.Failf("failed to check directory size: %v", err)
} }
}) })
By("delete pvc and app", func() { By("delete pvc and app", func() {
err := deletePVCAndApp("", f, pvc, app) err := deletePVCAndApp("", f, pvc, app)
if err != nil { if err != nil {
e2elog.Failf("failed to delete pvc and application: %v", err) framework.Failf("failed to delete pvc and application: %v", err)
} }
}) })
// delete RBD provisioner secret // delete RBD provisioner secret
err := deleteCephUser(f, keyringRBDProvisionerUsername) err := deleteCephUser(f, keyringRBDProvisionerUsername)
if err != nil { if err != nil {
e2elog.Failf("failed to delete user %s: %v", keyringRBDProvisionerUsername, err) framework.Failf("failed to delete user %s: %v", keyringRBDProvisionerUsername, err)
} }
// delete RBD plugin secret // delete RBD plugin secret
err = deleteCephUser(f, keyringRBDNodePluginUsername) err = deleteCephUser(f, keyringRBDNodePluginUsername)
if err != nil { if err != nil {
e2elog.Failf("failed to delete user %s: %v", keyringRBDNodePluginUsername, err) framework.Failf("failed to delete user %s: %v", keyringRBDNodePluginUsername, err)
} }
}) })
}) })

View File

@ -41,7 +41,7 @@ import (
utilyaml "k8s.io/apimachinery/pkg/util/yaml" utilyaml "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
) )
/* #nosec:G101, values not credentials, just a reference to the location.*/ /* #nosec:G101, values not credentials, just a reference to the location.*/
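
In this utility file the replacement import is e2ekubectl rather than e2edebug: the kubectl helpers (RunKubectl, RunKubectlInput, ...) likewise left the top-level framework package in Kubernetes 1.26. A hedged sketch of the call this file's retryKubectlInput presumably wraps now (RunKubectlInput feeds the manifest to kubectl on stdin; argument order as in the upstream 1.26 package):

package e2e

import (
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)

// deleteManifestSketch is an illustrative guess, not code from this diff.
func deleteManifestSketch(ns, manifest string) error {
	_, err := e2ekubectl.RunKubectlInput(ns, manifest,
		"delete", "-f", "-", "--ignore-not-found=true")
	return err
}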
@ -218,11 +218,11 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace) stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
if err != nil { if err != nil {
if !strings.Contains(err.Error(), exitOneErr) { if !strings.Contains(err.Error(), exitOneErr) {
e2elog.Failf("failed to execute rados command '%s' : err=%v stdErr=%s", cmd, err, stdErr) framework.Failf("failed to execute rados command '%s' : err=%v stdErr=%s", cmd, err, stdErr)
} }
} }
if stdErr != "" { if stdErr != "" {
e2elog.Failf("failed to execute rados command '%s' : stdErr=%s", cmd, stdErr) framework.Failf("failed to execute rados command '%s' : stdErr=%s", cmd, stdErr)
} }
err = compareStdoutWithCount(stdOut, count) err = compareStdoutWithCount(stdOut, count)
if err == nil { if err == nil {
@ -232,10 +232,10 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
if strings.Contains(err.Error(), "expected omap object count") { if strings.Contains(err.Error(), "expected omap object count") {
stdOut, stdErr, err = execCommandInToolBoxPod(f, filterLessCmds[i], rookNamespace) stdOut, stdErr, err = execCommandInToolBoxPod(f, filterLessCmds[i], rookNamespace)
if err == nil { if err == nil {
e2elog.Logf("additional debug info: rados ls command output: %s, stdErr: %s", stdOut, stdErr) framework.Logf("additional debug info: rados ls command output: %s, stdErr: %s", stdOut, stdErr)
} }
} }
e2elog.Failf("%v", saveErr) framework.Failf("%v", saveErr)
} }
} }
} }
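
Worth calling out in the hunk above: on an omap count mismatch the helper does not abort immediately. It keeps the original comparison error, re-runs the companion filter-less "rados ls" command for extra context, logs that output, and only then fails with the saved error. Condensed into a sketch (lsCmd is a hypothetical stand-in for one of the repository's filterLessCmds entries):

// Hedged sketch; execCommandInToolBoxPod and rookNamespace come from this
// repository, lsCmd is a hypothetical stand-in.
func failWithListingDebug(f *framework.Framework, saveErr error, lsCmd string) {
	if strings.Contains(saveErr.Error(), "expected omap object count") {
		if out, stdErr, err := execCommandInToolBoxPod(f, lsCmd, rookNamespace); err == nil {
			framework.Logf("additional debug info: rados ls command output: %s, stdErr: %s", out, stdErr)
		}
	}
	framework.Failf("%v", saveErr) // fail with the original comparison error
}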
@ -320,11 +320,11 @@ func getSecret(path string) (v1.Secret, error) {
func deleteResource(scPath string) error { func deleteResource(scPath string) error {
data, err := replaceNamespaceInTemplate(scPath) data, err := replaceNamespaceInTemplate(scPath)
if err != nil { if err != nil {
e2elog.Logf("failed to read content from %s %v", scPath, err) framework.Logf("failed to read content from %s %v", scPath, err)
} }
err = retryKubectlInput(cephCSINamespace, kubectlDelete, data, deployTimeout, "--ignore-not-found=true") err = retryKubectlInput(cephCSINamespace, kubectlDelete, data, deployTimeout, "--ignore-not-found=true")
if err != nil { if err != nil {
e2elog.Logf("failed to delete %s %v", scPath, err) framework.Logf("failed to delete %s %v", scPath, err)
} }
return err return err
@ -789,21 +789,21 @@ func writeDataAndCalChecksum(app *v1.Pod, opt *metav1.ListOptions, f *framework.
// write data in PVC // write data in PVC
err := writeDataInPod(app, opt, f) err := writeDataInPod(app, opt, f)
if err != nil { if err != nil {
e2elog.Logf("failed to write data in the pod: %v", err) framework.Logf("failed to write data in the pod: %v", err)
return "", err return "", err
} }
checkSum, err := calculateSHA512sum(f, app, filePath, opt) checkSum, err := calculateSHA512sum(f, app, filePath, opt)
if err != nil { if err != nil {
e2elog.Logf("failed to calculate checksum: %v", err) framework.Logf("failed to calculate checksum: %v", err)
return checkSum, err return checkSum, err
} }
err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout) err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to delete pod: %v", err) framework.Failf("failed to delete pod: %v", err)
} }
return checkSum, nil return checkSum, nil
@ -824,18 +824,18 @@ func validatePVCClone(
chErrs := make([]error, totalCount) chErrs := make([]error, totalCount)
pvc, err := loadPVC(sourcePvcPath) pvc, err := loadPVC(sourcePvcPath)
if err != nil { if err != nil {
e2elog.Failf("failed to load PVC: %v", err) framework.Failf("failed to load PVC: %v", err)
} }
label := make(map[string]string) label := make(map[string]string)
pvc.Namespace = f.UniqueName pvc.Namespace = f.UniqueName
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to create PVC: %v", err) framework.Failf("failed to create PVC: %v", err)
} }
app, err := loadApp(sourceAppPath) app, err := loadApp(sourceAppPath)
if err != nil { if err != nil {
e2elog.Failf("failed to load app: %v", err) framework.Failf("failed to load app: %v", err)
} }
label[appKey] = appLabel label[appKey] = appLabel
app.Namespace = f.UniqueName app.Namespace = f.UniqueName
@ -848,19 +848,19 @@ func validatePVCClone(
checkSum := "" checkSum := ""
pvc, err = getPersistentVolumeClaim(f.ClientSet, pvc.Namespace, pvc.Name) pvc, err = getPersistentVolumeClaim(f.ClientSet, pvc.Namespace, pvc.Name)
if err != nil { if err != nil {
e2elog.Failf("failed to get pvc %v", err) framework.Failf("failed to get pvc %v", err)
} }
if *pvc.Spec.VolumeMode == v1.PersistentVolumeFilesystem { if *pvc.Spec.VolumeMode == v1.PersistentVolumeFilesystem {
checkSum, err = writeDataAndCalChecksum(app, &opt, f) checkSum, err = writeDataAndCalChecksum(app, &opt, f)
if err != nil { if err != nil {
e2elog.Failf("failed to calculate checksum: %v", err) framework.Failf("failed to calculate checksum: %v", err)
} }
} }
// validate created backend rbd images // validate created backend rbd images
validateRBDImageCount(f, 1, defaultRBDPool) validateRBDImageCount(f, 1, defaultRBDPool)
pvcClone, err := loadPVC(clonePvcPath) pvcClone, err := loadPVC(clonePvcPath)
if err != nil { if err != nil {
e2elog.Failf("failed to load PVC: %v", err) framework.Failf("failed to load PVC: %v", err)
} }
pvcClone.Spec.DataSource.Name = pvc.Name pvcClone.Spec.DataSource.Name = pvc.Name
pvcClone.Namespace = f.UniqueName pvcClone.Namespace = f.UniqueName
@ -870,7 +870,7 @@ func validatePVCClone(
appClone, err := loadApp(clonePvcAppPath) appClone, err := loadApp(clonePvcAppPath)
if err != nil { if err != nil {
e2elog.Failf("failed to load application: %v", err) framework.Failf("failed to load application: %v", err)
} }
appClone.Namespace = f.UniqueName appClone.Namespace = f.UniqueName
wg.Add(totalCount) wg.Add(totalCount)
@ -902,7 +902,7 @@ func validatePVCClone(
// check new passphrase created // check new passphrase created
stdOut, stdErr := kms.getPassphrase(f, imageData.csiVolumeHandle) stdOut, stdErr := kms.getPassphrase(f, imageData.csiVolumeHandle)
if stdOut != "" { if stdOut != "" {
e2elog.Logf("successfully read the passphrase from vault: %s", stdOut) framework.Logf("successfully read the passphrase from vault: %s", stdOut)
} }
if stdErr != "" { if stdErr != "" {
wgErrs[n] = fmt.Errorf("failed to read passphrase from vault: %s", stdErr) wgErrs[n] = fmt.Errorf("failed to read passphrase from vault: %s", stdErr)
@ -913,14 +913,14 @@ func validatePVCClone(
if *pvc.Spec.VolumeMode == v1.PersistentVolumeFilesystem && wgErrs[n] == nil { if *pvc.Spec.VolumeMode == v1.PersistentVolumeFilesystem && wgErrs[n] == nil {
filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test" filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
var checkSumClone string var checkSumClone string
e2elog.Logf("Calculating checksum clone for filepath %s", filePath) framework.Logf("Calculating checksum clone for filepath %s", filePath)
checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt) checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt)
e2elog.Logf("checksum for clone is %s", checkSumClone) framework.Logf("checksum for clone is %s", checkSumClone)
if chErrs[n] != nil { if chErrs[n] != nil {
e2elog.Logf("Failed calculating checksum clone %s", chErrs[n]) framework.Logf("Failed calculating checksum clone %s", chErrs[n])
} }
if checkSumClone != checkSum { if checkSumClone != checkSum {
e2elog.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone) framework.Logf("checksum didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone)
} }
} }
if wgErrs[n] == nil && validatePVC != nil && kms != noKMS { if wgErrs[n] == nil && validatePVC != nil && kms != noKMS {
@ -935,23 +935,23 @@ func validatePVCClone(
for i, err := range wgErrs { for i, err := range wgErrs {
if err != nil { if err != nil {
// not using Failf() as it aborts the test and does not log other errors // not using Failf() as it aborts the test and does not log other errors
e2elog.Logf("failed to create PVC (%s%d): %v", f.UniqueName, i, err) framework.Logf("failed to create PVC (%s%d): %v", f.UniqueName, i, err)
failed++ failed++
} }
} }
if failed != 0 { if failed != 0 {
e2elog.Failf("creating PVCs failed, %d errors were logged", failed) framework.Failf("creating PVCs failed, %d errors were logged", failed)
} }
for i, err := range chErrs { for i, err := range chErrs {
if err != nil { if err != nil {
// not using Failf() as it aborts the test and does not log other errors // not using Failf() as it aborts the test and does not log other errors
e2elog.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err) framework.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
failed++ failed++
} }
} }
if failed != 0 { if failed != 0 {
e2elog.Failf("calculating checksum failed, %d errors were logged", failed) framework.Failf("calculating checksum failed, %d errors were logged", failed)
} }
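
The comment "not using Failf() as it aborts the test and does not log other errors" names the design choice behind every loop of this shape: worker goroutines record their error by index, the main goroutine logs each one with Logf (which never aborts), and a single Failf at the end reports the aggregate. As a condensed sketch:

package e2e

import "k8s.io/kubernetes/test/e2e/framework"

// reportWorkerErrors condenses the collect-then-fail idiom used above.
func reportWorkerErrors(wgErrs []error) {
	failed := 0
	for i, err := range wgErrs {
		if err != nil {
			framework.Logf("worker %d: %v", i, err) // log, never abort
			failed++
		}
	}
	if failed != 0 {
		framework.Failf("%d workers failed", failed) // one abort, with the total
	}
}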
// total images in cluster is 1 parent rbd image + total // total images in cluster is 1 parent rbd image + total
@ -961,7 +961,7 @@ func validatePVCClone(
// delete parent pvc // delete parent pvc
err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to delete PVC: %v", err) framework.Failf("failed to delete PVC: %v", err)
} }
totalCloneCount = totalCount + totalCount totalCloneCount = totalCount + totalCount
@ -1013,12 +1013,12 @@ func validatePVCClone(
for i, err := range wgErrs { for i, err := range wgErrs {
if err != nil { if err != nil {
// not using Failf() as it aborts the test and does not log other errors // not using Failf() as it aborts the test and does not log other errors
e2elog.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err) framework.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err)
failed++ failed++
} }
} }
if failed != 0 { if failed != 0 {
e2elog.Failf("deleting PVCs and applications failed, %d errors were logged", failed) framework.Failf("deleting PVCs and applications failed, %d errors were logged", failed)
} }
validateRBDImageCount(f, 0, defaultRBDPool) validateRBDImageCount(f, 0, defaultRBDPool)
@ -1037,28 +1037,28 @@ func validatePVCSnapshot(
chErrs := make([]error, totalCount) chErrs := make([]error, totalCount)
err := createRBDSnapshotClass(f) err := createRBDSnapshotClass(f)
if err != nil { if err != nil {
e2elog.Failf("failed to create storageclass: %v", err) framework.Failf("failed to create storageclass: %v", err)
} }
defer func() { defer func() {
err = deleteRBDSnapshotClass() err = deleteRBDSnapshotClass()
if err != nil { if err != nil {
e2elog.Failf("failed to delete VolumeSnapshotClass: %v", err) framework.Failf("failed to delete VolumeSnapshotClass: %v", err)
} }
}() }()
pvc, err := loadPVC(pvcPath) pvc, err := loadPVC(pvcPath)
if err != nil { if err != nil {
e2elog.Failf("failed to load PVC: %v", err) framework.Failf("failed to load PVC: %v", err)
} }
label := make(map[string]string) label := make(map[string]string)
pvc.Namespace = f.UniqueName pvc.Namespace = f.UniqueName
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to create PVC: %v", err) framework.Failf("failed to create PVC: %v", err)
} }
app, err := loadApp(appPath) app, err := loadApp(appPath)
if err != nil { if err != nil {
e2elog.Failf("failed to load app: %v", err) framework.Failf("failed to load app: %v", err)
} }
// write data in PVC // write data in PVC
label[appKey] = appLabel label[appKey] = appLabel
@ -1070,7 +1070,7 @@ func validatePVCSnapshot(
app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name
checkSum, err := writeDataAndCalChecksum(app, &opt, f) checkSum, err := writeDataAndCalChecksum(app, &opt, f)
if err != nil { if err != nil {
e2elog.Failf("failed to calculate checksum: %v", err) framework.Failf("failed to calculate checksum: %v", err)
} }
validateRBDImageCount(f, 1, defaultRBDPool) validateRBDImageCount(f, 1, defaultRBDPool)
snap := getSnapshot(snapshotPath) snap := getSnapshot(snapshotPath)
@ -1110,23 +1110,23 @@ func validatePVCSnapshot(
for i, err := range wgErrs { for i, err := range wgErrs {
if err != nil { if err != nil {
// not using Failf() as it aborts the test and does not log other errors // not using Failf() as it aborts the test and does not log other errors
e2elog.Logf("failed to create snapshot (%s%d): %v", f.UniqueName, i, err) framework.Logf("failed to create snapshot (%s%d): %v", f.UniqueName, i, err)
failed++ failed++
} }
} }
if failed != 0 { if failed != 0 {
e2elog.Failf("creating snapshots failed, %d errors were logged", failed) framework.Failf("creating snapshots failed, %d errors were logged", failed)
} }
// total images in cluster is 1 parent rbd image + total snaps // total images in cluster is 1 parent rbd image + total snaps
validateRBDImageCount(f, totalCount+1, defaultRBDPool) validateRBDImageCount(f, totalCount+1, defaultRBDPool)
pvcClone, err := loadPVC(pvcClonePath) pvcClone, err := loadPVC(pvcClonePath)
if err != nil { if err != nil {
e2elog.Failf("failed to load PVC: %v", err) framework.Failf("failed to load PVC: %v", err)
} }
appClone, err := loadApp(appClonePath) appClone, err := loadApp(appClonePath)
if err != nil { if err != nil {
e2elog.Failf("failed to load application: %v", err) framework.Failf("failed to load application: %v", err)
} }
pvcClone.Namespace = f.UniqueName pvcClone.Namespace = f.UniqueName
appClone.Namespace = f.UniqueName appClone.Namespace = f.UniqueName
@ -1170,14 +1170,14 @@ func validatePVCSnapshot(
if wgErrs[n] == nil { if wgErrs[n] == nil {
filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test" filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
var checkSumClone string var checkSumClone string
e2elog.Logf("calculating checksum clone for filepath %s", filePath) framework.Logf("calculating checksum clone for filepath %s", filePath)
checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt) checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt)
e2elog.Logf("checksum value for the clone is %s with pod name %s", checkSumClone, name) framework.Logf("checksum value for the clone is %s with pod name %s", checkSumClone, name)
if chErrs[n] != nil { if chErrs[n] != nil {
e2elog.Logf("failed to calculte checksum for clone: %s", chErrs[n]) framework.Logf("failed to calculte checksum for clone: %s", chErrs[n])
} }
if checkSumClone != checkSum { if checkSumClone != checkSum {
e2elog.Logf( framework.Logf(
"checksum value didn't match. checksum=%s and checksumclone=%s", "checksum value didn't match. checksum=%s and checksumclone=%s",
checkSum, checkSum,
checkSumClone) checkSumClone)
@ -1191,23 +1191,23 @@ func validatePVCSnapshot(
for i, err := range wgErrs { for i, err := range wgErrs {
if err != nil { if err != nil {
// not using Failf() as it aborts the test and does not log other errors // not using Failf() as it aborts the test and does not log other errors
e2elog.Logf("failed to create PVC and application (%s%d): %v", f.UniqueName, i, err) framework.Logf("failed to create PVC and application (%s%d): %v", f.UniqueName, i, err)
failed++ failed++
} }
} }
if failed != 0 { if failed != 0 {
e2elog.Failf("creating PVCs and applications failed, %d errors were logged", failed) framework.Failf("creating PVCs and applications failed, %d errors were logged", failed)
} }
for i, err := range chErrs { for i, err := range chErrs {
if err != nil { if err != nil {
// not using Failf() as it aborts the test and does not log other errors // not using Failf() as it aborts the test and does not log other errors
e2elog.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err) framework.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err)
failed++ failed++
} }
} }
if failed != 0 { if failed != 0 {
e2elog.Failf("calculating checksum failed, %d errors were logged", failed) framework.Failf("calculating checksum failed, %d errors were logged", failed)
} }
// total images in cluster is 1 parent rbd image + total // total images in cluster is 1 parent rbd image + total
// snaps + total clones // snaps + total clones
@ -1228,12 +1228,12 @@ func validatePVCSnapshot(
for i, err := range wgErrs { for i, err := range wgErrs {
if err != nil { if err != nil {
// not using Failf() as it aborts the test and does not log other errors // not using Failf() as it aborts the test and does not log other errors
e2elog.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err) framework.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err)
failed++ failed++
} }
} }
if failed != 0 { if failed != 0 {
e2elog.Failf("deleting PVCs and applications failed, %d errors were logged", failed) framework.Failf("deleting PVCs and applications failed, %d errors were logged", failed)
} }
// total images in cluster is 1 parent rbd image + total // total images in cluster is 1 parent rbd image + total
@ -1259,12 +1259,12 @@ func validatePVCSnapshot(
for i, err := range wgErrs { for i, err := range wgErrs {
if err != nil { if err != nil {
// not using Failf() as it aborts the test and does not log other errors // not using Failf() as it aborts the test and does not log other errors
e2elog.Logf("failed to create PVC and application (%s%d): %v", f.UniqueName, i, err) framework.Logf("failed to create PVC and application (%s%d): %v", f.UniqueName, i, err)
failed++ failed++
} }
} }
if failed != 0 { if failed != 0 {
e2elog.Failf("creating PVCs and applications failed, %d errors were logged", failed) framework.Failf("creating PVCs and applications failed, %d errors were logged", failed)
} }
// total images in cluster is 1 parent rbd image + total // total images in cluster is 1 parent rbd image + total
@ -1274,7 +1274,7 @@ func validatePVCSnapshot(
// delete parent pvc // delete parent pvc
err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil { if err != nil {
e2elog.Failf("failed to delete PVC: %v", err) framework.Failf("failed to delete PVC: %v", err)
} }
// total images in cluster is total snaps + total clones // total images in cluster is total snaps + total clones
@ -1325,12 +1325,12 @@ func validatePVCSnapshot(
for i, err := range wgErrs { for i, err := range wgErrs {
if err != nil { if err != nil {
// not using Failf() as it aborts the test and does not log other errors // not using Failf() as it aborts the test and does not log other errors
e2elog.Logf("failed to delete snapshot (%s%d): %v", f.UniqueName, i, err) framework.Logf("failed to delete snapshot (%s%d): %v", f.UniqueName, i, err)
failed++ failed++
} }
} }
if failed != 0 { if failed != 0 {
e2elog.Failf("deleting snapshots failed, %d errors were logged", failed) framework.Failf("deleting snapshots failed, %d errors were logged", failed)
} }
validateRBDImageCount(f, totalCount, defaultRBDPool) validateRBDImageCount(f, totalCount, defaultRBDPool)
@ -1349,12 +1349,12 @@ func validatePVCSnapshot(
for i, err := range wgErrs { for i, err := range wgErrs {
if err != nil { if err != nil {
// not using Failf() as it aborts the test and does not log other errors // not using Failf() as it aborts the test and does not log other errors
e2elog.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err) framework.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err)
failed++ failed++
} }
} }
if failed != 0 { if failed != 0 {
e2elog.Failf("deleting PVCs and applications failed, %d errors were logged", failed) framework.Failf("deleting PVCs and applications failed, %d errors were logged", failed)
} }
// validate created backend rbd images // validate created backend rbd images
@ -1436,7 +1436,7 @@ func validateController(
pv.ResourceVersion = "" pv.ResourceVersion = ""
err = createPVCAndPV(f.ClientSet, pvc, pv) err = createPVCAndPV(f.ClientSet, pvc, pv)
if err != nil { if err != nil {
e2elog.Failf("failed to create PVC or PV: %v", err) framework.Failf("failed to create PVC or PV: %v", err)
} }
// bind PVC to application // bind PVC to application
app, err := loadApp(appPath) app, err := loadApp(appPath)
@ -1496,7 +1496,7 @@ func validateController(
func k8sVersionGreaterEquals(c kubernetes.Interface, major, minor int) bool { func k8sVersionGreaterEquals(c kubernetes.Interface, major, minor int) bool {
v, err := c.Discovery().ServerVersion() v, err := c.Discovery().ServerVersion()
if err != nil { if err != nil {
e2elog.Failf("failed to get server version: %v", err) framework.Failf("failed to get server version: %v", err)
// Failf() marks the case as failure, and returns from the // Failf() marks the case as failure, and returns from the
// Go-routine that runs the case. This function will not have a // Go-routine that runs the case. This function will not have a
// return value. // return value.
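As the comment above notes, framework.Failf marks the case as failed and aborts the goroutine that runs it (internally it panics via ginkgo), so any statement after the call is unreachable. A hedged sketch of the resulting control flow, with an illustrative function name:

package e2e

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// serverVersionKnown is a hypothetical variant of k8sVersionGreaterEquals,
// shown only to illustrate the control flow around Failf.
func serverVersionKnown(c kubernetes.Interface) bool {
	v, err := c.Discovery().ServerVersion()
	if err != nil {
		framework.Failf("failed to get server version: %v", err)
		// Failf never returns, so this line only satisfies the compiler.
		return false
	}
	return v != nil
}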
@ -1514,7 +1514,7 @@ func waitForJobCompletion(c kubernetes.Interface, ns, job string, timeout int) e
t := time.Duration(timeout) * time.Minute t := time.Duration(timeout) * time.Minute
start := time.Now() start := time.Now()
e2elog.Logf("waiting for Job %s/%s to be in state %q", ns, job, batch.JobComplete) framework.Logf("waiting for Job %s/%s to be in state %q", ns, job, batch.JobComplete)
return wait.PollImmediate(poll, t, func() (bool, error) { return wait.PollImmediate(poll, t, func() (bool, error) {
j, err := c.BatchV1().Jobs(ns).Get(context.TODO(), job, metav1.GetOptions{}) j, err := c.BatchV1().Jobs(ns).Get(context.TODO(), job, metav1.GetOptions{})
@ -1531,7 +1531,7 @@ func waitForJobCompletion(c kubernetes.Interface, ns, job string, timeout int) e
return true, nil return true, nil
} }
e2elog.Logf( framework.Logf(
"Job %s/%s has not completed yet (%d seconds elapsed)", "Job %s/%s has not completed yet (%d seconds elapsed)",
ns, job, int(time.Since(start).Seconds())) ns, job, int(time.Since(start).Seconds()))
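waitForJobCompletion, like the retryKubectl* helpers below, is built on the same wait.PollImmediate loop: evaluate the condition immediately, then once per poll interval, until it returns true, a hard error, or the timeout expires. A self-contained sketch of that polling idiom (the 2-second interval stands in for the package-level poll constant):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()
	// Check immediately, then every 2 seconds, for up to 1 minute.
	err := wait.PollImmediate(2*time.Second, time.Minute, func() (bool, error) {
		if time.Since(start) > 5*time.Second {
			return true, nil // condition met, stop polling
		}
		return false, nil // not ready yet; a non-nil error would abort early
	})
	fmt.Println("poll finished:", err)
}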
@ -1561,7 +1561,7 @@ func (ka kubectlAction) String() string {
// no error occurred, or the timeout passed. // no error occurred, or the timeout passed.
func retryKubectlInput(namespace string, action kubectlAction, data string, t int, args ...string) error { func retryKubectlInput(namespace string, action kubectlAction, data string, t int, args ...string) error {
timeout := time.Duration(t) * time.Minute timeout := time.Duration(t) * time.Minute
e2elog.Logf("waiting for kubectl (%s -f args %s) to finish", action, args) framework.Logf("waiting for kubectl (%s -f args %s) to finish", action, args)
start := time.Now() start := time.Now()
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
@ -1571,7 +1571,7 @@ func retryKubectlInput(namespace string, action kubectlAction, data string, t in
} }
cmd = append(cmd, []string{string(action), "-f", "-"}...) cmd = append(cmd, []string{string(action), "-f", "-"}...)
_, err := framework.RunKubectlInput(namespace, data, cmd...) _, err := e2ekubectl.RunKubectlInput(namespace, data, cmd...)
if err != nil { if err != nil {
if isRetryableAPIError(err) { if isRetryableAPIError(err) {
return false, nil return false, nil
@ -1582,7 +1582,7 @@ func retryKubectlInput(namespace string, action kubectlAction, data string, t in
if action == kubectlDelete && isNotFoundCLIError(err) { if action == kubectlDelete && isNotFoundCLIError(err) {
return true, nil return true, nil
} }
e2elog.Logf( framework.Logf(
"will run kubectl (%s) args (%s) again (%d seconds elapsed)", "will run kubectl (%s) args (%s) again (%d seconds elapsed)",
action, action,
args, args,
@ -1600,7 +1600,7 @@ func retryKubectlInput(namespace string, action kubectlAction, data string, t in
// occurred, or the timeout passed. // occurred, or the timeout passed.
func retryKubectlFile(namespace string, action kubectlAction, filename string, t int, args ...string) error { func retryKubectlFile(namespace string, action kubectlAction, filename string, t int, args ...string) error {
timeout := time.Duration(t) * time.Minute timeout := time.Duration(t) * time.Minute
e2elog.Logf("waiting for kubectl (%s -f %q args %s) to finish", action, filename, args) framework.Logf("waiting for kubectl (%s -f %q args %s) to finish", action, filename, args)
start := time.Now() start := time.Now()
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
@ -1610,7 +1610,7 @@ func retryKubectlFile(namespace string, action kubectlAction, filename string, t
} }
cmd = append(cmd, []string{string(action), "-f", filename}...) cmd = append(cmd, []string{string(action), "-f", filename}...)
_, err := framework.RunKubectl(namespace, cmd...) _, err := e2ekubectl.RunKubectl(namespace, cmd...)
if err != nil { if err != nil {
if isRetryableAPIError(err) { if isRetryableAPIError(err) {
return false, nil return false, nil
@ -1621,7 +1621,7 @@ func retryKubectlFile(namespace string, action kubectlAction, filename string, t
if action == kubectlDelete && isNotFoundCLIError(err) { if action == kubectlDelete && isNotFoundCLIError(err) {
return true, nil return true, nil
} }
e2elog.Logf( framework.Logf(
"will run kubectl (%s -f %q args %s) again (%d seconds elapsed)", "will run kubectl (%s -f %q args %s) again (%d seconds elapsed)",
action, action,
filename, filename,
@ -1642,11 +1642,11 @@ func retryKubectlFile(namespace string, action kubectlAction, filename string, t
func retryKubectlArgs(namespace string, action kubectlAction, t int, args ...string) error { func retryKubectlArgs(namespace string, action kubectlAction, t int, args ...string) error {
timeout := time.Duration(t) * time.Minute timeout := time.Duration(t) * time.Minute
args = append([]string{string(action)}, args...) args = append([]string{string(action)}, args...)
e2elog.Logf("waiting for kubectl (%s args) to finish", args) framework.Logf("waiting for kubectl (%s args) to finish", args)
start := time.Now() start := time.Now()
return wait.PollImmediate(poll, timeout, func() (bool, error) { return wait.PollImmediate(poll, timeout, func() (bool, error) {
_, err := framework.RunKubectl(namespace, args...) _, err := e2ekubectl.RunKubectl(namespace, args...)
if err != nil { if err != nil {
if isRetryableAPIError(err) { if isRetryableAPIError(err) {
return false, nil return false, nil
@ -1657,7 +1657,7 @@ func retryKubectlArgs(namespace string, action kubectlAction, t int, args ...str
if action == kubectlDelete && isNotFoundCLIError(err) { if action == kubectlDelete && isNotFoundCLIError(err) {
return true, nil return true, nil
} }
e2elog.Logf( framework.Logf(
"will run kubectl (%s) again (%d seconds elapsed)", "will run kubectl (%s) again (%d seconds elapsed)",
args, args,
int(time.Since(start).Seconds())) int(time.Since(start).Seconds()))
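The mechanical renames throughout this file follow from two Kubernetes 1.26 test-framework moves: the e2elog wrappers were folded into the framework package itself, and the kubectl runners moved into a dedicated subpackage. Assuming the upstream 1.26 import paths, the call sites change as sketched below (listPods is illustrative):

package e2e

import (
	"k8s.io/kubernetes/test/e2e/framework"
	// RunKubectl and RunKubectlInput now live in their own subpackage.
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)

// listPods is illustrative only; it shows the post-migration call sites.
func listPods(namespace string) {
	out, err := e2ekubectl.RunKubectl(namespace, "get", "pods")
	if err != nil {
		framework.Failf("kubectl failed: %v", err) // was e2elog.Failf
	}
	framework.Logf("kubectl output:\n%s", out) // was e2elog.Logf
}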

go.mod

@ -1,11 +1,11 @@
module github.com/ceph/ceph-csi module github.com/ceph/ceph-csi
go 1.18 go 1.19
require ( require (
github.com/IBM/keyprotect-go-client v0.9.2 github.com/IBM/keyprotect-go-client v0.9.2
github.com/aws/aws-sdk-go v1.44.172 github.com/aws/aws-sdk-go v1.44.190
github.com/aws/aws-sdk-go-v2/service/sts v1.17.7 github.com/aws/aws-sdk-go-v2/service/sts v1.18.2
github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000 github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000
// TODO: API for managing subvolume metadata and snapshot metadata requires `ceph_ci_untested` build-tag // TODO: API for managing subvolume metadata and snapshot metadata requires `ceph_ci_untested` build-tag
github.com/ceph/go-ceph v0.19.0 github.com/ceph/go-ceph v0.19.0
@ -18,33 +18,33 @@ require (
github.com/google/uuid v1.3.0 github.com/google/uuid v1.3.0
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/hashicorp/vault/api v1.8.2 github.com/hashicorp/vault/api v1.8.3
github.com/kubernetes-csi/csi-lib-utils v0.11.0 github.com/kubernetes-csi/csi-lib-utils v0.11.0
github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0 github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0
github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a
github.com/onsi/ginkgo/v2 v2.4.0 github.com/onsi/ginkgo/v2 v2.8.0
github.com/onsi/gomega v1.23.0 github.com/onsi/gomega v1.26.0
github.com/pkg/xattr v0.4.9 github.com/pkg/xattr v0.4.9
github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_golang v1.14.0
github.com/stretchr/testify v1.8.1 github.com/stretchr/testify v1.8.1
golang.org/x/crypto v0.4.0 golang.org/x/crypto v0.5.0
golang.org/x/net v0.5.0 golang.org/x/net v0.5.0
golang.org/x/sys v0.4.0 golang.org/x/sys v0.4.0
google.golang.org/grpc v1.51.0 google.golang.org/grpc v1.52.0
google.golang.org/protobuf v1.28.1 google.golang.org/protobuf v1.28.1
k8s.io/api v0.26.0 k8s.io/api v0.26.1
k8s.io/apimachinery v0.26.0 k8s.io/apimachinery v0.26.1
k8s.io/client-go v12.0.0+incompatible k8s.io/client-go v12.0.0+incompatible
k8s.io/cloud-provider v0.25.4 k8s.io/cloud-provider v0.26.1
k8s.io/klog/v2 v2.80.1 k8s.io/klog/v2 v2.80.1
// //
// when updating k8s.io/kubernetes, make sure to update the replace section too // when updating k8s.io/kubernetes, make sure to update the replace section too
// //
k8s.io/kubernetes v1.25.4 k8s.io/kubernetes v1.26.1
k8s.io/mount-utils v0.25.4 k8s.io/mount-utils v0.26.1
k8s.io/pod-security-admission v0.0.0 k8s.io/pod-security-admission v0.0.0
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
sigs.k8s.io/controller-runtime v0.11.0-beta.0.0.20211208212546-f236f0345ad2 sigs.k8s.io/controller-runtime v0.14.2
) )
require ( require (
@ -60,17 +60,20 @@ require (
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v3 v3.0.0 // indirect github.com/cenkalti/backoff/v3 v3.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/fatih/color v1.9.0 // indirect github.com/fatih/color v1.9.0 // indirect
github.com/felixge/httpsnoop v1.0.1 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gemalto/flume v0.13.0 // indirect github.com/gemalto/flume v0.13.0 // indirect
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect
github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/swag v0.22.3 // indirect github.com/go-openapi/swag v0.22.3 // indirect
@ -81,7 +84,7 @@ require (
github.com/google/gnostic v0.6.9 // indirect github.com/google/gnostic v0.6.9 // indirect
github.com/google/go-cmp v0.5.9 // indirect github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect github.com/google/gofuzz v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v0.16.2 // indirect github.com/hashicorp/go-hclog v0.16.2 // indirect
@ -99,10 +102,10 @@ require (
github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hashicorp/vault v1.4.2 // indirect github.com/hashicorp/vault v1.4.2 // indirect
github.com/hashicorp/vault/sdk v0.6.0 // indirect github.com/hashicorp/vault/sdk v0.7.0 // indirect
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect
github.com/imdario/mergo v0.3.12 // indirect github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect github.com/json-iterator/go v1.1.12 // indirect
@ -110,7 +113,7 @@ require (
github.com/mailru/easyjson v0.7.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect github.com/mattn/go-isatty v0.0.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/mitchellh/copystructure v1.0.0 // indirect github.com/mitchellh/copystructure v1.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect
@ -118,7 +121,7 @@ require (
github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.1 // indirect github.com/mitchellh/reflectwalk v1.0.1 // indirect
github.com/moby/spdystream v0.2.0 // indirect github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/sys/mountinfo v0.6.0 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
@ -133,40 +136,39 @@ require (
github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/spf13/cobra v1.4.0 // indirect github.com/spf13/cobra v1.6.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/pflag v1.0.5 // indirect
go.opentelemetry.io/contrib v0.20.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 // indirect go.opentelemetry.io/otel v1.10.0 // indirect
go.opentelemetry.io/otel v0.20.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
go.opentelemetry.io/otel/metric v0.20.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect
go.opentelemetry.io/otel/sdk v0.20.0 // indirect go.opentelemetry.io/otel/metric v0.31.0 // indirect
go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect go.opentelemetry.io/otel/sdk v1.10.0 // indirect
go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect go.opentelemetry.io/otel/trace v1.10.0 // indirect
go.opentelemetry.io/otel/trace v0.20.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect
go.opentelemetry.io/proto/otlp v0.7.0 // indirect
go.uber.org/atomic v1.10.0 // indirect go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.8.0 // indirect go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.23.0 // indirect go.uber.org/zap v1.24.0 // indirect
golang.org/x/oauth2 v0.2.0 // indirect golang.org/x/oauth2 v0.2.0 // indirect
golang.org/x/term v0.4.0 // indirect golang.org/x/term v0.4.0 // indirect
golang.org/x/text v0.6.0 // indirect golang.org/x/text v0.6.0 // indirect
golang.org/x/time v0.2.0 // indirect golang.org/x/time v0.3.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e // indirect google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/square/go-jose.v2 v2.5.1 // indirect gopkg.in/square/go-jose.v2 v2.5.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.25.4 // indirect k8s.io/apiextensions-apiserver v0.26.1 // indirect
k8s.io/apiserver v0.25.4 // indirect k8s.io/apiserver v0.26.1 // indirect
k8s.io/component-base v0.25.4 // indirect k8s.io/component-base v0.26.1 // indirect
k8s.io/component-helpers v0.25.4 // indirect k8s.io/component-helpers v0.26.1 // indirect
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
k8s.io/kubectl v0.0.0 // indirect k8s.io/kubectl v0.0.0 // indirect
k8s.io/kubelet v0.0.0 // indirect k8s.io/kubelet v0.0.0 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect sigs.k8s.io/yaml v1.3.0 // indirect
@ -175,36 +177,39 @@ require (
replace ( replace (
code.cloudfoundry.org/gofileutils => github.com/cloudfoundry/gofileutils v0.0.0-20170111115228-4d0c80011a0f code.cloudfoundry.org/gofileutils => github.com/cloudfoundry/gofileutils v0.0.0-20170111115228-4d0c80011a0f
github.com/ceph/ceph-csi/api => ./api github.com/ceph/ceph-csi/api => ./api
// Required for kubernetes 1.26
github.com/onsi/ginkgo/v2 => github.com/onsi/ginkgo/v2 v2.4.0
github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3 github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3
gomodules.xyz/jsonpatch/v2 => github.com/gomodules/jsonpatch/v2 v2.2.0 gomodules.xyz/jsonpatch/v2 => github.com/gomodules/jsonpatch/v2 v2.2.0
// //
// k8s.io/kubernetes depends on these k8s.io packages, but unversioned // k8s.io/kubernetes depends on these k8s.io packages, but unversioned
// //
k8s.io/api => k8s.io/api v0.25.4 k8s.io/api => k8s.io/api v0.26.1
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.25.4 k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.1
k8s.io/apimachinery => k8s.io/apimachinery v0.25.4 k8s.io/apimachinery => k8s.io/apimachinery v0.26.1
k8s.io/apiserver => k8s.io/apiserver v0.25.4 k8s.io/apiserver => k8s.io/apiserver v0.26.1
k8s.io/cli-runtime => k8s.io/cli-runtime v0.25.4 k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.1
k8s.io/client-go => k8s.io/client-go v0.25.4 k8s.io/client-go => k8s.io/client-go v0.26.1
k8s.io/cloud-provider => k8s.io/cloud-provider v0.25.4 k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.1
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.25.4 k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.1
k8s.io/code-generator => k8s.io/code-generator v0.25.4 k8s.io/code-generator => k8s.io/code-generator v0.26.1
k8s.io/component-base => k8s.io/component-base v0.25.4 k8s.io/component-base => k8s.io/component-base v0.26.1
k8s.io/component-helpers => k8s.io/component-helpers v0.25.4 k8s.io/component-helpers => k8s.io/component-helpers v0.26.1
k8s.io/controller-manager => k8s.io/controller-manager v0.25.4 k8s.io/controller-manager => k8s.io/controller-manager v0.26.1
k8s.io/cri-api => k8s.io/cri-api v0.25.4 k8s.io/cri-api => k8s.io/cri-api v0.26.1
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.25.4 k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.1
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.25.4 k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.1
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.25.4 k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.1
k8s.io/kube-proxy => k8s.io/kube-proxy v0.25.4 k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.1
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.25.4 k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.1
k8s.io/kubectl => k8s.io/kubectl v0.25.4 k8s.io/kubectl => k8s.io/kubectl v0.26.1
k8s.io/kubelet => k8s.io/kubelet v0.25.4 k8s.io/kubelet => k8s.io/kubelet v0.26.1
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.25.4 k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.1
k8s.io/metrics => k8s.io/metrics v0.25.4 k8s.io/metrics => k8s.io/metrics v0.26.1
k8s.io/mount-utils => k8s.io/mount-utils v0.25.4 k8s.io/mount-utils => k8s.io/mount-utils v0.26.1
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.25.4 k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.26.1
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.25.4 k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.1
// layeh.com seems to be misbehaving // layeh.com seems to be misbehaving
layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917 layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917
) )

go.sum

@ -50,31 +50,22 @@ git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqbl
github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18= github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
@ -120,6 +111,7 @@ github.com/ansel1/merry/v2 v2.0.1/go.mod h1:dD5OhpiPrVkvgseRYd+xgYlx7s6ytU3v9BTT
github.com/ansel1/vespucci/v4 v4.1.1/go.mod h1:zzdrO4IgBfgcGMbGTk/qNGL8JPslmW3nPpcBHKReFYY= github.com/ansel1/vespucci/v4 v4.1.1/go.mod h1:zzdrO4IgBfgcGMbGTk/qNGL8JPslmW3nPpcBHKReFYY=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U= github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@ -140,8 +132,8 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.44.172 h1:JwhHWVkU/UUq8b4kc2ETzoYg6UXlSslK1EthXcXY8kI= github.com/aws/aws-sdk-go v1.44.190 h1:QC+Pf/Ooj7Waf2obOPZbIQOqr00hy4h54j3ZK9mvHcc=
github.com/aws/aws-sdk-go v1.44.172/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go v1.44.190/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v1.17.3 h1:shN7NlnVzvDUgPQ+1rLMSxY8OWRNDRYtiqe0p/PgrhY= github.com/aws/aws-sdk-go-v2 v1.17.3 h1:shN7NlnVzvDUgPQ+1rLMSxY8OWRNDRYtiqe0p/PgrhY=
github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 h1:I3cakv2Uy1vNmmhRQmFptYDxOvBnwCdNwyw63N0RaRU= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 h1:I3cakv2Uy1vNmmhRQmFptYDxOvBnwCdNwyw63N0RaRU=
@ -150,12 +142,11 @@ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 h1:5NbbMrIzmUn/TXFqAl
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21/go.mod h1:+Gxn8jYn5k9ebfHEqlhrMirFjSW0v0C9fI+KN5vk2kE= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21/go.mod h1:+Gxn8jYn5k9ebfHEqlhrMirFjSW0v0C9fI+KN5vk2kE=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 h1:5C6XgTViSb0bunmU57b3CT+MhxULqHH2721FVA+/kDM= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 h1:5C6XgTViSb0bunmU57b3CT+MhxULqHH2721FVA+/kDM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21/go.mod h1:lRToEJsn+DRA9lW4O9L9+/3hjTkUzlzyzHqn8MTds5k= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21/go.mod h1:lRToEJsn+DRA9lW4O9L9+/3hjTkUzlzyzHqn8MTds5k=
github.com/aws/aws-sdk-go-v2/service/sts v1.17.7 h1:9Mtq1KM6nD8/+HStvWcvYnixJ5N85DX+P+OY3kI3W2k= github.com/aws/aws-sdk-go-v2/service/sts v1.18.2 h1:J/4wIaGInCEYCGhTSruxCxeoA5cy91a+JT7cHFKFSHQ=
github.com/aws/aws-sdk-go-v2/service/sts v1.17.7/go.mod h1:+lGbb3+1ugwKrNTWcf2RT05Xmp543B06zDFTwiTLp7I= github.com/aws/aws-sdk-go-v2/service/sts v1.18.2/go.mod h1:+lGbb3+1ugwKrNTWcf2RT05Xmp543B06zDFTwiTLp7I=
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@ -176,6 +167,9 @@ github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx2
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c=
github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f/go.mod h1:C0rtzmGXgN78pYR0tGJFhtHgkbAs0lIbHwkB81VxDQE= github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f/go.mod h1:C0rtzmGXgN78pYR0tGJFhtHgkbAs0lIbHwkB81VxDQE=
github.com/ceph/go-ceph v0.19.0 h1:cl5apHt98pCWSoUStiLdl7Mlk3ke8fUGF4HI66Nxy/A= github.com/ceph/go-ceph v0.19.0 h1:cl5apHt98pCWSoUStiLdl7Mlk3ke8fUGF4HI66Nxy/A=
@ -228,7 +222,7 @@ github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@ -289,23 +283,23 @@ github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMi
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.5.0 h1:bAmFiUJ+o0o2B4OiTFeE3MqCOtyo+jjPP9iZ0VRxYUc= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI=
github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w= github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w=
@ -313,7 +307,6 @@ github.com/gemalto/flume v0.13.0 h1:EEeQvAxyFys3BH8IxEU7ZpM6Kr1sYn20HuZq6dgyMR8=
github.com/gemalto/flume v0.13.0/go.mod h1:3iOEZiK/HD8SnFTqHCQoOHQKaHlBY0b6z55P8SLaOzk= github.com/gemalto/flume v0.13.0/go.mod h1:3iOEZiK/HD8SnFTqHCQoOHQKaHlBY0b6z55P8SLaOzk=
github.com/gemalto/kmip-go v0.0.8 h1:RvKWTd2ACxOs7OF1f6SvPYebjmQbN0myfDHVQmX/k8g= github.com/gemalto/kmip-go v0.0.8 h1:RvKWTd2ACxOs7OF1f6SvPYebjmQbN0myfDHVQmX/k8g=
github.com/gemalto/kmip-go v0.0.8/go.mod h1:7bAnjuzri8yGoJMwngnAd0HdXMRDQU+l1Zaiz12Tr68= github.com/gemalto/kmip-go v0.0.8/go.mod h1:7bAnjuzri8yGoJMwngnAd0HdXMRDQU+l1Zaiz12Tr68=
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew=
@ -342,6 +335,8 @@ github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
@ -352,7 +347,6 @@ github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
@ -379,9 +373,9 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -429,7 +423,7 @@ github.com/gomodules/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DK
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.12.5/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw=
github.com/google/fscrypt v0.3.3 h1:qwx9OCR/xZE68VGr/r0/yugFhlGpIOGsH9JHrttP7vc= github.com/google/fscrypt v0.3.3 h1:qwx9OCR/xZE68VGr/r0/yugFhlGpIOGsH9JHrttP7vc=
github.com/google/fscrypt v0.3.3/go.mod h1:H1JHtH8BVe0dYNhzx1Ztkn3azQ0OBdoOmM828vEWAXc= github.com/google/fscrypt v0.3.3/go.mod h1:H1JHtH8BVe0dYNhzx1Ztkn3azQ0OBdoOmM828vEWAXc=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
@ -493,7 +487,6 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR
github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
@@ -510,8 +503,9 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
@@ -582,7 +576,7 @@ github.com/hashicorp/go-secure-stdlib/password v0.1.1 h1:6JzmBqXprakgFEHwBgdchsj
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
-github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1 h1:Yc026VyMyIpq1UWRnakHRG01U8fJm+nEfEmjoAb00n8=
+github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 h1:phcbL8urUzF/kxA/Oj6awENaRwfWsjP59GW7u2qlDyY=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
@@ -645,8 +639,8 @@ github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf
github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk=
-github.com/hashicorp/vault/api v1.8.2 h1:C7OL9YtOtwQbTKI9ogB0A1wffRbCN+rH/LLCHO3d8HM=
-github.com/hashicorp/vault/api v1.8.2/go.mod h1:ML8aYzBIhY5m1MD1B2Q0JV89cC85YVH4t5kBaZiyVaE=
+github.com/hashicorp/vault/api v1.8.3 h1:cHQOLcMhBR+aVI0HzhPxO62w2+gJhIrKguQNONPzu6o=
+github.com/hashicorp/vault/api v1.8.3/go.mod h1:4g/9lj9lmuJQMtT6CmVMHC5FW1yENaVv+Nv4ZfG8fAg=
github.com/hashicorp/vault/sdk v0.1.8/go.mod h1:tHZfc6St71twLizWNHvnnbiGFo1aq0eD2jGPLtP8kAU=
github.com/hashicorp/vault/sdk v0.1.14-0.20190730042320-0dc007d98cc8/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU=
@@ -656,8 +650,8 @@ github.com/hashicorp/vault/sdk v0.1.14-0.20200317185738-82f498082f02/go.mod h1:W
github.com/hashicorp/vault/sdk v0.1.14-0.20200427170607-03332aaf8d18/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
github.com/hashicorp/vault/sdk v0.1.14-0.20200429182704-29fce8f27ce4/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
-github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs=
-github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc=
+github.com/hashicorp/vault/sdk v0.7.0 h1:2pQRO40R1etpKkia5fb4kjrdYMx3BHklPxl1pxpxDHg=
+github.com/hashicorp/vault/sdk v0.7.0/go.mod h1:KyfArJkhooyba7gYCKSq8v66QdqJmnbAxtV/OX1+JTs=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
@@ -668,8 +662,9 @@ github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
+github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
@@ -724,7 +719,6 @@ github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -772,8 +766,8 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM=
+github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU=
@@ -806,9 +800,9 @@ github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY7
github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
-github.com/moby/sys/mountinfo v0.6.0 h1:gUDhXQx58YNrpHlK4nSL+7y2pxFZkUcXqzFDKWdC0Oo=
-github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
+github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -830,9 +824,6 @@ github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uY
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
@@ -841,24 +832,16 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
-github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
-github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
-github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
+github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
+github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q=
+github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
@@ -899,7 +882,6 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE=
github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -999,12 +981,11 @@ github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
-github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
+github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI=
+github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -1058,13 +1039,13 @@ go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
-go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
-go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU=
-go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY=
-go.etcd.io/etcd/pkg/v3 v3.5.4/go.mod h1:OI+TtO+Aa3nhQSppMbwE4ld3uF1/fqqwbpfndbbrEe0=
-go.etcd.io/etcd/raft/v3 v3.5.4/go.mod h1:SCuunjYvZFC0fBX0vxMSPjuZmpcSk+XaAcMrD6Do03w=
-go.etcd.io/etcd/server/v3 v3.5.4/go.mod h1:S5/YTU15KxymM5l3T6b09sNOHPXqGYIZStpuuGbb65c=
+go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8=
+go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
+go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4=
+go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c=
+go.etcd.io/etcd/pkg/v3 v3.5.5/go.mod h1:6ksYFxttiUGzC2uxyqiyOEvhAiD0tuIqSZkX3TyPdaE=
+go.etcd.io/etcd/raft/v3 v3.5.5/go.mod h1:76TA48q03g1y1VpTue92jZLr9lIHKUNcYdZOOGyx8rI=
+go.etcd.io/etcd/server/v3 v3.5.5/go.mod h1:rZ95vDw/jrvsbj9XpTqPrTAB9/kzchVdhRirySPkUBc=
go.mongodb.org/mongo-driver v1.2.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A=
go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M=
@@ -1076,29 +1057,35 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
-go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 h1:Q3C9yzW6I9jqEc8sawxzxZmY48fs9u220KXq6d5s3XU=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
-go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g=
-go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
-go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8=
-go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
-go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw=
-go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
-go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8=
-go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
-go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8=
-go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
-go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw=
-go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
-go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 h1:Ajldaqhxqw/gNzQA45IKFWLdG7jZuXX/wBW1d5qvbUI=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c=
+go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU=
+go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM=
+go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4=
+go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0=
+go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs=
+go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A=
+go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI=
+go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY=
+go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE=
+go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk=
+go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4=
+go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E=
+go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg=
+go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
+go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
@@ -1106,8 +1093,8 @@ go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
+go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
@@ -1118,8 +1105,8 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY=
-go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY=
+go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
+go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8=
golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -1131,17 +1118,15 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
-golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
+golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
+golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1181,6 +1166,7 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1213,7 +1199,6 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@@ -1229,7 +1214,6 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -1238,9 +1222,9 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1277,6 +1261,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1303,13 +1288,10 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1336,7 +1318,6 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1346,6 +1327,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1357,16 +1339,16 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@@ -1374,6 +1356,7 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn
golang.org/x/term v0.0.0-20210422114643-f5beecf764ed/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1387,6 +1370,7 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1397,8 +1381,8 @@ golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.2.0 h1:52I/1L54xyEQAYdtcSuxtiT84KGYTBGXwayxmIpNJhE=
-golang.org/x/time v0.2.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1457,7 +1441,6 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
@ -1468,6 +1451,7 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1579,11 +1563,12 @@ google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEc
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220208230804-65c12eb4c068/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220208230804-65c12eb4c068/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e h1:S9GbmC1iCgvbLyAokVCwiO6tVIrU9Y7c5oMx1V/ki/Y= google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c=
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
@ -1615,11 +1600,14 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk=
google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@ -1667,7 +1655,6 @@ gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76
gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk= gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@ -1696,61 +1683,60 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.25.4 h1:3YO8J4RtmG7elEgaWMb4HgmpS2CfY1QlaOz9nwB+ZSs= k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ=
k8s.io/api v0.25.4/go.mod h1:IG2+RzyPQLllQxnhzD8KQNEu4c4YvyDTpSMztf4A0OQ= k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg=
k8s.io/apiextensions-apiserver v0.25.4 h1:7hu9pF+xikxQuQZ7/30z/qxIPZc2J1lFElPtr7f+B6U= k8s.io/apiextensions-apiserver v0.26.1 h1:cB8h1SRk6e/+i3NOrQgSFij1B2S0Y0wDoNl66bn8RMI=
k8s.io/apiextensions-apiserver v0.25.4/go.mod h1:bkSGki5YBoZWdn5pWtNIdGvDrrsRWlmnvl9a+tAw5vQ= k8s.io/apiextensions-apiserver v0.26.1/go.mod h1:AptjOSXDGuE0JICx/Em15PaoO7buLwTs0dGleIHixSM=
k8s.io/apimachinery v0.25.4 h1:CtXsuaitMESSu339tfhVXhQrPET+EiWnIY1rcurKnAc= k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ=
k8s.io/apimachinery v0.25.4/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo= k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
k8s.io/apiserver v0.25.4 h1:/3TwZcgLqX7wUxq7TtXOUqXeBTwXIblVMQdhR5XZ7yo= k8s.io/apiserver v0.26.1 h1:6vmnAqCDO194SVCPU3MU8NcDgSqsUA62tBUSWrFXhsc=
k8s.io/apiserver v0.25.4/go.mod h1:rPcm567XxjOnnd7jedDUnGJGmDGAo+cT6H7QHAN+xV0= k8s.io/apiserver v0.26.1/go.mod h1:wr75z634Cv+sifswE9HlAo5FQ7UoUauIICRlOE+5dCg=
k8s.io/client-go v0.25.4 h1:3RNRDffAkNU56M/a7gUfXaEzdhZlYhoW8dgViGy5fn8= k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU=
k8s.io/client-go v0.25.4/go.mod h1:8trHCAC83XKY0wsBIpbirZU4NTUpbuhc2JnI7OruGZw= k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE=
k8s.io/cloud-provider v0.25.4 h1:juIfru1jVX6BlDWcJ18sv6aWxMSMmcjWf2HNXUtnkiI= k8s.io/cloud-provider v0.26.1 h1:qEZmsGWGptOtVSpeMdTsapHX2BEqIk7rc5MA4caBqE0=
k8s.io/cloud-provider v0.25.4/go.mod h1:L018fvnYxHrJP93UNSu8ODZYd/HCukliBzzNsV4TqC0= k8s.io/cloud-provider v0.26.1/go.mod h1:6PheIxRySYuRBBxtTUADya8S2rbr18xKi+fhGbLkduc=
k8s.io/code-generator v0.25.4/go.mod h1:9F5fuVZOMWRme7MYj2YT3L9ropPWPokd9VRhVyD3+0w= k8s.io/code-generator v0.26.1/go.mod h1:OMoJ5Dqx1wgaQzKgc+ZWaZPfGjdRq/Y3WubFrZmeI3I=
k8s.io/component-base v0.25.4 h1:n1bjg9Yt+G1C0WnIDJmg2fo6wbEU1UGMRiQSjmj7hNQ= k8s.io/component-base v0.26.1 h1:4ahudpeQXHZL5kko+iDHqLj/FSGAEUnSVO0EBbgDd+4=
k8s.io/component-base v0.25.4/go.mod h1:nnZJU8OP13PJEm6/p5V2ztgX2oyteIaAGKGMYb2L2cY= k8s.io/component-base v0.26.1/go.mod h1:VHrLR0b58oC035w6YQiBSbtsf0ThuSwXP+p5dD/kAWU=
k8s.io/component-helpers v0.25.4 h1:FIwp5ZzjaA7p0lekHQM28E7qTd9Aetx5O9e7r4n2Kqo= k8s.io/component-helpers v0.26.1 h1:Y5h1OYUJTGyHZlSAsc7mcfNsWF08S/MlrQyF/vn93mU=
k8s.io/component-helpers v0.25.4/go.mod h1:X4KJ8SsJ/onWcDQkRhcE2WRG/iNMufCl7RsNSYtguJg= k8s.io/component-helpers v0.26.1/go.mod h1:jxNTnHb1axLe93MyVuvKj9T/+f4nxBVrj/xf01/UNFk=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kms v0.26.1/go.mod h1:ReC1IEGuxgfN+PDCIpR6w8+XMmDE7uJhxcCwMZFdIYc=
k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
k8s.io/kubectl v0.25.4 h1:O3OA1z4V1ZyvxCvScjq0pxAP7ABgznr8UvnVObgI6Dc= k8s.io/kubectl v0.26.1 h1:K8A0Jjlwg8GqrxOXxAbjY5xtmXYeYjLU96cHp2WMQ7s=
k8s.io/kubectl v0.25.4/go.mod h1:CKMrQ67Bn2YCP26tZStPQGq62zr9pvzEf65A0navm8k= k8s.io/kubectl v0.26.1/go.mod h1:miYFVzldVbdIiXMrHZYmL/EDWwJKM+F0sSsdxsATFPo=
k8s.io/kubelet v0.25.4 h1:24MmTTQGBHr08UkMYFC/RaLjuiMREM53HfRgJKWRquI= k8s.io/kubelet v0.26.1 h1:wQyCQYmLW6GN3v7gVTxnc3jAE4zMYDlzdF3FZV4rKas=
k8s.io/kubelet v0.25.4/go.mod h1:dWAxzvWR7B6LrSgE+6H6Dc7bOzNOzm+O+W6zLic9daA= k8s.io/kubelet v0.26.1/go.mod h1:gFVZ1Ab4XdjtnYdVRATwGwku7FhTxo6LVEZwYoQaDT8=
k8s.io/kubernetes v1.25.4 h1:M1+MR8IxE64zHhSSDn30twChLaOI+p0Kt77pvyQMKwU= k8s.io/kubernetes v1.26.1 h1:N+qxlptxpSU/VSLvqBGWyyw/kNhJRpEn1b5YP57+5rk=
k8s.io/kubernetes v1.25.4/go.mod h1:lvEY+3iJhh+sGIK1LorGkI56rW0eLGsfalnp68wQwYU= k8s.io/kubernetes v1.26.1/go.mod h1:dEfAfGVZBOr2uZLeVazLPj/8E+t8jYFbQqCiBudkB8o=
k8s.io/mount-utils v0.25.4 h1:+j1GBo6rH6sM1GvOI4jcu1IpjA5WssuwW7UEkQevaTU= k8s.io/mount-utils v0.26.1 h1:deN1IBPyi5UFEAgQYXBEDUejzQUNzRC1ML7BUMWljzA=
k8s.io/mount-utils v0.25.4/go.mod h1:odpFnGwJfFjN3SRnjfGS0902ubcj/W6hDOrNDmSSINo= k8s.io/mount-utils v0.26.1/go.mod h1:au99w4FWU5ZWelLb3Yx6kJc8RZ387IyWVM9tN65Yhxo=
k8s.io/pod-security-admission v0.25.4 h1:jUjWkuYPnuZo7HNj0FkiPjcoj0ERULXGSTCMiDM91A8= k8s.io/pod-security-admission v0.26.1 h1:EDIxsYFeKMzNvN/JB0PgQcuwBP6fIkIG2O8ZWJhzOp4=
k8s.io/pod-security-admission v0.25.4/go.mod h1:0xthTisMu4TTzHrzM5SCeaRoFwqBjM54DqdHVcwk62k= k8s.io/pod-security-admission v0.26.1/go.mod h1:hCbYTG5UtLlivmukkMPjAWf23PUBUHzEvR60xNVWN4c=
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 h1:GfD9OzL11kvZN5iArC6oTS7RTj7oJOIfnislxYlqTj8= k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 h1:LYqFq+6Cj2D0gFfrJvL7iElD4ET6ir3VDdhDdTK7rgc= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 h1:+xBL5uTc+BkPBwmMi3vYfUJjq+N3K+H6PXeETwf5cPI=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33/go.mod h1:soWkSNf2tZC7aMibXEqVhCd73GOY5fJikn8qbdzemB0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35/go.mod h1:WxjusMwXlKzfAs4p9km6XJRndVt2FROgMVCE4cdohFo=
sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I= sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I=
sigs.k8s.io/controller-runtime v0.11.0-beta.0.0.20211208212546-f236f0345ad2 h1:+ReKrjTrd57mtAU19BJkxSAaWRIQkFlaWcO6dGFVP1g= sigs.k8s.io/controller-runtime v0.14.2 h1:P6IwDhbsRWsBClt/8/h8Zy36bCuGuW5Op7MHpFrN/60=
sigs.k8s.io/controller-runtime v0.11.0-beta.0.0.20211208212546-f236f0345ad2/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA= sigs.k8s.io/controller-runtime v0.14.2/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=


@@ -19,7 +19,6 @@ package kms
import (
    "errors"
    "fmt"
-   "io/ioutil"
    "os"
    "path/filepath"
    "strconv"
@@ -254,7 +253,6 @@ func (vc *vaultConnection) initConnection(config map[string]interface{}) error {
// initCertificates sets VAULT_* environment variables in the vc.vaultConfig map,
// these settings will be used when connecting to the Vault service with
// vc.connectVault().
-//
func (vc *vaultConnection) initCertificates(config map[string]interface{}, secrets map[string]string) error {
    vaultConfig := make(map[string]interface{})
@@ -481,9 +479,9 @@ func detectAuthMountPath(path string) (string, error) {
}

// createTempFile writes data to a temporary file that contains the pattern in
-// the filename (see ioutil.TempFile for details).
+// the filename (see os.CreateTemp for details).
func createTempFile(pattern string, data []byte) (string, error) {
-   t, err := ioutil.TempFile("", pattern)
+   t, err := os.CreateTemp("", pattern)
    if err != nil {
        return "", fmt.Errorf("failed to create temporary file: %w", err)
    }
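
The io/ioutil removals in this and the following files track the Go 1.16 deprecation of that package; the os replacements are drop-in. A minimal sketch (the file pattern below is illustrative):

// os.CreateTemp replaces ioutil.TempFile, and os.MkdirTemp replaces
// ioutil.TempDir (both available since Go 1.16).
f, err := os.CreateTemp("", "vault-ca-*.pem") // pattern is illustrative
if err != nil {
    return "", fmt.Errorf("failed to create temporary file: %w", err)
}
defer f.Close()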


@@ -20,7 +20,6 @@ import (
    "context"
    "errors"
    "fmt"
-   "io/ioutil"
    "os"

    "github.com/libopenstorage/secrets/vault"
@@ -321,7 +320,7 @@ func (kms *vaultTenantSA) getToken() (string, error) {
// linked from the ServiceAccount. This path can then be used in place of the
// standard `/var/run/secrets/kubernetes.io/serviceaccount/token` location.
func (kms *vaultTenantSA) getTokenPath() (string, error) {
-   dir, err := ioutil.TempDir("", kms.tenantSAName)
+   dir, err := os.MkdirTemp("", kms.tenantSAName)
    if err != nil {
        return "", fmt.Errorf("failed to create directory for ServiceAccount %s/%s: %w", kms.tenantSAName, kms.Tenant, err)
    }


@@ -1026,6 +1026,29 @@ func (ns *NodeServer) NodeUnstageVolume(
    return &csi.NodeUnstageVolumeResponse{}, nil
}

+// getStagingPath returns the staging path for the volume from the volume path.
+// The stagingTargetPath looks like
+// /var/lib/kubelet/plugins/kubernetes.io/csi/pv/pvc-08937eb8-7e00-4033-b6ce-bc36147b4ed0/
+// globalmount/0001-0009-rook-ceph-0000000000000002-50d53503-9da1-11ed-847e-bacf8f2f1297
+// where the last directory is the volumeID returned in the NodeStageVolumeResponse.
+// The image-meta.json file is present in the staging path without the volumeID.
+func (ns *NodeServer) getStagingPath(volPath string) (string, error) {
+   mounts, err := ns.Mounter.GetMountRefs(volPath)
+   if err != nil {
+       return "", err
+   }
+   for _, mount := range mounts {
+       // strip the last directory from the staging path
+       stp := strings.Split(mount, "/")
+       stagingTargetPath := strings.Join(stp[:len(stp)-1], "/")
+       if checkRBDImageMetadataStashExists(stagingTargetPath) {
+           return stagingTargetPath, nil
+       }
+   }
+
+   return "", fmt.Errorf("failed to get staging path for volume %s", volPath)
+}
+
// NodeExpandVolume resizes rbd volumes.
func (ns *NodeServer) NodeExpandVolume(
    ctx context.Context,
@@ -1042,8 +1065,18 @@ func (ns *NodeServer) NodeExpandVolume(
    volumePath := req.GetStagingTargetPath()
    if volumePath == "" {
        // If Kubernetes version < v1.19.0 the volume_path would be
-       // having the staging_target_path information
+       // having the staging_target_path information.
+       // stagingTargetPath is an optional parameter in the NodeExpandVolumeRequest;
+       // Kubernetes will not send the staging_target_path in the volume_path in some cases.
+       // Refer to https://github.com/kubernetes/kubernetes/issues/115343.
+       var err error
+       volumePath, err = ns.getStagingPath(req.GetVolumePath())
+       if err != nil {
+           // If stagingTargetPath is not found in volumePath then use
+           // volumePath.
            volumePath = req.GetVolumePath()
+           log.UsefulLog(ctx, "failed to find stagingTargetPath from volumePath %v: %v", volumePath, err)
+       }
    }

    if volumePath == "" {
        return nil, status.Error(codes.InvalidArgument, "volume path must be provided")
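
As a sketch of the path handling in getStagingPath above, with a hypothetical mount reference (all IDs are placeholders):

// Illustrative only: strip the trailing volume-ID directory from a mount
// reference; the remaining path is where image-meta.json is stashed.
mount := "/var/lib/kubelet/plugins/kubernetes.io/csi/pv/pvc-1234/globalmount/0001-0009-rook-ceph-0000000000000002-abcd"
stp := strings.Split(mount, "/")
stagingTargetPath := strings.Join(stp[:len(stp)-1], "/")
// stagingTargetPath == "/var/lib/kubelet/plugins/kubernetes.io/csi/pv/pvc-1234/globalmount"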


@@ -1731,6 +1731,14 @@ func stashRBDImageMetadata(volOptions *rbdVolume, metaDataPath string) error {
    return nil
}

+// checkRBDImageMetadataStashExists checks if the stashFile exists at the passed in path.
+func checkRBDImageMetadataStashExists(metaDataPath string) bool {
+   imageMetaPath := filepath.Join(metaDataPath, stashFileName)
+   _, err := os.Stat(imageMetaPath)
+
+   return err == nil
+}
+
// lookupRBDImageMetadataStash reads and returns stashed image metadata at passed in path.
func lookupRBDImageMetadataStash(metaDataPath string) (rbdImageMetadataStash, error) {
    var imgMeta rbdImageMetadataStash


@@ -19,7 +19,6 @@ package rbd
import (
    "context"
    "errors"
-   "io/ioutil"
    "os"
    "strings"
    "testing"
@@ -252,7 +251,7 @@ func TestStrategicActionOnLogFile(t *testing.T) {
    var logFile [3]string
    for i := 0; i < 3; i++ {
-       f, err := ioutil.TempFile(tmpDir, "rbd-*.log")
+       f, err := os.CreateTemp(tmpDir, "rbd-*.log")
        if err != nil {
            t.Errorf("creating tempfile failed: %v", err)
        }


@@ -19,7 +19,6 @@ package util
import (
    "errors"
    "fmt"
-   "io/ioutil"
    "os"
)
@@ -43,7 +42,7 @@ type Credentials struct {
}

func storeKey(key string) (string, error) {
-   tmpfile, err := ioutil.TempFile(tmpKeyFileLocation, tmpKeyFileNamePrefix)
+   tmpfile, err := os.CreateTemp(tmpKeyFileLocation, tmpKeyFileNamePrefix)
    if err != nil {
        return "", fmt.Errorf("error creating a temporary keyfile: %w", err)
    }


@@ -33,14 +33,14 @@ const (
// VolumeLocks implements a map with atomic operations. It stores a set of all volume IDs
// with an ongoing operation.
type VolumeLocks struct {
-   locks sets.String
+   locks sets.Set[string]
    mux   sync.Mutex
}

// NewVolumeLocks returns new VolumeLocks.
func NewVolumeLocks() *VolumeLocks {
    return &VolumeLocks{
-       locks: sets.NewString(),
+       locks: sets.New[string](),
    }
}
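
Newer k8s.io/apimachinery releases deprecate the typed sets.String in favor of the generic sets.Set[T]; a minimal sketch of the replacement API (the volume ID is a placeholder):

import "k8s.io/apimachinery/pkg/util/sets"

locks := sets.New[string]() // replaces sets.NewString()
locks.Insert("volume-1")    // placeholder volume ID
ok := locks.Has("volume-1") // membership checks keep the same method names
locks.Delete("volume-1")
_ = ok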


@@ -19,7 +19,6 @@ package log
import (
    "errors"
    "fmt"
-   "io/ioutil"
    "os"
    "strings"
    "testing"
@@ -28,7 +27,7 @@ import (
func TestGzipLogFile(t *testing.T) {
    t.Parallel()
    tmpDir := t.TempDir()

-   logFile, err := ioutil.TempFile(tmpDir, "rbd-*.log")
+   logFile, err := os.CreateTemp(tmpDir, "rbd-*.log")
    if err != nil {
        fmt.Println(err)
    }


@@ -1,3 +1,15 @@
+# v1.18.2 (2023-01-25)
+
+* **Documentation**: Doc only change to update wording in a key topic
+
+# v1.18.1 (2023-01-23)
+
+* No change notes available for this release.
+
+# v1.18.0 (2023-01-05)
+
+* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+
# v1.17.7 (2022-12-15)

* **Dependency Update**: Updated to the latest SDK module versions


@@ -12,12 +12,11 @@ import (
)

// Returns a set of temporary security credentials that you can use to access
-// Amazon Web Services resources that you might not normally have access to. These
-// temporary credentials consist of an access key ID, a secret access key, and a
-// security token. Typically, you use AssumeRole within your account or for
-// cross-account access. For a comparison of AssumeRole with other API operations
-// that produce temporary credentials, see Requesting Temporary Security
-// Credentials
+// Amazon Web Services resources. These temporary credentials consist of an access
+// key ID, a secret access key, and a security token. Typically, you use AssumeRole
+// within your account or for cross-account access. For a comparison of AssumeRole
+// with other API operations that produce temporary credentials, see Requesting
+// Temporary Security Credentials
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
// and Comparing the Amazon Web Services STS API operations
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
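
For orientation, a minimal sketch of an AssumeRole call with this SDK; the role ARN and session name are placeholders:

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    client := sts.NewFromConfig(cfg)
    out, err := client.AssumeRole(context.TODO(), &sts.AssumeRoleInput{
        RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
        RoleSessionName: aws.String("example-session"),                        // placeholder
    })
    if err != nil {
        log.Fatal(err)
    }
    // out.Credentials carries the temporary AccessKeyId, SecretAccessKey and SessionToken.
    log.Println(aws.ToString(out.Credentials.AccessKeyId))
}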


@@ -43,16 +43,17 @@ import (
// Temporary credentials obtained by using the Amazon Web Services account root
// user credentials have a maximum duration of 3,600 seconds (1 hour). Permissions
// You can use the temporary credentials created by GetFederationToken in any
-// Amazon Web Services service except the following:
+// Amazon Web Services service with the following exceptions:
//
-// * You cannot call any IAM
-// operations using the CLI or the Amazon Web Services API.
+// * You cannot call
+// any IAM operations using the CLI or the Amazon Web Services API. This limitation
+// does not apply to console sessions.
//
-// * You cannot call any
-// STS operations except GetCallerIdentity.
+// * You cannot call any STS operations except
+// GetCallerIdentity.
//
-// You must pass an inline or managed
-// session policy
+// You can use temporary credentials for single sign-on (SSO)
+// to the console. You must pass an inline or managed session policy
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// to this operation. You can pass a single JSON policy document to use as an
// inline session policy. You can also specify up to 10 managed policy Amazon


@@ -3,4 +3,4 @@
package sts

// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.17.7"
+const goModuleVersion = "1.18.2"


@@ -165,6 +165,9 @@ var defaultPartitions = endpoints.Partitions{
        endpoints.EndpointKey{
            Region: "ap-southeast-3",
        }: endpoints.Endpoint{},
+       endpoints.EndpointKey{
+           Region: "ap-southeast-4",
+       }: endpoints.Endpoint{},
        endpoints.EndpointKey{
            Region: "aws-global",
        }: endpoints.Endpoint{


@@ -12,6 +12,8 @@ import (
type ExpiredTokenException struct {
    Message *string

+   ErrorCodeOverride *string
+
    noSmithyDocumentSerde
}
@@ -24,7 +26,12 @@ func (e *ExpiredTokenException) ErrorMessage() string {
    }
    return *e.Message
}
-func (e *ExpiredTokenException) ErrorCode() string { return "ExpiredTokenException" }
+func (e *ExpiredTokenException) ErrorCode() string {
+   if e.ErrorCodeOverride == nil {
+       return "ExpiredTokenException"
+   }
+   return *e.ErrorCodeOverride
+}
func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }

// The request could not be fulfilled because the identity provider (IDP) that was
@@ -35,6 +42,8 @@ func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.F
type IDPCommunicationErrorException struct {
    Message *string

+   ErrorCodeOverride *string
+
    noSmithyDocumentSerde
}
@@ -47,7 +56,12 @@ func (e *IDPCommunicationErrorException) ErrorMessage() string {
    }
    return *e.Message
}
-func (e *IDPCommunicationErrorException) ErrorCode() string { return "IDPCommunicationError" }
+func (e *IDPCommunicationErrorException) ErrorCode() string {
+   if e.ErrorCodeOverride == nil {
+       return "IDPCommunicationError"
+   }
+   return *e.ErrorCodeOverride
+}
func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }

// The identity provider (IdP) reported that authentication failed. This might be
@@ -57,6 +71,8 @@ func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return
type IDPRejectedClaimException struct {
    Message *string

+   ErrorCodeOverride *string
+
    noSmithyDocumentSerde
}
@@ -69,7 +85,12 @@ func (e *IDPRejectedClaimException) ErrorMessage() string {
    }
    return *e.Message
}
-func (e *IDPRejectedClaimException) ErrorCode() string { return "IDPRejectedClaim" }
+func (e *IDPRejectedClaimException) ErrorCode() string {
+   if e.ErrorCodeOverride == nil {
+       return "IDPRejectedClaim"
+   }
+   return *e.ErrorCodeOverride
+}
func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }

// The error returned if the message passed to DecodeAuthorizationMessage was
@@ -78,6 +99,8 @@ func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smit
type InvalidAuthorizationMessageException struct {
    Message *string

+   ErrorCodeOverride *string
+
    noSmithyDocumentSerde
}
@@ -91,7 +114,10 @@ func (e *InvalidAuthorizationMessageException) ErrorMessage() string {
    return *e.Message
}
func (e *InvalidAuthorizationMessageException) ErrorCode() string {
+   if e.ErrorCodeOverride == nil {
        return "InvalidAuthorizationMessageException"
+   }
+   return *e.ErrorCodeOverride
}
func (e *InvalidAuthorizationMessageException) ErrorFault() smithy.ErrorFault {
    return smithy.FaultClient
@@ -103,6 +129,8 @@ func (e *InvalidAuthorizationMessageException) ErrorFault() smithy.ErrorFault {
type InvalidIdentityTokenException struct {
    Message *string

+   ErrorCodeOverride *string
+
    noSmithyDocumentSerde
}
@@ -115,7 +143,12 @@ func (e *InvalidIdentityTokenException) ErrorMessage() string {
    }
    return *e.Message
}
-func (e *InvalidIdentityTokenException) ErrorCode() string { return "InvalidIdentityToken" }
+func (e *InvalidIdentityTokenException) ErrorCode() string {
+   if e.ErrorCodeOverride == nil {
+       return "InvalidIdentityToken"
+   }
+   return *e.ErrorCodeOverride
+}
func (e *InvalidIdentityTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }

// The request was rejected because the policy document was malformed. The error
@@ -123,6 +156,8 @@ func (e *InvalidIdentityTokenException) ErrorFault() smithy.ErrorFault { return
type MalformedPolicyDocumentException struct {
    Message *string

+   ErrorCodeOverride *string
+
    noSmithyDocumentSerde
}
@@ -135,7 +170,12 @@ func (e *MalformedPolicyDocumentException) ErrorMessage() string {
    }
    return *e.Message
}
-func (e *MalformedPolicyDocumentException) ErrorCode() string { return "MalformedPolicyDocument" }
+func (e *MalformedPolicyDocumentException) ErrorCode() string {
+   if e.ErrorCodeOverride == nil {
+       return "MalformedPolicyDocument"
+   }
+   return *e.ErrorCodeOverride
+}
func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }

// The request was rejected because the total packed size of the session policies
@@ -153,6 +193,8 @@ func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { retu
type PackedPolicyTooLargeException struct {
    Message *string

+   ErrorCodeOverride *string
+
    noSmithyDocumentSerde
}
@@ -165,7 +207,12 @@ func (e *PackedPolicyTooLargeException) ErrorMessage() string {
    }
    return *e.Message
}
-func (e *PackedPolicyTooLargeException) ErrorCode() string { return "PackedPolicyTooLarge" }
+func (e *PackedPolicyTooLargeException) ErrorCode() string {
+   if e.ErrorCodeOverride == nil {
+       return "PackedPolicyTooLarge"
+   }
+   return *e.ErrorCodeOverride
+}
func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }

// STS is not activated in the requested region for the account that is being asked
@@ -177,6 +224,8 @@ func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return
type RegionDisabledException struct {
    Message *string

+   ErrorCodeOverride *string
+
    noSmithyDocumentSerde
}
@@ -189,5 +238,10 @@ func (e *RegionDisabledException) ErrorMessage() string {
    }
    return *e.Message
}
-func (e *RegionDisabledException) ErrorCode() string { return "RegionDisabledException" }
+func (e *RegionDisabledException) ErrorCode() string {
+   if e.ErrorCodeOverride == nil {
+       return "RegionDisabledException"
+   }
+   return *e.ErrorCodeOverride
+}
func (e *RegionDisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
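
Callers typically detect these typed errors with errors.As; a minimal sketch, assuming err comes from an earlier STS call and types is the github.com/aws/aws-sdk-go-v2/service/sts/types package:

var rde *types.RegionDisabledException
if errors.As(err, &rde) {
    // ErrorCode() yields "RegionDisabledException" unless ErrorCodeOverride is set.
    log.Printf("STS region disabled: %s (%s)", rde.ErrorMessage(), rde.ErrorCode())
}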

File diff suppressed because it is too large.


@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
-const SDKVersion = "1.44.172"
+const SDKVersion = "1.44.190"


@@ -35,6 +35,8 @@ func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata
    }
}

+// NewUnmarshalTypedErrorWithOptions works like NewUnmarshalTypedError, applying options to the UnmarshalTypedError
+// before returning it.
func NewUnmarshalTypedErrorWithOptions(exceptions map[string]func(protocol.ResponseMetadata) error, optFns ...func(*UnmarshalTypedError)) *UnmarshalTypedError {
    unmarshaledError := NewUnmarshalTypedError(exceptions)
    for _, fn := range optFns {
@@ -43,6 +45,11 @@ func NewUnmarshalTypedErrorWithOptions(exceptions map[string]func(protocol.Respo
    return unmarshaledError
}

+// WithQueryCompatibility is a helper function to construct a functional option for use with NewUnmarshalTypedErrorWithOptions.
+// The queryExceptions given act as an override for unmarshalling errors when query-compatible error codes are found.
+// See also [awsQueryCompatible trait]
+//
+// [awsQueryCompatible trait]: https://smithy.io/2.0/aws/protocols/aws-query-protocol.html#aws-protocols-awsquerycompatible-trait
func WithQueryCompatibility(queryExceptions map[string]func(protocol.ResponseMetadata, string) error) func(*UnmarshalTypedError) {
    return func(typedError *UnmarshalTypedError) {
        typedError.queryExceptions = queryExceptions

File diff suppressed because it is too large.


@@ -16,17 +16,17 @@
//
// To learn more, see the following resources:
//
-// - Amazon EC2: AmazonEC2 product page (http://aws.amazon.com/ec2), Amazon
-// EC2 documentation (http://aws.amazon.com/documentation/ec2)
+// - Amazon EC2: Amazon EC2 product page (http://aws.amazon.com/ec2), Amazon
+// EC2 documentation (https://docs.aws.amazon.com/ec2/index.html)
//
// - Amazon EBS: Amazon EBS product page (http://aws.amazon.com/ebs), Amazon
-// EBS documentation (http://aws.amazon.com/documentation/ebs)
+// EBS documentation (https://docs.aws.amazon.com/ebs/index.html)
//
// - Amazon VPC: Amazon VPC product page (http://aws.amazon.com/vpc), Amazon
-// VPC documentation (http://aws.amazon.com/documentation/vpc)
+// VPC documentation (https://docs.aws.amazon.com/vpc/index.html)
//
-// - Amazon Web Services VPN: Amazon Web Services VPN product page (http://aws.amazon.com/vpn),
-// Amazon Web Services VPN documentation (http://aws.amazon.com/documentation/vpn)
+// - VPN: VPN product page (http://aws.amazon.com/vpn), VPN documentation
+// (https://docs.aws.amazon.com/vpn/index.html)
//
// See https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15 for more information on this service.
//


@@ -56,12 +56,11 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// AssumeRole API operation for AWS Security Token Service.
//
// Returns a set of temporary security credentials that you can use to access
-// Amazon Web Services resources that you might not normally have access to.
-// These temporary credentials consist of an access key ID, a secret access
-// key, and a security token. Typically, you use AssumeRole within your account
-// or for cross-account access. For a comparison of AssumeRole with other API
-// operations that produce temporary credentials, see Requesting Temporary Security
-// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// Amazon Web Services resources. These temporary credentials consist of an
+// access key ID, a secret access key, and a security token. Typically, you
+// use AssumeRole within your account or for cross-account access. For a comparison
+// of AssumeRole with other API operations that produce temporary credentials,
+// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
@@ -1103,13 +1102,15 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// # Permissions
//
// You can use the temporary credentials created by GetFederationToken in any
-// Amazon Web Services service except the following:
+// Amazon Web Services service with the following exceptions:
//
// - You cannot call any IAM operations using the CLI or the Amazon Web Services
-// API.
+// API. This limitation does not apply to console sessions.
//
// - You cannot call any STS operations except GetCallerIdentity.
//
+// You can use temporary credentials for single sign-on (SSO) to the console.
+//
// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// to this operation. You can pass a single JSON policy document to use as an
// inline session policy. You can also specify up to 10 managed policy Amazon

vendor/github.com/cenkalti/backoff/v4/.gitignore (generated, vendored, new file, 25 lines)

@@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
# IDEs
.idea/

vendor/github.com/cenkalti/backoff/v4/.travis.yml (generated, vendored, new file, 10 lines)

@@ -0,0 +1,10 @@
language: go
go:
- 1.13
- 1.x
- tip
before_install:
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
script:
- $HOME/gopath/bin/goveralls -service=travis-ci

vendor/github.com/cenkalti/backoff/v4/LICENSE (generated, vendored, new file, 20 lines)

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2014 Cenk Altı
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/cenkalti/backoff/v4/README.md (generated, vendored, new file, 32 lines)

@@ -0,0 +1,32 @@
# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]
This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
[Exponential backoff][exponential backoff wiki]
is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
in order to gradually find an acceptable rate.
The retries exponentially increase and stop increasing when a certain threshold is met.
## Usage
Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
## Contributing
* I would like to keep this library as small as possible.
* Please don't send a PR without opening an issue and discussing it first.
* If the proposed change is not a common use case, I will probably not accept it.
[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
[travis]: https://travis-ci.org/cenkalti/backoff
[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples

vendor/github.com/cenkalti/backoff/v4/backoff.go (generated, vendored, new file, 66 lines)

@@ -0,0 +1,66 @@
// Package backoff implements backoff algorithms for retrying operations.
//
// Use Retry function for retrying operations that may fail.
// If Retry does not meet your needs,
// copy/paste the function into your project and modify as you wish.
//
// There is also Ticker type similar to time.Ticker.
// You can use it if you need to work with channels.
//
// See Examples section below for usage examples.
package backoff
import "time"
// BackOff is a backoff policy for retrying an operation.
type BackOff interface {
// NextBackOff returns the duration to wait before retrying the operation,
// or backoff.Stop to indicate that no more retries should be made.
//
// Example usage:
//
// duration := backoff.NextBackOff();
// if (duration == backoff.Stop) {
// // Do not retry operation.
// } else {
// // Sleep for duration and retry operation.
// }
//
NextBackOff() time.Duration
// Reset to initial state.
Reset()
}
// Stop indicates that no more retries should be made for use in NextBackOff().
const Stop time.Duration = -1
// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
// meaning that the operation is retried immediately without waiting, indefinitely.
type ZeroBackOff struct{}
func (b *ZeroBackOff) Reset() {}
func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
// NextBackOff(), meaning that the operation should never be retried.
type StopBackOff struct{}
func (b *StopBackOff) Reset() {}
func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
// ConstantBackOff is a backoff policy that always returns the same backoff delay.
// This is in contrast to an exponential backoff policy,
// which returns a delay that grows longer as you call NextBackOff() over and over again.
type ConstantBackOff struct {
Interval time.Duration
}
func (b *ConstantBackOff) Reset() {}
func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
func NewConstantBackOff(d time.Duration) *ConstantBackOff {
return &ConstantBackOff{Interval: d}
}
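
A usage sketch for the BackOff interface above, driving retries by hand; the doSomething operation is a placeholder:

b := NewExponentialBackOff()
for {
    err := doSomething() // placeholder operation
    if err == nil {
        break
    }
    d := b.NextBackOff()
    if d == Stop {
        break // the policy says no more retries
    }
    time.Sleep(d)
}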

vendor/github.com/cenkalti/backoff/v4/context.go (generated, vendored, new file, 62 lines)

@@ -0,0 +1,62 @@
package backoff
import (
"context"
"time"
)
// BackOffContext is a backoff policy that stops retrying after the context
// is canceled.
type BackOffContext interface { // nolint: golint
BackOff
Context() context.Context
}
type backOffContext struct {
BackOff
ctx context.Context
}
// WithContext returns a BackOffContext with context ctx
//
// ctx must not be nil
func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
if ctx == nil {
panic("nil context")
}
if b, ok := b.(*backOffContext); ok {
return &backOffContext{
BackOff: b.BackOff,
ctx: ctx,
}
}
return &backOffContext{
BackOff: b,
ctx: ctx,
}
}
func getContext(b BackOff) context.Context {
if cb, ok := b.(BackOffContext); ok {
return cb.Context()
}
if tb, ok := b.(*backOffTries); ok {
return getContext(tb.delegate)
}
return context.Background()
}
func (b *backOffContext) Context() context.Context {
return b.ctx
}
func (b *backOffContext) NextBackOff() time.Duration {
select {
case <-b.ctx.Done():
return Stop
default:
return b.BackOff.NextBackOff()
}
}
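
A usage sketch for WithContext, stopping retries once the context is done; the timeout is illustrative:

ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
b := WithContext(NewExponentialBackOff(), ctx)
// b.NextBackOff() returns Stop once ctx is canceled or times out.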

vendor/github.com/cenkalti/backoff/v4/exponential.go (generated, vendored, new file, 161 lines)

@@ -0,0 +1,161 @@
package backoff
import (
"math/rand"
"time"
)
/*
ExponentialBackOff is a backoff implementation that increases the backoff
period for each retry attempt using a randomization function that grows exponentially.
NextBackOff() is calculated using the following formula:
randomized interval =
RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
In other words NextBackOff() will range between the randomization factor
percentage below and above the retry interval.
For example, given the following parameters:
RetryInterval = 2
RandomizationFactor = 0.5
Multiplier = 2
the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
multiplied by the exponential, that is, between 2 and 6 seconds.
Note: MaxInterval caps the RetryInterval and not the randomized interval.
If the time elapsed since an ExponentialBackOff instance is created goes past the
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
The elapsed time can be reset by calling Reset().
Example: Given the following default arguments, and assuming we go over the
MaxElapsedTime on the 10th try, the sequence for 10 tries will be:
Request # RetryInterval (seconds) Randomized Interval (seconds)
1 0.5 [0.25, 0.75]
2 0.75 [0.375, 1.125]
3 1.125 [0.562, 1.687]
4 1.687 [0.8435, 2.53]
5 2.53 [1.265, 3.795]
6 3.795 [1.897, 5.692]
7 5.692 [2.846, 8.538]
8 8.538 [4.269, 12.807]
9 12.807 [6.403, 19.210]
10 19.210 backoff.Stop
Note: Implementation is not thread-safe.
*/
type ExponentialBackOff struct {
InitialInterval time.Duration
RandomizationFactor float64
Multiplier float64
MaxInterval time.Duration
// After MaxElapsedTime the ExponentialBackOff returns Stop.
// It never stops if MaxElapsedTime == 0.
MaxElapsedTime time.Duration
Stop time.Duration
Clock Clock
currentInterval time.Duration
startTime time.Time
}
// Clock is an interface that returns current time for BackOff.
type Clock interface {
Now() time.Time
}
// Default values for ExponentialBackOff.
const (
DefaultInitialInterval = 500 * time.Millisecond
DefaultRandomizationFactor = 0.5
DefaultMultiplier = 1.5
DefaultMaxInterval = 60 * time.Second
DefaultMaxElapsedTime = 15 * time.Minute
)
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
func NewExponentialBackOff() *ExponentialBackOff {
b := &ExponentialBackOff{
InitialInterval: DefaultInitialInterval,
RandomizationFactor: DefaultRandomizationFactor,
Multiplier: DefaultMultiplier,
MaxInterval: DefaultMaxInterval,
MaxElapsedTime: DefaultMaxElapsedTime,
Stop: Stop,
Clock: SystemClock,
}
b.Reset()
return b
}
type systemClock struct{}
func (t systemClock) Now() time.Time {
return time.Now()
}
// SystemClock implements Clock interface that uses time.Now().
var SystemClock = systemClock{}
// Reset the interval back to the initial retry interval and restarts the timer.
// Reset must be called before using b.
func (b *ExponentialBackOff) Reset() {
b.currentInterval = b.InitialInterval
b.startTime = b.Clock.Now()
}
// NextBackOff calculates the next backoff interval using the formula:
// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
func (b *ExponentialBackOff) NextBackOff() time.Duration {
// Make sure we have not gone over the maximum elapsed time.
elapsed := b.GetElapsedTime()
next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
b.incrementCurrentInterval()
if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
return b.Stop
}
return next
}
// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
// is created and is reset when Reset() is called.
//
// The elapsed time is computed using time.Now().UnixNano(). It is
// safe to call even while the backoff policy is used by a running
// ticker.
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
return b.Clock.Now().Sub(b.startTime)
}
// Increments the current interval by multiplying it with the multiplier.
func (b *ExponentialBackOff) incrementCurrentInterval() {
// Check for overflow, if overflow is detected set the current interval to the max interval.
if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
b.currentInterval = b.MaxInterval
} else {
b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
}
}
// Returns a random value from the following interval:
// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
if randomizationFactor == 0 {
return currentInterval // make sure no randomness is used when randomizationFactor is 0.
}
var delta = randomizationFactor * float64(currentInterval)
var minInterval = float64(currentInterval) - delta
var maxInterval = float64(currentInterval) + delta
// Get a random value from the range [minInterval, maxInterval].
// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
// we want a 33% chance for selecting either 1, 2 or 3.
return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
}
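A quick sketch of the resulting schedule (assuming the v4 module path): disabling randomization makes NextBackOff() deterministic, so the Multiplier-driven growth from the formula above is easy to observe.

package main

import (
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	b := backoff.NewExponentialBackOff()
	b.RandomizationFactor = 0 // deterministic output for the demo

	for i := 0; i < 5; i++ {
		fmt.Println(b.NextBackOff()) // 500ms, 750ms, 1.125s, 1.6875s, 2.53125s
	}
}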

vendor/github.com/cenkalti/backoff/v4/retry.go generated vendored Normal file

@ -0,0 +1,112 @@
package backoff
import (
"errors"
"time"
)
// An Operation is executed by Retry() or RetryNotify().
// The operation will be retried using a backoff policy if it returns an error.
type Operation func() error
// Notify is a notify-on-error function. It receives an operation error and
// backoff delay if the operation failed (with an error).
//
// NOTE that if the backoff policy states to stop retrying,
// the notify function isn't called.
type Notify func(error, time.Duration)
// Retry the operation o until it does not return error or BackOff stops.
// o is guaranteed to be run at least once.
//
// If o returns a *PermanentError, the operation is not retried, and the
// wrapped error is returned.
//
// Retry sleeps the goroutine for the duration returned by BackOff after a
// failed operation returns.
func Retry(o Operation, b BackOff) error {
return RetryNotify(o, b, nil)
}
// RetryNotify calls notify function with the error and wait duration
// for each failed attempt before sleep.
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
return RetryNotifyWithTimer(operation, b, notify, nil)
}
// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
// for each failed attempt before sleep.
// A default timer that uses system timer is used when nil is passed.
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
var err error
var next time.Duration
if t == nil {
t = &defaultTimer{}
}
defer func() {
t.Stop()
}()
ctx := getContext(b)
b.Reset()
for {
if err = operation(); err == nil {
return nil
}
var permanent *PermanentError
if errors.As(err, &permanent) {
return permanent.Err
}
if next = b.NextBackOff(); next == Stop {
if cerr := ctx.Err(); cerr != nil {
return cerr
}
return err
}
if notify != nil {
notify(err, next)
}
t.Start(next)
select {
case <-ctx.Done():
return ctx.Err()
case <-t.C():
}
}
}
// PermanentError signals that the operation should not be retried.
type PermanentError struct {
Err error
}
func (e *PermanentError) Error() string {
return e.Err.Error()
}
func (e *PermanentError) Unwrap() error {
return e.Err
}
func (e *PermanentError) Is(target error) bool {
_, ok := target.(*PermanentError)
return ok
}
// Permanent wraps the given err in a *PermanentError.
func Permanent(err error) error {
if err == nil {
return nil
}
return &PermanentError{
Err: err,
}
}
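A hedged usage sketch tying Retry and Permanent together. The endpoint, status handling, and fetchHealth name are placeholders, not part of the library: network errors are retried on the policy's schedule, while an authorization failure is marked permanent and returned immediately (unwrapped, as documented above).

package main

import (
	"errors"
	"log"
	"net/http"

	"github.com/cenkalti/backoff/v4"
)

// fetchHealth is a hypothetical operation: network errors are transient and
// retried, while a 401 is wrapped in Permanent so Retry gives up at once.
func fetchHealth() error {
	resp, err := http.Get("https://example.com/healthz") // placeholder URL
	if err != nil {
		return err // transient: retried after the next backoff interval
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusUnauthorized {
		return backoff.Permanent(errors.New("unauthorized"))
	}
	return nil
}

func main() {
	if err := backoff.Retry(fetchHealth, backoff.NewExponentialBackOff()); err != nil {
		log.Fatal(err)
	}
}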

vendor/github.com/cenkalti/backoff/v4/ticker.go generated vendored Normal file

@ -0,0 +1,97 @@
package backoff
import (
"context"
"sync"
"time"
)
// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
//
// Ticks will continue to arrive when the previous operation is still running,
// so operations that take a while to fail could run in quick succession.
type Ticker struct {
C <-chan time.Time
c chan time.Time
b BackOff
ctx context.Context
timer Timer
stop chan struct{}
stopOnce sync.Once
}
// NewTicker returns a new Ticker containing a channel that will send
// the time at times specified by the BackOff argument. Ticker is
// guaranteed to tick at least once. The channel is closed when Stop
// method is called or BackOff stops. It is not safe to manipulate the
// provided backoff policy (notably calling NextBackOff or Reset)
// while the ticker is running.
func NewTicker(b BackOff) *Ticker {
return NewTickerWithTimer(b, &defaultTimer{})
}
// NewTickerWithTimer returns a new Ticker with a custom timer.
// A default timer that uses system timer is used when nil is passed.
func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
if timer == nil {
timer = &defaultTimer{}
}
c := make(chan time.Time)
t := &Ticker{
C: c,
c: c,
b: b,
ctx: getContext(b),
timer: timer,
stop: make(chan struct{}),
}
t.b.Reset()
go t.run()
return t
}
// Stop turns off a ticker. After Stop, no more ticks will be sent.
func (t *Ticker) Stop() {
t.stopOnce.Do(func() { close(t.stop) })
}
func (t *Ticker) run() {
c := t.c
defer close(c)
// Ticker is guaranteed to tick at least once.
afterC := t.send(time.Now())
for {
if afterC == nil {
return
}
select {
case tick := <-afterC:
afterC = t.send(tick)
case <-t.stop:
t.c = nil // Prevent future ticks from being sent to the channel.
return
case <-t.ctx.Done():
return
}
}
}
func (t *Ticker) send(tick time.Time) <-chan time.Time {
select {
case t.c <- tick:
case <-t.stop:
return nil
}
next := t.b.NextBackOff()
if next == Stop {
t.Stop()
return nil
}
t.timer.Start(next)
return t.timer.C()
}
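A minimal Ticker sketch; doWork is a hypothetical stand-in for a fallible operation. Ticks arrive on C spaced by the policy's intervals, and the channel closes once the policy returns Stop or Stop() is called.

package main

import (
	"errors"
	"log"

	"github.com/cenkalti/backoff/v4"
)

// doWork is a hypothetical fallible operation.
func doWork() error { return errors.New("still failing") }

func main() {
	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
	defer ticker.Stop()

	// Ticks are spaced by the policy's intervals; the channel closes when
	// the policy returns Stop (here after MaxElapsedTime) or on Stop().
	for range ticker.C {
		if err := doWork(); err != nil {
			log.Println("retrying:", err)
			continue
		}
		break // success
	}
}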

vendor/github.com/cenkalti/backoff/v4/timer.go generated vendored Normal file

@ -0,0 +1,35 @@
package backoff
import "time"
type Timer interface {
Start(duration time.Duration)
Stop()
C() <-chan time.Time
}
// defaultTimer implements Timer interface using time.Timer
type defaultTimer struct {
timer *time.Timer
}
// C returns the timers channel which receives the current time when the timer fires.
func (t *defaultTimer) C() <-chan time.Time {
return t.timer.C
}
// Start starts the timer to fire after the given duration
func (t *defaultTimer) Start(duration time.Duration) {
if t.timer == nil {
t.timer = time.NewTimer(duration)
} else {
t.timer.Reset(duration)
}
}
// Stop is called when the timer is not used anymore and resources may be freed.
func (t *defaultTimer) Stop() {
if t.timer != nil {
t.timer.Stop()
}
}
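Since Timer is exported, callers can plug their own implementation into RetryNotifyWithTimer or NewTickerWithTimer. A sketch of a hypothetical test helper (fakeTimer and newFakeTimer are illustrative names, not part of the package) that fires immediately so retry loops run without real sleeps:

package backofftest

import "time"

// fakeTimer is a hypothetical Timer for tests: Start "fires" immediately,
// whatever duration is requested.
// Usage: backoff.RetryNotifyWithTimer(op, policy, nil, newFakeTimer())
type fakeTimer struct{ c chan time.Time }

func newFakeTimer() *fakeTimer { return &fakeTimer{c: make(chan time.Time, 1)} }

func (t *fakeTimer) C() <-chan time.Time { return t.c }

// Start ignores the requested delay; the buffered channel means the send
// never blocks, because the retry loop drains it once per attempt.
func (t *fakeTimer) Start(time.Duration) { t.c <- time.Now() }

func (t *fakeTimer) Stop() {}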

vendor/github.com/cenkalti/backoff/v4/tries.go generated vendored Normal file

@ -0,0 +1,38 @@
package backoff
import "time"
/*
WithMaxRetries creates a wrapper around another BackOff, which will
return Stop if NextBackOff() has been called too many times since
the last time Reset() was called
Note: Implementation is not thread-safe.
*/
func WithMaxRetries(b BackOff, max uint64) BackOff {
return &backOffTries{delegate: b, maxTries: max}
}
type backOffTries struct {
delegate BackOff
maxTries uint64
numTries uint64
}
func (b *backOffTries) NextBackOff() time.Duration {
if b.maxTries == 0 {
return Stop
}
if b.maxTries > 0 {
if b.maxTries <= b.numTries {
return Stop
}
b.numTries++
}
return b.delegate.NextBackOff()
}
func (b *backOffTries) Reset() {
b.numTries = 0
b.delegate.Reset()
}
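A small sketch of the resulting attempt budget, assuming the Retry helper from retry.go: Retry always runs the operation once before consulting the policy, so a limit of 3 retries allows up to 4 attempts.

package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	attempts := 0
	op := func() error {
		attempts++
		return errors.New("always fails")
	}

	// 3 retries on top of the initial attempt: 4 calls to op in total.
	b := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 3)
	err := backoff.Retry(op, b)
	fmt.Println(attempts, err) // 4 always fails
}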

vendor/github.com/evanphx/json-patch/v5/LICENSE generated vendored Normal file

@ -0,0 +1,25 @@
Copyright (c) 2014, Evan Phoenix
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the Evan Phoenix nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/evanphx/json-patch/v5/errors.go generated vendored Normal file

@ -0,0 +1,38 @@
package jsonpatch
import "fmt"
// AccumulatedCopySizeError is an error type returned when the accumulated size
// increase caused by copy operations in a patch operation has exceeded the
// limit.
type AccumulatedCopySizeError struct {
limit int64
accumulated int64
}
// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
return &AccumulatedCopySizeError{limit: l, accumulated: a}
}
// Error implements the error interface.
func (a *AccumulatedCopySizeError) Error() string {
return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
}
// ArraySizeError is an error type returned when the array size has exceeded
// the limit.
type ArraySizeError struct {
limit int
size int
}
// NewArraySizeError returns an ArraySizeError.
func NewArraySizeError(l, s int) *ArraySizeError {
return &ArraySizeError{limit: l, size: s}
}
// Error implements the error interface.
func (a *ArraySizeError) Error() string {
return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit)
}

vendor/github.com/evanphx/json-patch/v5/merge.go generated vendored Normal file

@ -0,0 +1,408 @@
package jsonpatch
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
)
func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
curDoc, err := cur.intoDoc()
if err != nil {
pruneNulls(patch)
return patch
}
patchDoc, err := patch.intoDoc()
if err != nil {
return patch
}
mergeDocs(curDoc, patchDoc, mergeMerge)
return cur
}
func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
for k, v := range patch.obj {
if v == nil {
if mergeMerge {
idx := -1
for i, key := range doc.keys {
if key == k {
idx = i
break
}
}
if idx == -1 {
doc.keys = append(doc.keys, k)
}
doc.obj[k] = nil
} else {
_ = doc.remove(k, &ApplyOptions{})
}
} else {
cur, ok := doc.obj[k]
if !ok || cur == nil {
if !mergeMerge {
pruneNulls(v)
}
_ = doc.set(k, v, &ApplyOptions{})
} else {
_ = doc.set(k, merge(cur, v, mergeMerge), &ApplyOptions{})
}
}
}
}
func pruneNulls(n *lazyNode) {
sub, err := n.intoDoc()
if err == nil {
pruneDocNulls(sub)
} else {
ary, err := n.intoAry()
if err == nil {
pruneAryNulls(ary)
}
}
}
func pruneDocNulls(doc *partialDoc) *partialDoc {
for k, v := range doc.obj {
if v == nil {
_ = doc.remove(k, &ApplyOptions{})
} else {
pruneNulls(v)
}
}
return doc
}
func pruneAryNulls(ary *partialArray) *partialArray {
newAry := []*lazyNode{}
for _, v := range *ary {
if v != nil {
pruneNulls(v)
}
newAry = append(newAry, v)
}
*ary = newAry
return ary
}
var errBadJSONDoc = fmt.Errorf("Invalid JSON Document")
var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
// MergeMergePatches merges two merge patches together, such that
// applying this resulting merged merge patch to a document yields the same
// as merging each merge patch to the document in succession.
func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
return doMergePatch(patch1Data, patch2Data, true)
}
// MergePatch merges the patchData into the docData.
func MergePatch(docData, patchData []byte) ([]byte, error) {
return doMergePatch(docData, patchData, false)
}
func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
doc := &partialDoc{}
docErr := json.Unmarshal(docData, doc)
patch := &partialDoc{}
patchErr := json.Unmarshal(patchData, patch)
if isSyntaxError(docErr) {
return nil, errBadJSONDoc
}
if isSyntaxError(patchErr) {
return nil, errBadJSONPatch
}
if docErr == nil && doc.obj == nil {
return nil, errBadJSONDoc
}
if patchErr == nil && patch.obj == nil {
return nil, errBadJSONPatch
}
if docErr != nil || patchErr != nil {
// Not an error, just not a doc, so we turn straight into the patch
if patchErr == nil {
if mergeMerge {
doc = patch
} else {
doc = pruneDocNulls(patch)
}
} else {
patchAry := &partialArray{}
patchErr = json.Unmarshal(patchData, patchAry)
if patchErr != nil {
return nil, errBadJSONPatch
}
pruneAryNulls(patchAry)
out, patchErr := json.Marshal(patchAry)
if patchErr != nil {
return nil, errBadJSONPatch
}
return out, nil
}
} else {
mergeDocs(doc, patch, mergeMerge)
}
return json.Marshal(doc)
}
func isSyntaxError(err error) bool {
if _, ok := err.(*json.SyntaxError); ok {
return true
}
if _, ok := err.(*syntaxError); ok {
return true
}
return false
}
// resemblesJSONArray indicates whether the byte-slice "appears" to be
// a JSON array or not.
// False-positives are possible, as this function does not check the internal
// structure of the array. It only checks that the outer syntax is present and
// correct.
func resemblesJSONArray(input []byte) bool {
input = bytes.TrimSpace(input)
hasPrefix := bytes.HasPrefix(input, []byte("["))
hasSuffix := bytes.HasSuffix(input, []byte("]"))
return hasPrefix && hasSuffix
}
// CreateMergePatch will return a merge patch document capable of converting
// the original document(s) to the modified document(s).
// The parameters can be bytes of either two JSON Documents, or two arrays of
// JSON documents.
// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
originalResemblesArray := resemblesJSONArray(originalJSON)
modifiedResemblesArray := resemblesJSONArray(modifiedJSON)
// Do both byte-slices seem like JSON arrays?
if originalResemblesArray && modifiedResemblesArray {
return createArrayMergePatch(originalJSON, modifiedJSON)
}
// Are both byte-slices not arrays? Then they are likely JSON objects...
if !originalResemblesArray && !modifiedResemblesArray {
return createObjectMergePatch(originalJSON, modifiedJSON)
}
// None of the above? Then return an error because of mismatched types.
return nil, errBadMergeTypes
}
// createObjectMergePatch will return a merge-patch document capable of
// converting the original document to the modified document.
func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
originalDoc := map[string]interface{}{}
modifiedDoc := map[string]interface{}{}
err := json.Unmarshal(originalJSON, &originalDoc)
if err != nil {
return nil, errBadJSONDoc
}
err = json.Unmarshal(modifiedJSON, &modifiedDoc)
if err != nil {
return nil, errBadJSONDoc
}
dest, err := getDiff(originalDoc, modifiedDoc)
if err != nil {
return nil, err
}
return json.Marshal(dest)
}
// createArrayMergePatch will return an array of merge-patch documents capable
// of converting the original document to the modified document for each
// pair of JSON documents provided in the arrays.
// Arrays of mismatched sizes will result in an error.
func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
originalDocs := []json.RawMessage{}
modifiedDocs := []json.RawMessage{}
err := json.Unmarshal(originalJSON, &originalDocs)
if err != nil {
return nil, errBadJSONDoc
}
err = json.Unmarshal(modifiedJSON, &modifiedDocs)
if err != nil {
return nil, errBadJSONDoc
}
total := len(originalDocs)
if len(modifiedDocs) != total {
return nil, errBadJSONDoc
}
result := []json.RawMessage{}
for i := 0; i < len(originalDocs); i++ {
original := originalDocs[i]
modified := modifiedDocs[i]
patch, err := createObjectMergePatch(original, modified)
if err != nil {
return nil, err
}
result = append(result, json.RawMessage(patch))
}
return json.Marshal(result)
}
// Returns true if the arrays match (must be JSON types).
// As is idiomatic for go, an empty array is not the same as a nil array.
func matchesArray(a, b []interface{}) bool {
if len(a) != len(b) {
return false
}
if (a == nil && b != nil) || (a != nil && b == nil) {
return false
}
for i := range a {
if !matchesValue(a[i], b[i]) {
return false
}
}
return true
}
// Returns true if the values match (must be JSON types).
// The types of the values must match, otherwise it will always return false
// If two map[string]interface{} are given, all elements must match.
func matchesValue(av, bv interface{}) bool {
if reflect.TypeOf(av) != reflect.TypeOf(bv) {
return false
}
switch at := av.(type) {
case string:
bt := bv.(string)
if bt == at {
return true
}
case float64:
bt := bv.(float64)
if bt == at {
return true
}
case bool:
bt := bv.(bool)
if bt == at {
return true
}
case nil:
// Both nil, fine.
return true
case map[string]interface{}:
bt := bv.(map[string]interface{})
if len(bt) != len(at) {
return false
}
for key := range bt {
av, aOK := at[key]
bv, bOK := bt[key]
if aOK != bOK {
return false
}
if !matchesValue(av, bv) {
return false
}
}
return true
case []interface{}:
bt := bv.([]interface{})
return matchesArray(at, bt)
}
return false
}
// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
into := map[string]interface{}{}
for key, bv := range b {
av, ok := a[key]
// value was added
if !ok {
into[key] = bv
continue
}
// If types have changed, replace completely
if reflect.TypeOf(av) != reflect.TypeOf(bv) {
into[key] = bv
continue
}
// Types are the same, compare values
switch at := av.(type) {
case map[string]interface{}:
bt := bv.(map[string]interface{})
dst := make(map[string]interface{}, len(bt))
dst, err := getDiff(at, bt)
if err != nil {
return nil, err
}
if len(dst) > 0 {
into[key] = dst
}
case string, float64, bool:
if !matchesValue(av, bv) {
into[key] = bv
}
case []interface{}:
bt := bv.([]interface{})
if !matchesArray(at, bt) {
into[key] = bv
}
case nil:
switch bv.(type) {
case nil:
// Both nil, fine.
default:
into[key] = bv
}
default:
panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
}
}
// Now add all deleted values as nil
for key := range a {
_, found := b[key]
if !found {
into[key] = nil
}
}
return into, nil
}
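To make the RFC 7386 semantics above concrete, a minimal sketch (assuming the v5 module path): a null in the patch deletes a key when applying, and reappears as null when diffing two documents back into a patch.

package main

import (
	"fmt"
	"log"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	doc := []byte(`{"name":"app","replicas":1,"labels":{"tier":"db"}}`)
	patch := []byte(`{"replicas":3,"labels":null}`)

	// Merge-patch semantics: "replicas" is overwritten and the null value
	// deletes the "labels" key entirely.
	merged, err := jsonpatch.MergePatch(doc, patch)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(merged)) // {"name":"app","replicas":3}

	// Going the other way, CreateMergePatch diffs two documents back into a
	// merge patch (deleted keys come out as null, as built in getDiff above).
	p, err := jsonpatch.CreateMergePatch(doc, merged)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(p)) // {"labels":null,"replicas":3}
}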

vendor/github.com/evanphx/json-patch/v5/patch.go generated vendored Normal file
File diff suppressed because it is too large

@ -65,7 +65,8 @@ being called, or called more than once, as well as concurrent calls to
Unfortunately this package is not perfect either. It's possible that it is
still missing some interfaces provided by the go core (let me know if you find
one), and it won't work for applications adding their own interfaces into the
mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying
`http.ResponseWriter` and type-assert the result to its other interfaces.

However, hopefully the explanation above has sufficiently scared you of rolling
your own solution to this problem. httpsnoop may still break your application,
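A hedged sketch of that escape hatch; statusWriter is a hypothetical interface an outer middleware's writer might implement, not anything httpsnoop provides:

package middleware

import (
	"log"
	"net/http"

	"github.com/felixge/httpsnoop"
)

// statusWriter is a hypothetical interface an outer middleware's writer
// might implement; Wrap does not re-export custom interfaces.
type statusWriter interface{ Status() int }

func handler(w http.ResponseWriter, r *http.Request) {
	// Unwrap peels off the httpsnoop layers so the assertion reaches the
	// writer underneath.
	if sw, ok := httpsnoop.Unwrap(w).(statusWriter); ok {
		log.Println("status so far:", sw.Status())
	}
}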


@ -3,7 +3,6 @@ package httpsnoop
import ( import (
"io" "io"
"net/http" "net/http"
"sync"
"time" "time"
) )
@ -36,17 +35,23 @@ func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Me
// sugar on top of this func), but is a more usable interface if your // sugar on top of this func), but is a more usable interface if your
// application doesn't use the Go http.Handler interface. // application doesn't use the Go http.Handler interface.
func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics { func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics {
m := Metrics{Code: http.StatusOK}
m.CaptureMetrics(w, fn)
return m
}
// CaptureMetrics wraps w and calls fn with the wrapped w and updates
// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn,
// but allows one to customize starting Metrics object.
func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) {
var ( var (
start = time.Now() start = time.Now()
m = Metrics{Code: http.StatusOK}
headerWritten bool headerWritten bool
lock sync.Mutex
hooks = Hooks{ hooks = Hooks{
WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc { WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc {
return func(code int) { return func(code int) {
next(code) next(code)
lock.Lock()
defer lock.Unlock()
if !headerWritten { if !headerWritten {
m.Code = code m.Code = code
headerWritten = true headerWritten = true
@ -57,8 +62,7 @@ func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metri
Write: func(next WriteFunc) WriteFunc { Write: func(next WriteFunc) WriteFunc {
return func(p []byte) (int, error) { return func(p []byte) (int, error) {
n, err := next(p) n, err := next(p)
lock.Lock()
defer lock.Unlock()
m.Written += int64(n) m.Written += int64(n)
headerWritten = true headerWritten = true
return n, err return n, err
@ -68,8 +72,7 @@ func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metri
ReadFrom: func(next ReadFromFunc) ReadFromFunc { ReadFrom: func(next ReadFromFunc) ReadFromFunc {
return func(src io.Reader) (int64, error) { return func(src io.Reader) (int64, error) {
n, err := next(src) n, err := next(src)
lock.Lock()
defer lock.Unlock()
headerWritten = true headerWritten = true
m.Written += n m.Written += n
return n, err return n, err
@ -79,6 +82,5 @@ func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metri
) )
fn(Wrap(w, hooks)) fn(Wrap(w, hooks))
m.Duration = time.Since(start) m.Duration += time.Since(start)
return m
} }
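The new method form above allows metrics to accumulate across several captures (note Duration is now added to rather than assigned). A usage sketch, assuming the Metrics fields Code, Written, and Duration from the rest of the package; both handlers are hypothetical:

package main

import (
	"log"
	"net/http"

	"github.com/felixge/httpsnoop"
)

func main() {
	// first and second are hypothetical handlers whose metrics we want to
	// accumulate into a single Metrics value.
	first := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("a")) })
	second := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("b")) })

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		m := httpsnoop.Metrics{Code: http.StatusOK}
		// Duration is added to (+=) rather than assigned, so repeated
		// calls on the same Metrics value accumulate.
		m.CaptureMetrics(w, func(ww http.ResponseWriter) { first.ServeHTTP(ww, r) })
		m.CaptureMetrics(w, func(ww http.ResponseWriter) { second.ServeHTTP(ww, r) })
		log.Printf("code=%d bytes=%d took=%s", m.Code, m.Written, m.Duration)
	})

	log.Fatal(http.ListenAndServe(":8080", nil))
}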


@ -74,243 +74,275 @@ func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter {
// combination 1/32 // combination 1/32
case !i0 && !i1 && !i2 && !i3 && !i4: case !i0 && !i1 && !i2 && !i3 && !i4:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
}{rw} }{rw, rw}
// combination 2/32 // combination 2/32
case !i0 && !i1 && !i2 && !i3 && i4: case !i0 && !i1 && !i2 && !i3 && i4:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.Pusher http.Pusher
}{rw, rw} }{rw, rw, rw}
// combination 3/32 // combination 3/32
case !i0 && !i1 && !i2 && i3 && !i4: case !i0 && !i1 && !i2 && i3 && !i4:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
io.ReaderFrom io.ReaderFrom
}{rw, rw} }{rw, rw, rw}
// combination 4/32 // combination 4/32
case !i0 && !i1 && !i2 && i3 && i4: case !i0 && !i1 && !i2 && i3 && i4:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
io.ReaderFrom io.ReaderFrom
http.Pusher http.Pusher
}{rw, rw, rw} }{rw, rw, rw, rw}
// combination 5/32 // combination 5/32
case !i0 && !i1 && i2 && !i3 && !i4: case !i0 && !i1 && i2 && !i3 && !i4:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.Hijacker http.Hijacker
}{rw, rw} }{rw, rw, rw}
// combination 6/32 // combination 6/32
case !i0 && !i1 && i2 && !i3 && i4: case !i0 && !i1 && i2 && !i3 && i4:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.Hijacker http.Hijacker
http.Pusher http.Pusher
}{rw, rw, rw} }{rw, rw, rw, rw}
// combination 7/32 // combination 7/32
case !i0 && !i1 && i2 && i3 && !i4: case !i0 && !i1 && i2 && i3 && !i4:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.Hijacker http.Hijacker
io.ReaderFrom io.ReaderFrom
}{rw, rw, rw} }{rw, rw, rw, rw}
// combination 8/32 // combination 8/32
case !i0 && !i1 && i2 && i3 && i4: case !i0 && !i1 && i2 && i3 && i4:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.Hijacker http.Hijacker
io.ReaderFrom io.ReaderFrom
http.Pusher http.Pusher
}{rw, rw, rw, rw} }{rw, rw, rw, rw, rw}
// combination 9/32 // combination 9/32
case !i0 && i1 && !i2 && !i3 && !i4: case !i0 && i1 && !i2 && !i3 && !i4:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.CloseNotifier http.CloseNotifier
}{rw, rw} }{rw, rw, rw}
// combination 10/32 // combination 10/32
case !i0 && i1 && !i2 && !i3 && i4: case !i0 && i1 && !i2 && !i3 && i4:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.CloseNotifier http.CloseNotifier
http.Pusher http.Pusher
}{rw, rw, rw} }{rw, rw, rw, rw}
// combination 11/32 // combination 11/32
case !i0 && i1 && !i2 && i3 && !i4: case !i0 && i1 && !i2 && i3 && !i4:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.CloseNotifier http.CloseNotifier
io.ReaderFrom io.ReaderFrom
}{rw, rw, rw} }{rw, rw, rw, rw}
// combination 12/32 // combination 12/32
case !i0 && i1 && !i2 && i3 && i4: case !i0 && i1 && !i2 && i3 && i4:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.CloseNotifier http.CloseNotifier
io.ReaderFrom io.ReaderFrom
http.Pusher http.Pusher
}{rw, rw, rw, rw} }{rw, rw, rw, rw, rw}
// combination 13/32 // combination 13/32
case !i0 && i1 && i2 && !i3 && !i4: case !i0 && i1 && i2 && !i3 && !i4:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.CloseNotifier http.CloseNotifier
http.Hijacker http.Hijacker
}{rw, rw, rw} }{rw, rw, rw, rw}
// combination 14/32 // combination 14/32
case !i0 && i1 && i2 && !i3 && i4: case !i0 && i1 && i2 && !i3 && i4:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.CloseNotifier http.CloseNotifier
http.Hijacker http.Hijacker
http.Pusher http.Pusher
}{rw, rw, rw, rw} }{rw, rw, rw, rw, rw}
// combination 15/32 // combination 15/32
case !i0 && i1 && i2 && i3 && !i4: case !i0 && i1 && i2 && i3 && !i4:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.CloseNotifier http.CloseNotifier
http.Hijacker http.Hijacker
io.ReaderFrom io.ReaderFrom
}{rw, rw, rw, rw} }{rw, rw, rw, rw, rw}
// combination 16/32 // combination 16/32
case !i0 && i1 && i2 && i3 && i4: case !i0 && i1 && i2 && i3 && i4:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.CloseNotifier http.CloseNotifier
http.Hijacker http.Hijacker
io.ReaderFrom io.ReaderFrom
http.Pusher http.Pusher
}{rw, rw, rw, rw, rw}
// combination 17/32
case i0 && !i1 && !i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Flusher
}{rw, rw}
// combination 18/32
case i0 && !i1 && !i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Flusher
http.Pusher
}{rw, rw, rw}
// combination 19/32
case i0 && !i1 && !i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Flusher
io.ReaderFrom
}{rw, rw, rw}
// combination 20/32
case i0 && !i1 && !i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Flusher
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw}
// combination 21/32
case i0 && !i1 && i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Flusher
http.Hijacker
}{rw, rw, rw}
// combination 22/32
case i0 && !i1 && i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Flusher
http.Hijacker
http.Pusher
}{rw, rw, rw, rw}
// combination 23/32
case i0 && !i1 && i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Flusher
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 24/32
case i0 && !i1 && i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Flusher
http.Hijacker
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 25/32
case i0 && i1 && !i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Flusher
http.CloseNotifier
}{rw, rw, rw}
// combination 26/32
case i0 && i1 && !i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Pusher
}{rw, rw, rw, rw}
// combination 27/32
case i0 && i1 && !i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Flusher
http.CloseNotifier
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 28/32
case i0 && i1 && !i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Flusher
http.CloseNotifier
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 29/32
case i0 && i1 && i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
}{rw, rw, rw, rw}
// combination 30/32
case i0 && i1 && i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 31/32
case i0 && i1 && i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 32/32
case i0 && i1 && i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw, rw} }{rw, rw, rw, rw, rw, rw}
// combination 17/32
case i0 && !i1 && !i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
}{rw, rw, rw}
// combination 18/32
case i0 && !i1 && !i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Pusher
}{rw, rw, rw, rw}
// combination 19/32
case i0 && !i1 && !i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 20/32
case i0 && !i1 && !i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 21/32
case i0 && !i1 && i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
}{rw, rw, rw, rw}
// combination 22/32
case i0 && !i1 && i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 23/32
case i0 && !i1 && i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 24/32
case i0 && !i1 && i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw, rw}
// combination 25/32
case i0 && i1 && !i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
}{rw, rw, rw, rw}
// combination 26/32
case i0 && i1 && !i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 27/32
case i0 && i1 && !i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 28/32
case i0 && i1 && !i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw, rw}
// combination 29/32
case i0 && i1 && i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
}{rw, rw, rw, rw, rw}
// combination 30/32
case i0 && i1 && i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
http.Pusher
}{rw, rw, rw, rw, rw, rw}
// combination 31/32
case i0 && i1 && i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw, rw}
// combination 32/32
case i0 && i1 && i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw, rw, rw}
} }
panic("unreachable") panic("unreachable")
} }
@ -320,6 +352,10 @@ type rw struct {
h Hooks h Hooks
} }
func (w *rw) Unwrap() http.ResponseWriter {
return w.w
}
func (w *rw) Header() http.Header { func (w *rw) Header() http.Header {
f := w.w.(http.ResponseWriter).Header f := w.w.(http.ResponseWriter).Header
if w.h.Header != nil { if w.h.Header != nil {
@ -383,3 +419,18 @@ func (w *rw) Push(target string, opts *http.PushOptions) error {
} }
return f(target, opts) return f(target, opts)
} }
type Unwrapper interface {
Unwrap() http.ResponseWriter
}
// Unwrap returns the underlying http.ResponseWriter from within zero or more
// layers of httpsnoop wrappers.
func Unwrap(w http.ResponseWriter) http.ResponseWriter {
if rw, ok := w.(Unwrapper); ok {
// recurse until rw.Unwrap() returns a non-Unwrapper
return Unwrap(rw.Unwrap())
} else {
return w
}
}


@ -68,115 +68,131 @@ func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter {
// combination 1/16 // combination 1/16
case !i0 && !i1 && !i2 && !i3: case !i0 && !i1 && !i2 && !i3:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
}{rw} }{rw, rw}
// combination 2/16 // combination 2/16
case !i0 && !i1 && !i2 && i3: case !i0 && !i1 && !i2 && i3:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
io.ReaderFrom io.ReaderFrom
}{rw, rw} }{rw, rw, rw}
// combination 3/16 // combination 3/16
case !i0 && !i1 && i2 && !i3: case !i0 && !i1 && i2 && !i3:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.Hijacker http.Hijacker
}{rw, rw} }{rw, rw, rw}
// combination 4/16 // combination 4/16
case !i0 && !i1 && i2 && i3: case !i0 && !i1 && i2 && i3:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.Hijacker http.Hijacker
io.ReaderFrom io.ReaderFrom
}{rw, rw, rw} }{rw, rw, rw, rw}
// combination 5/16 // combination 5/16
case !i0 && i1 && !i2 && !i3: case !i0 && i1 && !i2 && !i3:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.CloseNotifier http.CloseNotifier
}{rw, rw} }{rw, rw, rw}
// combination 6/16 // combination 6/16
case !i0 && i1 && !i2 && i3: case !i0 && i1 && !i2 && i3:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.CloseNotifier http.CloseNotifier
io.ReaderFrom io.ReaderFrom
}{rw, rw, rw} }{rw, rw, rw, rw}
// combination 7/16 // combination 7/16
case !i0 && i1 && i2 && !i3: case !i0 && i1 && i2 && !i3:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.CloseNotifier http.CloseNotifier
http.Hijacker http.Hijacker
}{rw, rw, rw} }{rw, rw, rw, rw}
// combination 8/16 // combination 8/16
case !i0 && i1 && i2 && i3: case !i0 && i1 && i2 && i3:
return struct { return struct {
Unwrapper
http.ResponseWriter http.ResponseWriter
http.CloseNotifier http.CloseNotifier
http.Hijacker http.Hijacker
io.ReaderFrom io.ReaderFrom
}{rw, rw, rw, rw}
// combination 9/16
case i0 && !i1 && !i2 && !i3:
return struct {
http.ResponseWriter
http.Flusher
}{rw, rw}
// combination 10/16
case i0 && !i1 && !i2 && i3:
return struct {
http.ResponseWriter
http.Flusher
io.ReaderFrom
}{rw, rw, rw}
// combination 11/16
case i0 && !i1 && i2 && !i3:
return struct {
http.ResponseWriter
http.Flusher
http.Hijacker
}{rw, rw, rw}
// combination 12/16
case i0 && !i1 && i2 && i3:
return struct {
http.ResponseWriter
http.Flusher
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 13/16
case i0 && i1 && !i2 && !i3:
return struct {
http.ResponseWriter
http.Flusher
http.CloseNotifier
}{rw, rw, rw}
// combination 14/16
case i0 && i1 && !i2 && i3:
return struct {
http.ResponseWriter
http.Flusher
http.CloseNotifier
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 15/16
case i0 && i1 && i2 && !i3:
return struct {
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
}{rw, rw, rw, rw}
// combination 16/16
case i0 && i1 && i2 && i3:
return struct {
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw} }{rw, rw, rw, rw, rw}
// combination 9/16
case i0 && !i1 && !i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
}{rw, rw, rw}
// combination 10/16
case i0 && !i1 && !i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 11/16
case i0 && !i1 && i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
}{rw, rw, rw, rw}
// combination 12/16
case i0 && !i1 && i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 13/16
case i0 && i1 && !i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
}{rw, rw, rw, rw}
// combination 14/16
case i0 && i1 && !i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 15/16
case i0 && i1 && i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
}{rw, rw, rw, rw, rw}
// combination 16/16
case i0 && i1 && i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw, rw}
} }
panic("unreachable") panic("unreachable")
} }
@ -186,6 +202,10 @@ type rw struct {
h Hooks h Hooks
} }
func (w *rw) Unwrap() http.ResponseWriter {
return w.w
}
func (w *rw) Header() http.Header { func (w *rw) Header() http.Header {
f := w.w.(http.ResponseWriter).Header f := w.w.(http.ResponseWriter).Header
if w.h.Header != nil { if w.h.Header != nil {
@ -241,3 +261,18 @@ func (w *rw) ReadFrom(src io.Reader) (int64, error) {
} }
return f(src) return f(src)
} }
type Unwrapper interface {
Unwrap() http.ResponseWriter
}
// Unwrap returns the underlying http.ResponseWriter from within zero or more
// layers of httpsnoop wrappers.
func Unwrap(w http.ResponseWriter) http.ResponseWriter {
if rw, ok := w.(Unwrapper); ok {
// recurse until rw.Unwrap() returns a non-Unwrapper
return Unwrap(rw.Unwrap())
} else {
return w
}
}


@ -1,6 +1,6 @@
-# Setup a Global .gitignore for OS and editor generated files:
-# https://help.github.com/articles/ignoring-files
-# git config --global core.excludesfile ~/.gitignore_global
-.vagrant
-*.sublime-project
+# go test -c output
+*.test
+*.test.exe
+
+# Output of go build ./cmd/fsnotify
+/fsnotify


@ -1,62 +0,0 @@
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# You can update this list using the following command:
#
# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS
# Please keep the list sorted.
Aaron L <aaron@bettercoder.net>
Adrien Bustany <adrien@bustany.org>
Alexey Kazakov <alkazako@redhat.com>
Amit Krishnan <amit.krishnan@oracle.com>
Anmol Sethi <me@anmol.io>
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
Brian Goff <cpuguy83@gmail.com>
Bruno Bigras <bigras.bruno@gmail.com>
Caleb Spare <cespare@gmail.com>
Case Nelson <case@teammating.com>
Chris Howey <howeyc@gmail.com>
Christoffer Buchholz <christoffer.buchholz@gmail.com>
Daniel Wagner-Hall <dawagner@gmail.com>
Dave Cheney <dave@cheney.net>
Eric Lin <linxiulei@gmail.com>
Evan Phoenix <evan@fallingsnow.net>
Francisco Souza <f@souza.cc>
Gautam Dey <gautam.dey77@gmail.com>
Hari haran <hariharan.uno@gmail.com>
Ichinose Shogo <shogo82148@gmail.com>
Johannes Ebke <johannes@ebke.org>
John C Barstow <jbowtie@amathaine.com>
Kelvin Fo <vmirage@gmail.com>
Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
Matt Layher <mdlayher@gmail.com>
Matthias Stone <matthias@bellstone.ca>
Nathan Youngman <git@nathany.com>
Nickolai Zeldovich <nickolai@csail.mit.edu>
Oliver Bristow <evilumbrella+github@gmail.com>
Patrick <patrick@dropbox.com>
Paul Hammond <paul@paulhammond.org>
Pawel Knap <pawelknap88@gmail.com>
Pieter Droogendijk <pieter@binky.org.uk>
Pratik Shinde <pratikshinde320@gmail.com>
Pursuit92 <JoshChase@techpursuit.net>
Riku Voipio <riku.voipio@linaro.org>
Rob Figueiredo <robfig@gmail.com>
Rodrigo Chiossi <rodrigochiossi@gmail.com>
Slawek Ligus <root@ooz.ie>
Soge Zhang <zhssoge@gmail.com>
Tiffany Jernigan <tiffany.jernigan@intel.com>
Tilak Sharma <tilaks@google.com>
Tobias Klauser <tobias.klauser@gmail.com>
Tom Payne <twpayne@gmail.com>
Travis Cline <travis.cline@gmail.com>
Tudor Golubenco <tudor.g@gmail.com>
Vahe Khachikyan <vahe@live.ca>
Yukang <moorekang@gmail.com>
bronze1man <bronze1man@gmail.com>
debrando <denis.brandolini@gmail.com>
henrikedwards <henrik.edwards@gmail.com>
铁哥 <guotie.9@gmail.com>


@ -7,9 +7,116 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
Nothing yet.
## [1.6.0] - 2022-10-13
This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
but not documented). It also increases the minimum Linux version to 2.6.32.
### Additions
- all: add `Event.Has()` and `Op.Has()` ([#477])
This makes checking events a lot easier; for example:
if event.Op&Write == Write && !(event.Op&Remove == Remove) {
}
Becomes:
if event.Has(Write) && !event.Has(Remove) {
}
- all: add cmd/fsnotify ([#463])
A command-line utility for testing and some examples.
### Changes and fixes
- inotify: don't ignore events for files that don't exist ([#260], [#470])
Previously the inotify watcher would call `os.Lstat()` to check if a file
still exists before emitting events.
This was inconsistent with other platforms and resulted in inconsistent event
reporting (e.g. when a file is quickly removed and re-created), and generally
a source of confusion. It was added in 2013 to fix a memory leak that no
longer exists.
- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's
not watched ([#460])
- inotify: replace epoll() with non-blocking inotify ([#434])
Non-blocking inotify was not generally available at the time this library was
written in 2014, but now it is. As a result, the minimum Linux version is
bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster.
- kqueue: don't check for events every 100ms ([#480])
The watcher would wake up every 100ms, even when there was nothing to do. Now
it waits until there is something to do.
- macos: retry opening files on EINTR ([#475])
- kqueue: skip unreadable files ([#479])
kqueue requires a file descriptor for every file in a directory; this would
fail if a file was unreadable by the current user. Now these files are simply
skipped.
- windows: fix renaming a watched directory if the parent is also watched ([#370])
- windows: increase buffer size from 4K to 64K ([#485])
- windows: close file handle on Remove() ([#288])
- kqueue: put pathname in the error if watching a file fails ([#471])
- inotify, windows: calling Close() more than once could race ([#465])
- kqueue: improve Close() performance ([#233])
- all: various documentation additions and clarifications.
[#233]: https://github.com/fsnotify/fsnotify/pull/233
[#260]: https://github.com/fsnotify/fsnotify/pull/260
[#288]: https://github.com/fsnotify/fsnotify/pull/288
[#370]: https://github.com/fsnotify/fsnotify/pull/370
[#434]: https://github.com/fsnotify/fsnotify/pull/434
[#460]: https://github.com/fsnotify/fsnotify/pull/460
[#463]: https://github.com/fsnotify/fsnotify/pull/463
[#465]: https://github.com/fsnotify/fsnotify/pull/465
[#470]: https://github.com/fsnotify/fsnotify/pull/470
[#471]: https://github.com/fsnotify/fsnotify/pull/471
[#475]: https://github.com/fsnotify/fsnotify/pull/475
[#477]: https://github.com/fsnotify/fsnotify/pull/477
[#479]: https://github.com/fsnotify/fsnotify/pull/479
[#480]: https://github.com/fsnotify/fsnotify/pull/480
[#485]: https://github.com/fsnotify/fsnotify/pull/485
## [1.5.4] - 2022-04-25
* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444)
* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443)
## [1.5.3] - 2022-04-22
* This version is retracted. An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445)
## [1.5.2] - 2022-04-21
* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
## [1.5.1] - 2021-08-24
* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
## [1.5.0] - 2021-08-20
@ -22,6 +129,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
[#385](https://github.com/fsnotify/fsnotify/pull/385)
* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
## [1.4.9] - 2020-03-11
* Move example usage to the readme #329. This may resolve #328.
## [1.4.8] - 2020-03-10
* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216)
* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265)
* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266)
* CI: Less verbosity (@nathany #267)
* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267)
* Tests: Check if channels are closed in the example (@alexeykazakov #244)
* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284)
* CI: Add windows to travis matrix (@cpuguy83 #284)
* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93)
* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219)
* Linux: open files with close-on-exec (@linxiulei #273)
* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 )
* Project: Add go.mod (@nathany #309)
* Project: Revise editor config (@nathany #309)
* Project: Update copyright for 2019 (@nathany #309)
* CI: Drop go1.8 from CI matrix (@nathany #309)
* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e )
## [1.4.7] - 2018-01-09
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)


@ -1,77 +1,26 @@
-# Contributing
-
-## Issues
-
-* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
-* Please indicate the platform you are using fsnotify on.
-* A code example to reproduce the problem is appreciated.
-
-## Pull Requests
-
-### Contributor License Agreement
-
-fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
-
-Please indicate that you have signed the CLA in your pull request.
-
-### How fsnotify is Developed
-
-* Development is done on feature branches.
-* Tests are run on BSD, Linux, macOS and Windows.
-* Pull requests are reviewed and [applied to master][am] using [hub][].
-* Maintainers may modify or squash commits rather than asking contributors to.
-* To issue a new release, the maintainers will:
-  * Update the CHANGELOG
-  * Tag a version, which will become available through gopkg.in.
-
-### How to Fork
-
-For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
-
-1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
-2. Create your feature branch (`git checkout -b my-new-feature`)
-3. Ensure everything works and the tests pass (see below)
-4. Commit your changes (`git commit -am 'Add some feature'`)
-
-Contribute upstream:
-
-1. Fork fsnotify on GitHub
-2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
-3. Push to the branch (`git push fork my-new-feature`)
-4. Create a new Pull Request on GitHub
-
-This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
-
-### Testing
-
-fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
-
-Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
-
-To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
-
-* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
-* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
-* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
-* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
-* When you're done, you will want to halt or destroy the Vagrant boxes.
-
-Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
-
-Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
-
-### Maintainers
-
-Help maintaining fsnotify is welcome. To be a maintainer:
-
-* Submit a pull request and sign the CLA as above.
-* You must be able to run the test suite on Mac, Windows, Linux and BSD.
-
-To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
-
-All code changes should be internal pull requests.
-
-Releases are tagged using [Semantic Versioning](http://semver.org/).
-
-[hub]: https://github.com/github/hub
-[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
+Thank you for your interest in contributing to fsnotify! We try to review and
+merge PRs in a reasonable timeframe, but please be aware that:
+
+- To avoid "wasted" work, please discus changes on the issue tracker first. You
+  can just send PRs, but they may end up being rejected for one reason or the
+  other.
+
+- fsnotify is a cross-platform library, and changes must work reasonably well on
+  all supported platforms.
+
+- Changes will need to be compatible; old code should still compile, and the
+  runtime behaviour can't change in ways that are likely to lead to problems for
+  users.
+
+Testing
+-------
+Just `go test ./...` runs all the tests; the CI runs this on all supported
+platforms. Testing different platforms locally can be done with something like
+[goon] or [Vagrant], but this isn't super-easy to set up at the moment.
+
+Use the `-short` flag to make the "stress test" run faster.
+
+[goon]: https://github.com/arp242/goon
+[Vagrant]: https://www.vagrantup.com/
+[integration_test.go]: /integration_test.go


@ -1,28 +1,25 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-Copyright (c) 2012-2019 fsnotify Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+Copyright © 2012 The Go Authors. All rights reserved.
+Copyright © fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, this
+   list of conditions and the following disclaimer in the documentation and/or
+   other materials provided with the distribution.
+ * Neither the name of Google Inc. nor the names of its contributors may be used
+   to endorse or promote products derived from this software without specific
+   prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,39 +1,33 @@
-# File system notifications for Go
-
-[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
-
-fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
-
-```console
-go get -u golang.org/x/sys/...
-```
-
-Cross platform: Windows, Linux, BSD and macOS.
-
-| Adapter               | OS                               | Status                                                         |
-| --------------------- | -------------------------------- | -------------------------------------------------------------- |
-| inotify               | Linux 2.6.27 or later, Android\* | Supported                                                      |
-| kqueue                | BSD, macOS, iOS\*                | Supported                                                      |
-| ReadDirectoryChangesW | Windows                          | Supported                                                      |
-| FSEvents              | macOS                            | [Planned](https://github.com/fsnotify/fsnotify/issues/11)      |
-| FEN                   | Solaris 11                       | [In Progress](https://github.com/fsnotify/fsnotify/issues/12)  |
-| fanotify              | Linux 2.6.37+                    | [Planned](https://github.com/fsnotify/fsnotify/issues/114)     |
-| USN Journals          | Windows                          | [Maybe](https://github.com/fsnotify/fsnotify/issues/53)        |
-| Polling               | *All*                            | [Maybe](https://github.com/fsnotify/fsnotify/issues/9)         |
-
-\* Android and iOS are untested.
-
-Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
-
-## API stability
-
-fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
-
-All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
-
-Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
-
-## Usage
+fsnotify is a Go library to provide cross-platform filesystem notifications on
+Windows, Linux, macOS, and BSD systems.
+
+Go 1.16 or newer is required; the full documentation is at
+https://pkg.go.dev/github.com/fsnotify/fsnotify
+
+**It's best to read the documentation at pkg.go.dev, as it's pinned to the last
+released version, whereas this README is for the last development version which
+may include additions/changes.**
+
+---
+
+Platform support:
+
+| Adapter               | OS             | Status                                                        |
+| --------------------- | ---------------| --------------------------------------------------------------|
+| inotify               | Linux 2.6.32+  | Supported                                                     |
+| kqueue                | BSD, macOS     | Supported                                                     |
+| ReadDirectoryChangesW | Windows        | Supported                                                     |
+| FSEvents              | macOS          | [Planned](https://github.com/fsnotify/fsnotify/issues/11)     |
+| FEN                   | Solaris 11     | [In Progress](https://github.com/fsnotify/fsnotify/pull/371)  |
+| fanotify              | Linux 5.9+     | [Maybe](https://github.com/fsnotify/fsnotify/issues/114)      |
+| USN Journals          | Windows        | [Maybe](https://github.com/fsnotify/fsnotify/issues/53)       |
+| Polling               | *All*          | [Maybe](https://github.com/fsnotify/fsnotify/issues/9)        |
+
+Linux and macOS should include Android and iOS, but these are currently untested.
+
+Usage
+-----
+A basic example:
```go ```go
package main package main
@ -45,13 +39,14 @@ import (
) )
func main() { func main() {
// Create new watcher.
watcher, err := fsnotify.NewWatcher() watcher, err := fsnotify.NewWatcher()
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
defer watcher.Close() defer watcher.Close()
done := make(chan bool) // Start listening for events.
go func() { go func() {
for { for {
select { select {
@ -60,7 +55,7 @@ func main() {
return return
} }
log.Println("event:", event) log.Println("event:", event)
if event.Op&fsnotify.Write == fsnotify.Write { if event.Has(fsnotify.Write) {
log.Println("modified file:", event.Name) log.Println("modified file:", event.Name)
} }
case err, ok := <-watcher.Errors: case err, ok := <-watcher.Errors:
@ -72,59 +67,95 @@ func main() {
} }
}() }()
err = watcher.Add("/tmp/foo") // Add a path.
err = watcher.Add("/tmp")
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
<-done
// Block main goroutine forever.
<-make(chan struct{})
} }
``` ```
## Contributing Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be
run with:
Please refer to [CONTRIBUTING][] before opening an issue or pull request. % go run ./cmd/fsnotify
## Example FAQ
---
### Will a file still be watched when it's moved to another directory?
No, not unless you are watching the location it was moved to.
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). ### Are subdirectories watched too?
No, you must add watches for any directory you want to watch (a recursive
watcher is on the roadmap: [#18]).
## FAQ
**When a file is moved to another directory is it still being watched?**
No (it shouldn't be, unless you are watching where it was moved to).
**When I watch a directory, are all subdirectories watched as well?**
No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
**Do I have to watch the Error and Event channels in a separate goroutine?**
As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
**Why am I receiving multiple events for the same file on OS X?**
Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
**How many files can be watched at once?**
There are OS-specific limits as to how many watches can be created:
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?**
fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications.
[#62]: https://github.com/howeyc/fsnotify/issues/62
[#18]: https://github.com/fsnotify/fsnotify/issues/18 [#18]: https://github.com/fsnotify/fsnotify/issues/18
### Do I have to watch the Error and Event channels in a goroutine?
As of now, yes (you can read both channels in the same goroutine using `select`,
you don't need a separate goroutine for both channels; see the example).
### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
fsnotify requires support from underlying OS to work. The current NFS and SMB
protocols does not provide network level support for file notifications, and
neither do the /proc and /sys virtual filesystems.
This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
[#9]: https://github.com/fsnotify/fsnotify/issues/9
Platform-specific notes
-----------------------
### Linux
When a file is removed a REMOVE event won't be emitted until all file
descriptors are closed; it will emit a CHMOD instead:
fp := os.Open("file")
os.Remove("file") // CHMOD
fp.Close() // REMOVE
This is the event that inotify sends, so not much can be changed about this.
The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for
the number of watches per user, and `fs.inotify.max_user_instances` specifies
the maximum number of inotify instances per user. Every Watcher you create is an
"instance", and every path you add is a "watch".
These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and
`/proc/sys/fs/inotify/max_user_instances`
To increase them you can use `sysctl` or write the value to proc file:
# The default values on Linux 5.18
sysctl fs.inotify.max_user_watches=124983
sysctl fs.inotify.max_user_instances=128
To make the changes persist on reboot edit `/etc/sysctl.conf` or
`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your
distro's documentation):
fs.inotify.max_user_watches=124983
fs.inotify.max_user_instances=128
Reaching the limit will result in a "no space left on device" or "too many open
files" error.
### kqueue (macOS, all BSD systems)
kqueue requires opening a file descriptor for every file that's being watched;
so if you're watching a directory with five files then that's six file
descriptors. You will run in to your system's "max open files" limit faster on
these platforms.
The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
control the maximum number of open files.
### macOS
Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary
workaround is to add your folder(s) to the *Spotlight Privacy settings* until we
have a native FSEvents implementation (see [#11]).
[#11]: https://github.com/fsnotify/fsnotify/issues/11 [#11]: https://github.com/fsnotify/fsnotify/issues/11
[#7]: https://github.com/howeyc/fsnotify/issues/7 [#15]: https://github.com/fsnotify/fsnotify/issues/15
[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
## Related Projects
* [notify](https://github.com/rjeczalik/notify)
* [fsevents](https://github.com/fsnotify/fsevents)
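
The "Watching files" advice in the backend documentation below recommends watching a file's parent directory and filtering on `Event.Name`, so the watch survives tools that replace files atomically. A minimal sketch of that pattern, using only the `fsnotify` API shown above; the config path is purely an example:

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch the parent directory rather than the file itself, so the
	// watch survives editors that write a temp file and rename it.
	file := "/tmp/app.conf" // example path, not part of the project
	if err := watcher.Add(filepath.Dir(file)); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			// Only react to writes on the one file we care about.
			if event.Name == file && event.Has(fsnotify.Write) {
				log.Println("modified:", event.Name)
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```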

162
vendor/github.com/fsnotify/fsnotify/backend_fen.go generated vendored Normal file
View File

@ -0,0 +1,162 @@
//go:build solaris
// +build solaris
package fsnotify
import (
"errors"
)
// Watcher watches a set of paths, delivering events on a channel.
//
// A watcher should not be copied (e.g. pass it by pointer, rather than by
// value).
//
// # Linux notes
//
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
// for the number of watches per user, and fs.inotify.max_user_instances
// specifies the maximum number of inotify instances per user. Every Watcher you
// create is an "instance", and every path you add is a "watch".
//
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
// /proc/sys/fs/inotify/max_user_instances
//
// To increase them you can use sysctl or write the value to the /proc file:
//
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
//
// # kqueue notes (macOS, BSD)
//
// kqueue requires opening a file descriptor for every file that's being watched;
// so if you're watching a directory with five files then that's six file
// descriptors. You will run into your system's "max open files" limit faster on
// these platforms.
//
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # macOS notes
//
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
// Settings" until we have a native FSEvents implementation (see [#11]).
//
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
type Watcher struct {
// Events sends the filesystem change events.
//
// fsnotify can send the following events; a "path" here can refer to a
// file, directory, symbolic link, or special file like a FIFO.
//
// fsnotify.Create A new path was created; this may be followed by one
// or more Write events if data also gets written to a
// file.
//
// fsnotify.Remove A path was removed.
//
// fsnotify.Rename A path was renamed. A rename is always sent with the
// old path as Event.Name, and a Create event will be
// sent with the new name. Renames are only sent for
// paths that are currently watched; e.g. moving an
// unmonitored file into a monitored directory will
// show up as just a Create. Similarly, renaming a file
// to outside a monitored directory will show up as
// only a Rename.
//
// fsnotify.Write A file or named pipe was written to. A Truncate will
// also trigger a Write. A single "write action"
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
// you may get hundreds of Write events, so you
// probably want to wait until you've stopped receiving
// them (see the dedup example in cmd/fsnotify).
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
// when a file is truncated. On Windows
// it's never sent.
Events chan Event
// Errors sends any errors.
Errors chan error
}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
return nil, errors.New("FEN based watcher not yet supported for fsnotify")
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
return nil
}
// Add starts monitoring the path for changes.
//
// A path can only be watched once; attempting to watch it more than once will
// return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted.
//
// A path will remain watched if it gets renamed to somewhere else on the same
// filesystem, but the monitor will get removed if the path gets deleted and
// re-created, or if it's moved to a different filesystem.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file a temporary file will be written to first, and if successful the
// temporary file is moved to to destination removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
func (w *Watcher) Add(name string) error {
return nil
}
// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
func (w *Watcher) Remove(name string) error {
return nil
}
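
The Events documentation on the Watcher above maps directly onto `Event.Has` checks. A minimal consumer sketch under that API (the `/tmp` path is only an example):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// logEvents drains the Events channel and reports each operation kind
// with Event.Has, mirroring the event table documented on Watcher.
func logEvents(w *fsnotify.Watcher) {
	for event := range w.Events {
		switch {
		case event.Has(fsnotify.Create):
			log.Println("created:", event.Name)
		case event.Has(fsnotify.Remove):
			log.Println("removed:", event.Name)
		case event.Has(fsnotify.Rename):
			log.Println("renamed:", event.Name)
		case event.Has(fsnotify.Write):
			log.Println("written:", event.Name)
		case event.Has(fsnotify.Chmod):
			log.Println("attributes changed:", event.Name)
		}
	}
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}
	logEvents(w)
}
```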

459
vendor/github.com/fsnotify/fsnotify/backend_inotify.go generated vendored Normal file
View File

@ -0,0 +1,459 @@
//go:build linux
// +build linux
package fsnotify
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"unsafe"
"golang.org/x/sys/unix"
)
// Watcher watches a set of paths, delivering events on a channel.
//
// A watcher should not be copied (e.g. pass it by pointer, rather than by
// value).
//
// # Linux notes
//
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
// for the number of watches per user, and fs.inotify.max_user_instances
// specifies the maximum number of inotify instances per user. Every Watcher you
// create is an "instance", and every path you add is a "watch".
//
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
// /proc/sys/fs/inotify/max_user_instances
//
// To increase them you can use sysctl or write the value to the /proc file:
//
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
//
// # kqueue notes (macOS, BSD)
//
// kqueue requires opening a file descriptor for every file that's being watched;
// so if you're watching a directory with five files then that's six file
// descriptors. You will run into your system's "max open files" limit faster on
// these platforms.
//
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # macOS notes
//
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
// Settings" until we have a native FSEvents implementation (see [#11]).
//
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
type Watcher struct {
// Events sends the filesystem change events.
//
// fsnotify can send the following events; a "path" here can refer to a
// file, directory, symbolic link, or special file like a FIFO.
//
// fsnotify.Create A new path was created; this may be followed by one
// or more Write events if data also gets written to a
// file.
//
// fsnotify.Remove A path was removed.
//
// fsnotify.Rename A path was renamed. A rename is always sent with the
// old path as Event.Name, and a Create event will be
// sent with the new name. Renames are only sent for
// paths that are currently watched; e.g. moving an
// unmonitored file into a monitored directory will
// show up as just a Create. Similarly, renaming a file
// to outside a monitored directory will show up as
// only a Rename.
//
// fsnotify.Write A file or named pipe was written to. A Truncate will
// also trigger a Write. A single "write action"
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
// you may get hundreds of Write events, so you
// probably want to wait until you've stopped receiving
// them (see the dedup example in cmd/fsnotify).
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
// when a file is truncated. On Windows
// it's never sent.
Events chan Event
// Errors sends any errors.
Errors chan error
// Store fd here as os.File.Read() will no longer return on close after
// calling Fd(). See: https://github.com/golang/go/issues/26439
fd int
mu sync.Mutex // Map access
inotifyFile *os.File
watches map[string]*watch // Map of inotify watches (key: path)
paths map[int]string // Map of watched paths (key: watch descriptor)
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
doneResp chan struct{} // Channel to respond to Close
}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
// Create inotify fd
// Need to set the FD to nonblocking mode in order for SetDeadline methods to work
// Otherwise, blocking i/o operations won't terminate on close
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
if fd == -1 {
return nil, errno
}
w := &Watcher{
fd: fd,
inotifyFile: os.NewFile(uintptr(fd), ""),
watches: make(map[string]*watch),
paths: make(map[int]string),
Events: make(chan Event),
Errors: make(chan error),
done: make(chan struct{}),
doneResp: make(chan struct{}),
}
go w.readEvents()
return w, nil
}
// Returns true if the event was sent, or false if watcher is closed.
func (w *Watcher) sendEvent(e Event) bool {
select {
case w.Events <- e:
return true
case <-w.done:
}
return false
}
// Returns true if the error was sent, or false if watcher is closed.
func (w *Watcher) sendError(err error) bool {
select {
case w.Errors <- err:
return true
case <-w.done:
return false
}
}
func (w *Watcher) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
w.mu.Lock()
if w.isClosed() {
w.mu.Unlock()
return nil
}
// Send 'close' signal to goroutine, and set the Watcher to closed.
close(w.done)
w.mu.Unlock()
// Causes any blocking reads to return with an error, provided the file
// still supports deadline operations.
err := w.inotifyFile.Close()
if err != nil {
return err
}
// Wait for goroutine to close
<-w.doneResp
return nil
}
// Add starts monitoring the path for changes.
//
// A path can only be watched once; attempting to watch it more than once will
// return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted.
//
// A path will remain watched if it gets renamed to somewhere else on the same
// filesystem, but the monitor will get removed if the path gets deleted and
// re-created, or if it's moved to a different filesystem.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file a temporary file will be written to first, and if successful the
// temporary file is moved to the destination, removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
func (w *Watcher) Add(name string) error {
name = filepath.Clean(name)
if w.isClosed() {
return errors.New("inotify instance already closed")
}
var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
w.mu.Lock()
defer w.mu.Unlock()
watchEntry := w.watches[name]
if watchEntry != nil {
flags |= watchEntry.flags | unix.IN_MASK_ADD
}
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
if wd == -1 {
return errno
}
if watchEntry == nil {
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
w.paths[wd] = name
} else {
watchEntry.wd = uint32(wd)
watchEntry.flags = flags
}
return nil
}
// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
func (w *Watcher) Remove(name string) error {
name = filepath.Clean(name)
// Fetch the watch.
w.mu.Lock()
defer w.mu.Unlock()
watch, ok := w.watches[name]
// Remove it from inotify.
if !ok {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
// We successfully removed the watch if InotifyRmWatch doesn't return an
// error; we need to clean up our internal state to ensure it matches
// inotify's kernel state.
delete(w.paths, int(watch.wd))
delete(w.watches, name)
// inotify_rm_watch will return EINVAL if the file has been deleted;
// the inotify will already have been removed.
// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
// by calling inotify_rm_watch() below. E.g. the readEvents() goroutine receives
// IN_IGNORED, so EINVAL means that the wd is being rm_watch()ed or its file was
// removed by another thread and we have not yet received the IN_IGNORED event.
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
if success == -1 {
// TODO: Perhaps it's not helpful to return an error here in every case;
// The only two possible errors are:
//
// - EBADF, which happens when w.fd is not a valid file descriptor
// of any kind.
// - EINVAL, which is when fd is not an inotify descriptor or wd
// is not a valid watch descriptor. Watch descriptors are
// invalidated when they are removed explicitly or implicitly;
// explicitly by inotify_rm_watch, implicitly when the file they
// are watching is deleted.
return errno
}
return nil
}
// WatchList returns all paths added with [Add] (and are not yet removed).
func (w *Watcher) WatchList() []string {
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, len(w.watches))
for pathname := range w.watches {
entries = append(entries, pathname)
}
return entries
}
type watch struct {
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
}
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel
func (w *Watcher) readEvents() {
defer func() {
close(w.doneResp)
close(w.Errors)
close(w.Events)
}()
var (
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
errno error // Syscall errno
)
for {
// See if we have been closed.
if w.isClosed() {
return
}
n, err := w.inotifyFile.Read(buf[:])
switch {
case errors.Unwrap(err) == os.ErrClosed:
return
case err != nil:
if !w.sendError(err) {
return
}
continue
}
if n < unix.SizeofInotifyEvent {
var err error
if n == 0 {
// EOF was received; this should really never happen.
err = io.EOF
} else if n < 0 {
// If an error occurred while reading.
err = errno
} else {
// Read was too short.
err = errors.New("notify: short read in readEvents()")
}
if !w.sendError(err) {
return
}
continue
}
var offset uint32
// We don't know how many events we just read into the buffer
// While the offset points to at least one whole event...
for offset <= uint32(n-unix.SizeofInotifyEvent) {
var (
// Point "raw" to the event in the buffer
raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
mask = uint32(raw.Mask)
nameLen = uint32(raw.Len)
)
if mask&unix.IN_Q_OVERFLOW != 0 {
if !w.sendError(ErrEventOverflow) {
return
}
}
// If the event happened to the watched directory or the watched file, the kernel
// doesn't append the filename to the event, but we would like to always fill
// the "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
w.mu.Lock()
name, ok := w.paths[int(raw.Wd)]
// IN_DELETE_SELF occurs when the file/directory being watched is removed.
// This is a sign to clean up the maps, otherwise we are no longer in sync
// with the inotify kernel state which has already deleted the watch
// automatically.
if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
delete(w.paths, int(raw.Wd))
delete(w.watches, name)
}
w.mu.Unlock()
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
}
event := w.newEvent(name, mask)
// Send the events that are not ignored on the events channel
if mask&unix.IN_IGNORED == 0 {
if !w.sendEvent(event) {
return
}
}
// Move to the next event in the buffer
offset += unix.SizeofInotifyEvent + nameLen
}
}
}
// newEvent returns a platform-independent Event based on an inotify mask.
func (w *Watcher) newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
e.Op |= Create
}
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
e.Op |= Remove
}
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
e.Op |= Write
}
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
e.Op |= Rename
}
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
e.Op |= Chmod
}
return e
}
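
As the Add, Remove, and WatchList docs above describe, a watch lifecycle on this backend might look like the following sketch; the watched paths /tmp and /var are arbitrary examples:

```go
package main

import (
	"errors"
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Add a watch and list what is currently watched.
	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}
	log.Println("watching:", w.WatchList())

	// Removing a path that was never added fails with ErrNonExistentWatch,
	// per the Remove documentation above.
	err = w.Remove("/var")
	if errors.Is(err, fsnotify.ErrNonExistentWatch) {
		log.Println("not watched:", err)
	}
}
```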

707
vendor/github.com/fsnotify/fsnotify/backend_kqueue.go generated vendored Normal file
View File

@ -0,0 +1,707 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
// +build freebsd openbsd netbsd dragonfly darwin
package fsnotify
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"golang.org/x/sys/unix"
)
// Watcher watches a set of paths, delivering events on a channel.
//
// A watcher should not be copied (e.g. pass it by pointer, rather than by
// value).
//
// # Linux notes
//
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
// for the number of watches per user, and fs.inotify.max_user_instances
// specifies the maximum number of inotify instances per user. Every Watcher you
// create is an "instance", and every path you add is a "watch".
//
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
// /proc/sys/fs/inotify/max_user_instances
//
// To increase them you can use sysctl or write the value to the /proc file:
//
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
//
// # kqueue notes (macOS, BSD)
//
// kqueue requires opening a file descriptor for every file that's being watched;
// so if you're watching a directory with five files then that's six file
// descriptors. You will run into your system's "max open files" limit faster on
// these platforms.
//
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # macOS notes
//
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
// Settings" until we have a native FSEvents implementation (see [#11]).
//
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
type Watcher struct {
// Events sends the filesystem change events.
//
// fsnotify can send the following events; a "path" here can refer to a
// file, directory, symbolic link, or special file like a FIFO.
//
// fsnotify.Create A new path was created; this may be followed by one
// or more Write events if data also gets written to a
// file.
//
// fsnotify.Remove A path was removed.
//
// fsnotify.Rename A path was renamed. A rename is always sent with the
// old path as Event.Name, and a Create event will be
// sent with the new name. Renames are only sent for
// paths that are currently watched; e.g. moving an
// unmonitored file into a monitored directory will
// show up as just a Create. Similarly, renaming a file
// to outside a monitored directory will show up as
// only a Rename.
//
// fsnotify.Write A file or named pipe was written to. A Truncate will
// also trigger a Write. A single "write action"
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
// you may get hundreds of Write events, so you
// probably want to wait until you've stopped receiving
// them (see the dedup example in cmd/fsnotify).
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
// when a file is truncated. On Windows
// it's never sent.
Events chan Event
// Errors sends any errors.
Errors chan error
done chan struct{}
kq int // File descriptor (as returned by the kqueue() syscall).
closepipe [2]int // Pipe used for closing.
mu sync.Mutex // Protects access to watcher data
watches map[string]int // Watched file descriptors (key: path).
watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)).
userWatches map[string]struct{} // Watches added with Watcher.Add()
dirFlags map[string]uint32 // Watched directories to fflags used in kqueue.
paths map[int]pathInfo // File descriptors to path names for processing kqueue events.
fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events).
isClosed bool // Set to true when Close() is first called
}
type pathInfo struct {
name string
isDir bool
}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
kq, closepipe, err := newKqueue()
if err != nil {
return nil, err
}
w := &Watcher{
kq: kq,
closepipe: closepipe,
watches: make(map[string]int),
watchesByDir: make(map[string]map[int]struct{}),
dirFlags: make(map[string]uint32),
paths: make(map[int]pathInfo),
fileExists: make(map[string]struct{}),
userWatches: make(map[string]struct{}),
Events: make(chan Event),
Errors: make(chan error),
done: make(chan struct{}),
}
go w.readEvents()
return w, nil
}
// newKqueue creates a new kernel event queue and returns a descriptor.
//
// This registers a new event on closepipe, which will trigger an event when
// it's closed. This way we can use kevent() without timeout/polling; without
// the closepipe, it would block forever and we wouldn't be able to stop it at
// all.
func newKqueue() (kq int, closepipe [2]int, err error) {
kq, err = unix.Kqueue()
if kq == -1 {
return kq, closepipe, err
}
// Register the close pipe.
err = unix.Pipe(closepipe[:])
if err != nil {
unix.Close(kq)
return kq, closepipe, err
}
// Register changes to listen on the closepipe.
changes := make([]unix.Kevent_t, 1)
// SetKevent converts int to the platform-specific types.
unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ,
unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT)
ok, err := unix.Kevent(kq, changes, nil, nil)
if ok == -1 {
unix.Close(kq)
unix.Close(closepipe[0])
unix.Close(closepipe[1])
return kq, closepipe, err
}
return kq, closepipe, nil
}
// Returns true if the event was sent, or false if watcher is closed.
func (w *Watcher) sendEvent(e Event) bool {
select {
case w.Events <- e:
return true
case <-w.done:
}
return false
}
// Returns true if the error was sent, or false if watcher is closed.
func (w *Watcher) sendError(err error) bool {
select {
case w.Errors <- err:
return true
case <-w.done:
}
return false
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return nil
}
w.isClosed = true
// copy paths to remove while locked
pathsToRemove := make([]string, 0, len(w.watches))
for name := range w.watches {
pathsToRemove = append(pathsToRemove, name)
}
w.mu.Unlock() // Unlock before calling Remove, which also locks
for _, name := range pathsToRemove {
w.Remove(name)
}
// Send "quit" message to the reader goroutine.
unix.Close(w.closepipe[1])
close(w.done)
return nil
}
// Add starts monitoring the path for changes.
//
// A path can only be watched once; attempting to watch it more than once will
// return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted.
//
// A path will remain watched if it gets renamed to somewhere else on the same
// filesystem, but the monitor will get removed if the path gets deleted and
// re-created, or if it's moved to a different filesystem.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file a temporary file will be written to first, and if successful the
// temporary file is moved to the destination, removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
func (w *Watcher) Add(name string) error {
w.mu.Lock()
w.userWatches[name] = struct{}{}
w.mu.Unlock()
_, err := w.addWatch(name, noteAllEvents)
return err
}
// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
func (w *Watcher) Remove(name string) error {
name = filepath.Clean(name)
w.mu.Lock()
watchfd, ok := w.watches[name]
w.mu.Unlock()
if !ok {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
err := w.register([]int{watchfd}, unix.EV_DELETE, 0)
if err != nil {
return err
}
unix.Close(watchfd)
w.mu.Lock()
isDir := w.paths[watchfd].isDir
delete(w.watches, name)
delete(w.userWatches, name)
parentName := filepath.Dir(name)
delete(w.watchesByDir[parentName], watchfd)
if len(w.watchesByDir[parentName]) == 0 {
delete(w.watchesByDir, parentName)
}
delete(w.paths, watchfd)
delete(w.dirFlags, name)
delete(w.fileExists, name)
w.mu.Unlock()
// Find all watched paths that are in this directory that are not external.
if isDir {
var pathsToRemove []string
w.mu.Lock()
for fd := range w.watchesByDir[name] {
path := w.paths[fd]
if _, ok := w.userWatches[path.name]; !ok {
pathsToRemove = append(pathsToRemove, path.name)
}
}
w.mu.Unlock()
for _, name := range pathsToRemove {
// Since these are internal, not much sense in propagating error
// to the user, as that will just confuse them with an error about
// a path they did not explicitly watch themselves.
w.Remove(name)
}
}
return nil
}
// WatchList returns all paths added with [Add] (and are not yet removed).
func (w *Watcher) WatchList() []string {
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, len(w.userWatches))
for pathname := range w.userWatches {
entries = append(entries, pathname)
}
return entries
}
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
// addWatch adds name to the watched file set.
// The flags are interpreted as described in kevent(2).
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
var isDir bool
// Make ./name and name equivalent
name = filepath.Clean(name)
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return "", errors.New("kevent instance already closed")
}
watchfd, alreadyWatching := w.watches[name]
// We already have a watch, but we can still override flags.
if alreadyWatching {
isDir = w.paths[watchfd].isDir
}
w.mu.Unlock()
if !alreadyWatching {
fi, err := os.Lstat(name)
if err != nil {
return "", err
}
// Don't watch sockets or named pipes
if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) {
return "", nil
}
// Follow Symlinks
//
// Linux can add unresolvable symlinks to the watch list without issue,
// and Windows can't do symlinks period. To maintain consistency, we
// will act like everything is fine if the link can't be resolved.
// There will simply be no file events for broken symlinks. Hence the
// returns of nil on errors.
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
name, err = filepath.EvalSymlinks(name)
if err != nil {
return "", nil
}
w.mu.Lock()
_, alreadyWatching = w.watches[name]
w.mu.Unlock()
if alreadyWatching {
return name, nil
}
fi, err = os.Lstat(name)
if err != nil {
return "", nil
}
}
// Retry on EINTR; open() can return EINTR in practice on macOS.
// See #354, and go issues 11180 and 39237.
for {
watchfd, err = unix.Open(name, openMode, 0)
if err == nil {
break
}
if errors.Is(err, unix.EINTR) {
continue
}
return "", err
}
isDir = fi.IsDir()
}
err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
if err != nil {
unix.Close(watchfd)
return "", err
}
if !alreadyWatching {
w.mu.Lock()
parentName := filepath.Dir(name)
w.watches[name] = watchfd
watchesByDir, ok := w.watchesByDir[parentName]
if !ok {
watchesByDir = make(map[int]struct{}, 1)
w.watchesByDir[parentName] = watchesByDir
}
watchesByDir[watchfd] = struct{}{}
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
w.mu.Unlock()
}
if isDir {
// Watch the directory if it has not been watched before,
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
w.mu.Lock()
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
// Store flags so this watch can be updated later
w.dirFlags[name] = flags
w.mu.Unlock()
if watchDir {
if err := w.watchDirectoryFiles(name); err != nil {
return "", err
}
}
}
return name, nil
}
// readEvents reads from kqueue and converts the received kevents into
// Event values that it sends down the Events channel.
func (w *Watcher) readEvents() {
defer func() {
err := unix.Close(w.kq)
if err != nil {
w.Errors <- err
}
unix.Close(w.closepipe[0])
close(w.Events)
close(w.Errors)
}()
eventBuffer := make([]unix.Kevent_t, 10)
for closed := false; !closed; {
kevents, err := w.read(eventBuffer)
// EINTR is okay, the syscall was interrupted before timeout expired.
if err != nil && err != unix.EINTR {
if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) {
closed = true
}
continue
}
// Flush the events we received to the Events channel
for _, kevent := range kevents {
var (
watchfd = int(kevent.Ident)
mask = uint32(kevent.Fflags)
)
// Shut down the loop when the pipe is closed, but only after all
// other events have been processed.
if watchfd == w.closepipe[0] {
closed = true
continue
}
w.mu.Lock()
path := w.paths[watchfd]
w.mu.Unlock()
event := w.newEvent(path.name, mask)
if path.isDir && !event.Has(Remove) {
// Double check to make sure the directory exists. This can
// happen when we do a rm -fr on a recursively watched folder
// and we receive a modification event first but the folder has
// been deleted and later receive the delete event.
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
event.Op |= Remove
}
}
if event.Has(Rename) || event.Has(Remove) {
w.Remove(event.Name)
w.mu.Lock()
delete(w.fileExists, event.Name)
w.mu.Unlock()
}
if path.isDir && event.Has(Write) && !event.Has(Remove) {
w.sendDirectoryChangeEvents(event.Name)
} else {
if !w.sendEvent(event) {
closed = true
continue
}
}
if event.Has(Remove) {
// Look for a file that may have overwritten this.
// For example, mv f1 f2 will delete f2, then create f2.
if path.isDir {
fileDir := filepath.Clean(event.Name)
w.mu.Lock()
_, found := w.watches[fileDir]
w.mu.Unlock()
if found {
// make sure the directory exists before we watch for changes. When we
// do a recursive watch and perform rm -fr, the parent directory might
// have gone missing; ignore the missing directory and let the
// upcoming delete event remove the watch from the parent directory.
if _, err := os.Lstat(fileDir); err == nil {
w.sendDirectoryChangeEvents(fileDir)
}
}
} else {
filePath := filepath.Clean(event.Name)
if fileInfo, err := os.Lstat(filePath); err == nil {
w.sendFileCreatedEventIfNew(filePath, fileInfo)
}
}
}
}
}
}
// newEvent returns a platform-independent Event based on kqueue Fflags.
func (w *Watcher) newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
e.Op |= Remove
}
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
e.Op |= Write
}
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
e.Op |= Rename
}
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
e.Op |= Chmod
}
return e
}
// watchDirectoryFiles watches all files in dirPath, to mimic inotify when
// adding a watch on a directory.
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
return err
}
for _, fileInfo := range files {
path := filepath.Join(dirPath, fileInfo.Name())
cleanPath, err := w.internalWatch(path, fileInfo)
if err != nil {
// No permission to read the file; that's not a problem: just skip.
// But do add it to w.fileExists to prevent it from being picked up
// as a "new" file later (it still shows up in the directory
// listing).
switch {
case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
cleanPath = filepath.Clean(path)
default:
return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err)
}
}
w.mu.Lock()
w.fileExists[cleanPath] = struct{}{}
w.mu.Unlock()
}
return nil
}
// Search the directory for new files and send an event for them.
//
// This functionality is to have the BSD watcher match the inotify, which sends
// a create event for files created in a watched directory.
func (w *Watcher) sendDirectoryChangeEvents(dir string) {
// Get all files
files, err := ioutil.ReadDir(dir)
if err != nil {
if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) {
return
}
}
// Search for new files
for _, fi := range files {
err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
if err != nil {
return
}
}
}
// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
w.mu.Lock()
_, doesExist := w.fileExists[filePath]
w.mu.Unlock()
if !doesExist {
if !w.sendEvent(Event{Name: filePath, Op: Create}) {
return
}
}
// like watchDirectoryFiles (but without doing another ReadDir)
filePath, err = w.internalWatch(filePath, fileInfo)
if err != nil {
return err
}
w.mu.Lock()
w.fileExists[filePath] = struct{}{}
w.mu.Unlock()
return nil
}
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
if fileInfo.IsDir() {
// mimic Linux providing delete events for subdirectories
// but preserve the flags used if currently watching the subdirectory
w.mu.Lock()
flags := w.dirFlags[name]
w.mu.Unlock()
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
return w.addWatch(name, flags)
}
// watch file to mimic Linux inotify
return w.addWatch(name, noteAllEvents)
}
// Register events with the queue.
func (w *Watcher) register(fds []int, flags int, fflags uint32) error {
changes := make([]unix.Kevent_t, len(fds))
for i, fd := range fds {
// SetKevent converts int to the platform-specific types.
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
changes[i].Fflags = fflags
}
// Register the events.
success, err := unix.Kevent(w.kq, changes, nil, nil)
if success == -1 {
return err
}
return nil
}
// read retrieves pending events, or waits until an event occurs.
func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
n, err := unix.Kevent(w.kq, nil, events, nil)
if err != nil {
return nil, err
}
return events[0:n], nil
}
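
Since this backend consumes one file descriptor per watched file (plus the kqueue fd and the close pipe), a caller may want to sanity-check the process's open-file limit before adding a large watch set. A hedged sketch using golang.org/x/sys/unix; the planned watch count and the half-the-limit threshold are purely illustrative:

```go
package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Read the current RLIMIT_NOFILE soft limit for this process.
	var lim unix.Rlimit
	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &lim); err != nil {
		log.Fatal(err)
	}

	// 10000 is an arbitrary, illustrative watch count; warn when it
	// approaches the descriptor budget.
	const plannedWatches = 10000
	if plannedWatches > int(lim.Cur)/2 {
		log.Printf("watching %d files may exhaust RLIMIT_NOFILE (current soft limit: %d)",
			plannedWatches, lim.Cur)
	}
}
```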

66
vendor/github.com/fsnotify/fsnotify/backend_other.go generated vendored Normal file
View File

@ -0,0 +1,66 @@
//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
package fsnotify
import (
"fmt"
"runtime"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct{}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
return nil
}
// Add starts monitoring the path for changes.
//
// A path can only be watched once; attempting to watch it more than once will
// return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted.
//
// A path will remain watched if it gets renamed to somewhere else on the same
// filesystem, but the monitor will get removed if the path gets deleted and
// re-created, or if it's moved to a different filesystem.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file a temporary file will be written to first, and if successful the
// temporary file is moved to the destination, removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
func (w *Watcher) Add(name string) error {
return nil
}
// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
func (w *Watcher) Remove(name string) error {
return nil
}
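
Because this fallback backend fails at runtime rather than at compile time, callers on unsupported platforms only find out when NewWatcher returns the "fsnotify not supported on GOOS" error above. A minimal sketch of guarding for that; the degraded behaviour is left to the application:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		// On platforms without a native backend this is the
		// "fsnotify not supported on <GOOS>" error from the stub above.
		log.Printf("filesystem notifications unavailable: %v", err)
		return
	}
	defer w.Close()
	log.Println("filesystem notifications available")
}
```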

746
vendor/github.com/fsnotify/fsnotify/backend_windows.go generated vendored Normal file
View File

@ -0,0 +1,746 @@
//go:build windows
// +build windows
package fsnotify
import (
"errors"
"fmt"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"unsafe"
"golang.org/x/sys/windows"
)
// Watcher watches a set of paths, delivering events on a channel.
//
// A watcher should not be copied (e.g. pass it by pointer, rather than by
// value).
//
// # Linux notes
//
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
// for the number of watches per user, and fs.inotify.max_user_instances
// specifies the maximum number of inotify instances per user. Every Watcher you
// create is an "instance", and every path you add is a "watch".
//
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
// /proc/sys/fs/inotify/max_user_instances
//
// To increase them you can use sysctl or write the value to the /proc file:
//
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
//
// # kqueue notes (macOS, BSD)
//
// kqueue requires opening a file descriptor for every file that's being watched;
// so if you're watching a directory with five files then that's six file
// descriptors. You will run into your system's "max open files" limit faster on
// these platforms.
//
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # macOS notes
//
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
// Settings" until we have a native FSEvents implementation (see [#11]).
//
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
type Watcher struct {
// Events sends the filesystem change events.
//
// fsnotify can send the following events; a "path" here can refer to a
// file, directory, symbolic link, or special file like a FIFO.
//
// fsnotify.Create A new path was created; this may be followed by one
// or more Write events if data also gets written to a
// file.
//
// fsnotify.Remove A path was removed.
//
// fsnotify.Rename A path was renamed. A rename is always sent with the
// old path as Event.Name, and a Create event will be
// sent with the new name. Renames are only sent for
// paths that are currently watched; e.g. moving an
// unmonitored file into a monitored directory will
// show up as just a Create. Similarly, renaming a file
// to outside a monitored directory will show up as
// only a Rename.
//
// fsnotify.Write A file or named pipe was written to. A Truncate will
// also trigger a Write. A single "write action"
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
// you may get hundreds of Write events, so you
// probably want to wait until you've stopped receiving
// them (see the dedup example in cmd/fsnotify).
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
// when a file is truncated. On Windows
// it's never sent.
Events chan Event
// Errors sends any errors.
Errors chan error
port windows.Handle // Handle to completion port
input chan *input // Inputs to the reader are sent on this channel
quit chan chan<- error
mu sync.Mutex // Protects access to watches, isClosed
watches watchMap // Map of watches (key: i-number)
isClosed bool // Set to true when Close() is first called
}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
if err != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
}
w := &Watcher{
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
Events: make(chan Event, 50),
Errors: make(chan error),
quit: make(chan chan<- error, 1),
}
go w.readEvents()
return w, nil
}
func (w *Watcher) sendEvent(name string, mask uint64) bool {
if mask == 0 {
return false
}
event := w.newEvent(name, uint32(mask))
select {
case ch := <-w.quit:
w.quit <- ch
case w.Events <- event:
}
return true
}
// Returns true if the error was sent, or false if watcher is closed.
func (w *Watcher) sendError(err error) bool {
select {
case w.Errors <- err:
return true
case <-w.quit:
}
return false
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return nil
}
w.isClosed = true
w.mu.Unlock()
// Send "quit" message to the reader goroutine
ch := make(chan error)
w.quit <- ch
if err := w.wakeupReader(); err != nil {
return err
}
return <-ch
}
// Add starts monitoring the path for changes.
//
// A path can only be watched once; attempting to watch it more than once will
// return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted.
//
// A path will remain watched if it gets renamed to somewhere else on the same
// filesystem, but the monitor will get removed if the path gets deleted and
// re-created, or if it's moved to a different filesystem.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file a temporary file will be written to first, and if successful the
// temporary file is moved to the destination, removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
func (w *Watcher) Add(name string) error {
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return errors.New("watcher already closed")
}
w.mu.Unlock()
in := &input{
op: opAddWatch,
path: filepath.Clean(name),
flags: sysFSALLEVENTS,
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
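// A sketch of the pattern recommended above: watch the parent directory and
// filter on Event.Name instead of watching the file itself (the paths and the
// reload() hook are hypothetical):
//
//	_ = w.Add("/etc") // rather than w.Add("/etc/app.conf")
//	for ev := range w.Events {
//		if ev.Name != "/etc/app.conf" {
//			continue // some other file in /etc
//		}
//		if ev.Has(Write) || ev.Has(Create) {
//			reload()
//		}
//	}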
// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
func (w *Watcher) Remove(name string) error {
in := &input{
op: opRemoveWatch,
path: filepath.Clean(name),
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
// WatchList returns all paths added with [Add] (and that are not yet removed).
func (w *Watcher) WatchList() []string {
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, len(w.watches))
for _, entry := range w.watches {
for _, watchEntry := range entry {
entries = append(entries, watchEntry.path)
}
}
return entries
}
// These options are from the old golang.org/x/exp/winfsnotify, where you could
// add various options to the watch. This has long since been removed.
//
// The "sys" in the name is misleading as they're not part of any "system".
//
// This should all be removed at some point, and just use windows.FILE_NOTIFY_*
const (
sysFSALLEVENTS = 0xfff
sysFSATTRIB = 0x4
sysFSCREATE = 0x100
sysFSDELETE = 0x200
sysFSDELETESELF = 0x400
sysFSMODIFY = 0x2
sysFSMOVE = 0xc0
sysFSMOVEDFROM = 0x40
sysFSMOVEDTO = 0x80
sysFSMOVESELF = 0x800
sysFSIGNORED = 0x8000
)
func (w *Watcher) newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
e.Op |= Create
}
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
e.Op |= Remove
}
if mask&sysFSMODIFY == sysFSMODIFY {
e.Op |= Write
}
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
e.Op |= Rename
}
if mask&sysFSATTRIB == sysFSATTRIB {
e.Op |= Chmod
}
return e
}
const (
opAddWatch = iota
opRemoveWatch
)
const (
provisional uint64 = 1 << (32 + iota)
)
type input struct {
op int
path string
flags uint32
reply chan error
}
type inode struct {
handle windows.Handle
volume uint32
index uint64
}
type watch struct {
ov windows.Overlapped
ino *inode // i-number
path string // Directory path
mask uint64 // Directory itself is being watched with these notify flags
names map[string]uint64 // Map of names being watched and their notify flags
rename string // Remembers the old name while renaming a file
buf [65536]byte // 64K buffer
}
type (
indexMap map[uint64]*watch
watchMap map[uint32]indexMap
)
func (w *Watcher) wakeupReader() error {
err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
if err != nil {
return os.NewSyscallError("PostQueuedCompletionStatus", err)
}
return nil
}
func (w *Watcher) getDir(pathname string) (dir string, err error) {
attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
if err != nil {
return "", os.NewSyscallError("GetFileAttributes", err)
}
if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 {
dir = pathname
} else {
dir, _ = filepath.Split(pathname)
dir = filepath.Clean(dir)
}
return
}
func (w *Watcher) getIno(path string) (ino *inode, err error) {
h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
windows.FILE_LIST_DIRECTORY,
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
nil, windows.OPEN_EXISTING,
windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0)
if err != nil {
return nil, os.NewSyscallError("CreateFile", err)
}
var fi windows.ByHandleFileInformation
err = windows.GetFileInformationByHandle(h, &fi)
if err != nil {
windows.CloseHandle(h)
return nil, os.NewSyscallError("GetFileInformationByHandle", err)
}
ino = &inode{
handle: h,
volume: fi.VolumeSerialNumber,
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
}
return ino, nil
}
// Must run within the I/O thread.
func (m watchMap) get(ino *inode) *watch {
if i := m[ino.volume]; i != nil {
return i[ino.index]
}
return nil
}
// Must run within the I/O thread.
func (m watchMap) set(ino *inode, watch *watch) {
i := m[ino.volume]
if i == nil {
i = make(indexMap)
m[ino.volume] = i
}
i[ino.index] = watch
}
// Must run within the I/O thread.
func (w *Watcher) addWatch(pathname string, flags uint64) error {
dir, err := w.getDir(pathname)
if err != nil {
return err
}
ino, err := w.getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watchEntry := w.watches.get(ino)
w.mu.Unlock()
if watchEntry == nil {
_, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0)
if err != nil {
windows.CloseHandle(ino.handle)
return os.NewSyscallError("CreateIoCompletionPort", err)
}
watchEntry = &watch{
ino: ino,
path: dir,
names: make(map[string]uint64),
}
w.mu.Lock()
w.watches.set(ino, watchEntry)
w.mu.Unlock()
flags |= provisional
} else {
windows.CloseHandle(ino.handle)
}
if pathname == dir {
watchEntry.mask |= flags
} else {
watchEntry.names[filepath.Base(pathname)] |= flags
}
err = w.startRead(watchEntry)
if err != nil {
return err
}
if pathname == dir {
watchEntry.mask &= ^provisional
} else {
watchEntry.names[filepath.Base(pathname)] &= ^provisional
}
return nil
}
// Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error {
dir, err := w.getDir(pathname)
if err != nil {
return err
}
ino, err := w.getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watch := w.watches.get(ino)
w.mu.Unlock()
err = windows.CloseHandle(ino.handle)
if err != nil {
w.sendError(os.NewSyscallError("CloseHandle", err))
}
if watch == nil {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
}
if pathname == dir {
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
watch.mask = 0
} else {
name := filepath.Base(pathname)
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
return w.startRead(watch)
}
// Must run within the I/O thread.
func (w *Watcher) deleteWatch(watch *watch) {
for name, mask := range watch.names {
if mask&provisional == 0 {
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
}
delete(watch.names, name)
}
if watch.mask != 0 {
if watch.mask&provisional == 0 {
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
}
watch.mask = 0
}
}
// Must run within the I/O thread.
func (w *Watcher) startRead(watch *watch) error {
err := windows.CancelIo(watch.ino.handle)
if err != nil {
w.sendError(os.NewSyscallError("CancelIo", err))
w.deleteWatch(watch)
}
mask := w.toWindowsFlags(watch.mask)
for _, m := range watch.names {
mask |= w.toWindowsFlags(m)
}
if mask == 0 {
err := windows.CloseHandle(watch.ino.handle)
if err != nil {
w.sendError(os.NewSyscallError("CloseHandle", err))
}
w.mu.Lock()
delete(w.watches[watch.ino.volume], watch.ino.index)
w.mu.Unlock()
return nil
}
rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
if rdErr != nil {
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
// Watched directory was probably removed
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
err = nil
}
w.deleteWatch(watch)
w.startRead(watch)
return err
}
return nil
}
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Events channel.
// Entry point to the I/O thread.
func (w *Watcher) readEvents() {
var (
n uint32
key uintptr
ov *windows.Overlapped
)
runtime.LockOSThread()
for {
qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
// This error is handled after the watch == nil check below. NOTE: this
// seems odd, not sure if it's correct.
watch := (*watch)(unsafe.Pointer(ov))
if watch == nil {
select {
case ch := <-w.quit:
w.mu.Lock()
var indexes []indexMap
for _, index := range w.watches {
indexes = append(indexes, index)
}
w.mu.Unlock()
for _, index := range indexes {
for _, watch := range index {
w.deleteWatch(watch)
w.startRead(watch)
}
}
err := windows.CloseHandle(w.port)
if err != nil {
err = os.NewSyscallError("CloseHandle", err)
}
close(w.Events)
close(w.Errors)
ch <- err
return
case in := <-w.input:
switch in.op {
case opAddWatch:
in.reply <- w.addWatch(in.path, uint64(in.flags))
case opRemoveWatch:
in.reply <- w.remWatch(in.path)
}
default:
}
continue
}
switch qErr {
case windows.ERROR_MORE_DATA:
if watch == nil {
w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
} else {
// The i/o succeeded but the buffer is full.
// In theory we should be building up a full packet.
// In practice we can get away with just carrying on.
n = uint32(unsafe.Sizeof(watch.buf))
}
case windows.ERROR_ACCESS_DENIED:
// Watched directory was probably removed
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
w.deleteWatch(watch)
w.startRead(watch)
continue
case windows.ERROR_OPERATION_ABORTED:
// CancelIo was called on this handle
continue
default:
w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
continue
case nil:
}
var offset uint32
for {
if n == 0 {
w.sendError(errors.New("short read in readEvents()"))
break
}
// Point "raw" to the event in the buffer
raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
// Create a buf that is the size of the path name
size := int(raw.FileNameLength / 2)
var buf []uint16
// TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973
sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
sh.Len = size
sh.Cap = size
name := windows.UTF16ToString(buf)
fullname := filepath.Join(watch.path, name)
var mask uint64
switch raw.Action {
case windows.FILE_ACTION_REMOVED:
mask = sysFSDELETESELF
case windows.FILE_ACTION_MODIFIED:
mask = sysFSMODIFY
case windows.FILE_ACTION_RENAMED_OLD_NAME:
watch.rename = name
case windows.FILE_ACTION_RENAMED_NEW_NAME:
// Update saved path of all sub-watches.
old := filepath.Join(watch.path, watch.rename)
w.mu.Lock()
for _, watchMap := range w.watches {
for _, ww := range watchMap {
if strings.HasPrefix(ww.path, old) {
ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old))
}
}
}
w.mu.Unlock()
if watch.names[watch.rename] != 0 {
watch.names[name] |= watch.names[watch.rename]
delete(watch.names, watch.rename)
mask = sysFSMOVESELF
}
}
sendNameEvent := func() {
w.sendEvent(fullname, watch.names[name]&mask)
}
if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
sendNameEvent()
}
if raw.Action == windows.FILE_ACTION_REMOVED {
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action))
if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
fullname = filepath.Join(watch.path, watch.rename)
sendNameEvent()
}
// Move to the next event in the buffer
if raw.NextEntryOffset == 0 {
break
}
offset += raw.NextEntryOffset
// Error!
if offset >= n {
w.sendError(errors.New(
"Windows system assumed buffer larger than it is, events have likely been missed."))
break
}
}
if err := w.startRead(watch); err != nil {
w.sendError(err)
}
}
}
func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
var m uint32
if mask&sysFSMODIFY != 0 {
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
}
if mask&sysFSATTRIB != 0 {
m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES
}
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
}
return m
}
func (w *Watcher) toFSnotifyFlags(action uint32) uint64 {
switch action {
case windows.FILE_ACTION_ADDED:
return sysFSCREATE
case windows.FILE_ACTION_REMOVED:
return sysFSDELETE
case windows.FILE_ACTION_MODIFIED:
return sysFSMODIFY
case windows.FILE_ACTION_RENAMED_OLD_NAME:
return sysFSMOVEDFROM
case windows.FILE_ACTION_RENAMED_NEW_NAME:
return sysFSMOVEDTO
}
return 0
}
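// A worked example of the two mappings above, using the constants from this
// file and golang.org/x/sys/windows:
//
//	w.toWindowsFlags(sysFSCREATE | sysFSDELETE)
//	// == FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME
//	w.toFSnotifyFlags(windows.FILE_ACTION_ADDED) // == sysFSCREATE
//	w.newEvent(`dir\new`, sysFSCREATE).Op        // == Create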

View File

@ -1,38 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build solaris
// +build solaris
package fsnotify
import (
"errors"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
return nil
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
return nil
}

View File

@ -1,29 +1,37 @@
//go:build !plan9
// +build !plan9
// Package fsnotify provides a cross-platform interface for file system
// notifications.
package fsnotify
import (
"errors"
"fmt"
"strings"
)
// Event represents a file system notification.
type Event struct {
// Path to the file or directory.
//
// Paths are relative to the input; for example with Add("dir") the Name
// will be set to "dir/file" if you create that file, but if you use
// Add("/path/to/dir") it will be "/path/to/dir/file".
Name string
// File operation that triggered the event.
//
// This is a bitmask and some systems may send multiple operations at once.
// Use the Event.Has() method instead of comparing with ==.
Op Op
}
// Op describes a set of file operations.
type Op uint32
// The operations fsnotify can trigger; see the documentation on [Watcher] for a
// full description, and check them with [Event.Has].
const (
Create Op = 1 << iota
Write
@ -32,38 +40,42 @@ const (
Chmod
)
// Common errors that can be reported by a watcher
var (
ErrNonExistentWatch = errors.New("can't remove non-existent watcher")
ErrEventOverflow = errors.New("fsnotify queue overflow")
)
func (op Op) String() string {
var b strings.Builder
if op.Has(Create) {
b.WriteString("|CREATE")
}
if op.Has(Remove) {
b.WriteString("|REMOVE")
}
if op.Has(Write) {
b.WriteString("|WRITE")
}
if op.Has(Rename) {
b.WriteString("|RENAME")
}
if op.Has(Chmod) {
b.WriteString("|CHMOD")
}
if b.Len() == 0 {
return "[no events]"
}
return b.String()[1:]
}
// Has reports if this operation has the given operation.
func (o Op) Has(h Op) bool { return o&h == h }
// Has reports if this event has the given operation.
func (e Event) Has(op Op) bool { return e.Op.Has(op) }
// String returns a string representation of the event with their path.
func (e Event) String() string {
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
}
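// Because Op is a bitmask, check events with Has rather than ==; a short
// sketch:
//
//	ev := Event{Name: "file", Op: Create | Write}
//	ev.Has(Create)  // true
//	ev.Has(Remove)  // false
//	ev.Op == Create // false: == misses combined operations
//	fmt.Println(ev) // CREATE|WRITE  "file"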

View File

@ -1,338 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux
// +build linux
package fsnotify
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"unsafe"
"golang.org/x/sys/unix"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
mu sync.Mutex // Map access
fd int
poller *fdPoller
watches map[string]*watch // Map of inotify watches (key: path)
paths map[int]string // Map of watched paths (key: watch descriptor)
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
doneResp chan struct{} // Channel to respond to Close
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
// Create inotify fd
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
if fd == -1 {
return nil, errno
}
// Create epoll
poller, err := newFdPoller(fd)
if err != nil {
unix.Close(fd)
return nil, err
}
w := &Watcher{
fd: fd,
poller: poller,
watches: make(map[string]*watch),
paths: make(map[int]string),
Events: make(chan Event),
Errors: make(chan error),
done: make(chan struct{}),
doneResp: make(chan struct{}),
}
go w.readEvents()
return w, nil
}
func (w *Watcher) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
if w.isClosed() {
return nil
}
// Send 'close' signal to goroutine, and set the Watcher to closed.
close(w.done)
// Wake up goroutine
w.poller.wake()
// Wait for goroutine to close
<-w.doneResp
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
name = filepath.Clean(name)
if w.isClosed() {
return errors.New("inotify instance already closed")
}
const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
var flags uint32 = agnosticEvents
w.mu.Lock()
defer w.mu.Unlock()
watchEntry := w.watches[name]
if watchEntry != nil {
flags |= watchEntry.flags | unix.IN_MASK_ADD
}
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
if wd == -1 {
return errno
}
if watchEntry == nil {
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
w.paths[wd] = name
} else {
watchEntry.wd = uint32(wd)
watchEntry.flags = flags
}
return nil
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
name = filepath.Clean(name)
// Fetch the watch.
w.mu.Lock()
defer w.mu.Unlock()
watch, ok := w.watches[name]
// Remove it from inotify.
if !ok {
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
}
// We successfully removed the watch if InotifyRmWatch doesn't return an
// error, we need to clean up our internal state to ensure it matches
// inotify's kernel state.
delete(w.paths, int(watch.wd))
delete(w.watches, name)
// inotify_rm_watch will return EINVAL if the file has been deleted;
// the inotify will already have been removed.
// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
// so that EINVAL means that the wd is being rm_watch()ed or its file removed
// by another thread and we have not received IN_IGNORE event.
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
if success == -1 {
// TODO: Perhaps it's not helpful to return an error here in every case.
// the only two possible errors are:
// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
// Watch descriptors are invalidated when they are removed explicitly or implicitly;
// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
return errno
}
return nil
}
type watch struct {
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
}
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel
func (w *Watcher) readEvents() {
var (
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
n int // Number of bytes read with read()
errno error // Syscall errno
ok bool // For poller.wait
)
defer close(w.doneResp)
defer close(w.Errors)
defer close(w.Events)
defer unix.Close(w.fd)
defer w.poller.close()
for {
// See if we have been closed.
if w.isClosed() {
return
}
ok, errno = w.poller.wait()
if errno != nil {
select {
case w.Errors <- errno:
case <-w.done:
return
}
continue
}
if !ok {
continue
}
n, errno = unix.Read(w.fd, buf[:])
// If a signal interrupted execution, see if we've been asked to close, and try again.
// http://man7.org/linux/man-pages/man7/signal.7.html :
// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
if errno == unix.EINTR {
continue
}
// unix.Read might have been woken up by Close. If so, we're done.
if w.isClosed() {
return
}
if n < unix.SizeofInotifyEvent {
var err error
if n == 0 {
// EOF was received, which should really never happen.
err = io.EOF
} else if n < 0 {
// If an error occurred while reading.
err = errno
} else {
// Read was too short.
err = errors.New("notify: short read in readEvents()")
}
select {
case w.Errors <- err:
case <-w.done:
return
}
continue
}
var offset uint32
// We don't know how many events we just read into the buffer
// While the offset points to at least one whole event...
for offset <= uint32(n-unix.SizeofInotifyEvent) {
// Point "raw" to the event in the buffer
raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
mask := uint32(raw.Mask)
nameLen := uint32(raw.Len)
if mask&unix.IN_Q_OVERFLOW != 0 {
select {
case w.Errors <- ErrEventOverflow:
case <-w.done:
return
}
}
// If the event happened to the watched directory or the watched file, the kernel
// doesn't append the filename to the event, but we would like to always fill
// the "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
w.mu.Lock()
name, ok := w.paths[int(raw.Wd)]
// IN_DELETE_SELF occurs when the file/directory being watched is removed.
// This is a sign to clean up the maps, otherwise we are no longer in sync
// with the inotify kernel state which has already deleted the watch
// automatically.
if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
delete(w.paths, int(raw.Wd))
delete(w.watches, name)
}
w.mu.Unlock()
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
}
event := newEvent(name, mask)
// Send the events that are not ignored on the events channel
if !event.ignoreLinux(mask) {
select {
case w.Events <- event:
case <-w.done:
return
}
}
// Move to the next event in the buffer
offset += unix.SizeofInotifyEvent + nameLen
}
}
}
// Certain types of events can be "ignored" and not sent over the Events
// channel. Such as events marked ignore by the kernel, or MODIFY events
// against files that do not exist.
func (e *Event) ignoreLinux(mask uint32) bool {
// Ignore anything the inotify API says to ignore
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
return true
}
// If the event is not a DELETE or RENAME, the file must exist.
// Otherwise the event is ignored.
// *Note*: this was put in place because it was seen that a MODIFY
// event was sent after the DELETE. This ignores that MODIFY and
// assumes a DELETE will come or has come if the file doesn't exist.
if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
_, statErr := os.Lstat(e.Name)
return os.IsNotExist(statErr)
}
return false
}
// newEvent returns a platform-independent Event based on an inotify mask.
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
e.Op |= Create
}
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
e.Op |= Remove
}
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
e.Op |= Write
}
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
e.Op |= Rename
}
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
e.Op |= Chmod
}
return e
}

View File

@ -1,188 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux
// +build linux
package fsnotify
import (
"errors"
"golang.org/x/sys/unix"
)
type fdPoller struct {
fd int // File descriptor (as returned by the inotify_init() syscall)
epfd int // Epoll file descriptor
pipe [2]int // Pipe for waking up
}
func emptyPoller(fd int) *fdPoller {
poller := new(fdPoller)
poller.fd = fd
poller.epfd = -1
poller.pipe[0] = -1
poller.pipe[1] = -1
return poller
}
// Create a new inotify poller.
// This creates an inotify handler, and an epoll handler.
func newFdPoller(fd int) (*fdPoller, error) {
var errno error
poller := emptyPoller(fd)
defer func() {
if errno != nil {
poller.close()
}
}()
poller.fd = fd
// Create epoll fd
poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
if poller.epfd == -1 {
return nil, errno
}
// Create pipe; pipe[0] is the read end, pipe[1] the write end.
errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC)
if errno != nil {
return nil, errno
}
// Register inotify fd with epoll
event := unix.EpollEvent{
Fd: int32(poller.fd),
Events: unix.EPOLLIN,
}
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
if errno != nil {
return nil, errno
}
// Register pipe fd with epoll
event = unix.EpollEvent{
Fd: int32(poller.pipe[0]),
Events: unix.EPOLLIN,
}
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
if errno != nil {
return nil, errno
}
return poller, nil
}
// Wait using epoll.
// Returns true if something is ready to be read,
// false if there is not.
func (poller *fdPoller) wait() (bool, error) {
// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
// I don't know whether epoll_wait returns the number of events returned,
// or the total number of events ready.
// I decided to catch both by making the buffer one larger than the maximum.
events := make([]unix.EpollEvent, 7)
for {
n, errno := unix.EpollWait(poller.epfd, events, -1)
if n == -1 {
if errno == unix.EINTR {
continue
}
return false, errno
}
if n == 0 {
// If there are no events, try again.
continue
}
if n > 6 {
// This should never happen. More events were returned than should be possible.
return false, errors.New("epoll_wait returned more events than I know what to do with")
}
ready := events[:n]
epollhup := false
epollerr := false
epollin := false
for _, event := range ready {
if event.Fd == int32(poller.fd) {
if event.Events&unix.EPOLLHUP != 0 {
// This should not happen, but if it does, treat it as a wakeup.
epollhup = true
}
if event.Events&unix.EPOLLERR != 0 {
// If an error is waiting on the file descriptor, we should pretend
// something is ready to read, and let unix.Read pick up the error.
epollerr = true
}
if event.Events&unix.EPOLLIN != 0 {
// There is data to read.
epollin = true
}
}
if event.Fd == int32(poller.pipe[0]) {
if event.Events&unix.EPOLLHUP != 0 {
// Write pipe descriptor was closed, by us. This means we're closing down the
// watcher, and we should wake up.
}
if event.Events&unix.EPOLLERR != 0 {
// If an error is waiting on the pipe file descriptor.
// This is an absolute mystery, and should never ever happen.
return false, errors.New("Error on the pipe descriptor.")
}
if event.Events&unix.EPOLLIN != 0 {
// This is a regular wakeup, so we have to clear the buffer.
err := poller.clearWake()
if err != nil {
return false, err
}
}
}
}
if epollhup || epollerr || epollin {
return true, nil
}
return false, nil
}
}
// Close the write end of the poller.
func (poller *fdPoller) wake() error {
buf := make([]byte, 1)
n, errno := unix.Write(poller.pipe[1], buf)
if n == -1 {
if errno == unix.EAGAIN {
// Buffer is full, poller will wake.
return nil
}
return errno
}
return nil
}
func (poller *fdPoller) clearWake() error {
// You have to be woken up a LOT in order to get to 100!
buf := make([]byte, 100)
n, errno := unix.Read(poller.pipe[0], buf)
if n == -1 {
if errno == unix.EAGAIN {
// Buffer is empty, someone else cleared our wake.
return nil
}
return errno
}
return nil
}
// Close all poller file descriptors, but not the one passed to it.
func (poller *fdPoller) close() {
if poller.pipe[1] != -1 {
unix.Close(poller.pipe[1])
}
if poller.pipe[0] != -1 {
unix.Close(poller.pipe[0])
}
if poller.epfd != -1 {
unix.Close(poller.epfd)
}
}
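// Usage sketch for the poller above (inotifyFd is an assumed, valid inotify
// descriptor): wait() blocks in epoll_wait until inotify data arrives, or
// until another goroutine unblocks it through the self-pipe via wake().
//
//	poller, err := newFdPoller(inotifyFd)
//	if err != nil {
//		return err
//	}
//	go func() { _ = poller.wake() }() // e.g. from Close()
//	ready, _ := poller.wait()         // true once data or a wakeup arrived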

View File

@ -1,522 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
// +build freebsd openbsd netbsd dragonfly darwin
package fsnotify
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"time"
"golang.org/x/sys/unix"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
kq int // File descriptor (as returned by the kqueue() syscall).
mu sync.Mutex // Protects access to watcher data
watches map[string]int // Map of watched file descriptors (key: path).
externalWatches map[string]bool // Map of watches added by user of the library.
dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
isClosed bool // Set to true when Close() is first called
}
type pathInfo struct {
name string
isDir bool
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
kq, err := kqueue()
if err != nil {
return nil, err
}
w := &Watcher{
kq: kq,
watches: make(map[string]int),
dirFlags: make(map[string]uint32),
paths: make(map[int]pathInfo),
fileExists: make(map[string]bool),
externalWatches: make(map[string]bool),
Events: make(chan Event),
Errors: make(chan error),
done: make(chan struct{}),
}
go w.readEvents()
return w, nil
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return nil
}
w.isClosed = true
// copy paths to remove while locked
var pathsToRemove = make([]string, 0, len(w.watches))
for name := range w.watches {
pathsToRemove = append(pathsToRemove, name)
}
w.mu.Unlock()
// unlock before calling Remove, which also locks
for _, name := range pathsToRemove {
w.Remove(name)
}
// send a "quit" message to the reader goroutine
close(w.done)
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
w.mu.Lock()
w.externalWatches[name] = true
w.mu.Unlock()
_, err := w.addWatch(name, noteAllEvents)
return err
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
name = filepath.Clean(name)
w.mu.Lock()
watchfd, ok := w.watches[name]
w.mu.Unlock()
if !ok {
return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
}
const registerRemove = unix.EV_DELETE
if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
return err
}
unix.Close(watchfd)
w.mu.Lock()
isDir := w.paths[watchfd].isDir
delete(w.watches, name)
delete(w.paths, watchfd)
delete(w.dirFlags, name)
w.mu.Unlock()
// Find all watched paths that are in this directory that are not external.
if isDir {
var pathsToRemove []string
w.mu.Lock()
for _, path := range w.paths {
wdir, _ := filepath.Split(path.name)
if filepath.Clean(wdir) == name {
if !w.externalWatches[path.name] {
pathsToRemove = append(pathsToRemove, path.name)
}
}
}
w.mu.Unlock()
for _, name := range pathsToRemove {
// Since these are internal, not much sense in propagating error
// to the user, as that will just confuse them with an error about
// a path they did not explicitly watch themselves.
w.Remove(name)
}
}
return nil
}
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
// keventWaitTime to block on each read from kevent
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
// addWatch adds name to the watched file set.
// The flags are interpreted as described in kevent(2).
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
var isDir bool
// Make ./name and name equivalent
name = filepath.Clean(name)
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return "", errors.New("kevent instance already closed")
}
watchfd, alreadyWatching := w.watches[name]
// We already have a watch, but we can still override flags.
if alreadyWatching {
isDir = w.paths[watchfd].isDir
}
w.mu.Unlock()
if !alreadyWatching {
fi, err := os.Lstat(name)
if err != nil {
return "", err
}
// Don't watch sockets.
if fi.Mode()&os.ModeSocket == os.ModeSocket {
return "", nil
}
// Don't watch named pipes.
if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
return "", nil
}
// Follow Symlinks
// Unfortunately, Linux can add bogus symlinks to watch list without
// issue, and Windows can't do symlinks period (AFAIK). To maintain
// consistency, we will act like everything is fine. There will simply
// be no file events for broken symlinks.
// Hence the returns of nil on errors.
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
name, err = filepath.EvalSymlinks(name)
if err != nil {
return "", nil
}
w.mu.Lock()
_, alreadyWatching = w.watches[name]
w.mu.Unlock()
if alreadyWatching {
return name, nil
}
fi, err = os.Lstat(name)
if err != nil {
return "", nil
}
}
watchfd, err = unix.Open(name, openMode, 0700)
if watchfd == -1 {
return "", err
}
isDir = fi.IsDir()
}
const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
unix.Close(watchfd)
return "", err
}
if !alreadyWatching {
w.mu.Lock()
w.watches[name] = watchfd
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
w.mu.Unlock()
}
if isDir {
// Watch the directory if it has not been watched before,
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
w.mu.Lock()
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
// Store flags so this watch can be updated later
w.dirFlags[name] = flags
w.mu.Unlock()
if watchDir {
if err := w.watchDirectoryFiles(name); err != nil {
return "", err
}
}
}
return name, nil
}
// readEvents reads from kqueue and converts the received kevents into
// Event values that it sends down the Events channel.
func (w *Watcher) readEvents() {
eventBuffer := make([]unix.Kevent_t, 10)
loop:
for {
// See if there is a message on the "done" channel
select {
case <-w.done:
break loop
default:
}
// Get new events
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
// EINTR is okay, the syscall was interrupted before timeout expired.
if err != nil && err != unix.EINTR {
select {
case w.Errors <- err:
case <-w.done:
break loop
}
continue
}
// Flush the events we received to the Events channel
for len(kevents) > 0 {
kevent := &kevents[0]
watchfd := int(kevent.Ident)
mask := uint32(kevent.Fflags)
w.mu.Lock()
path := w.paths[watchfd]
w.mu.Unlock()
event := newEvent(path.name, mask)
if path.isDir && !(event.Op&Remove == Remove) {
// Double check to make sure the directory exists. This can happen when
// we do a rm -fr on recursively watched folders and we receive a
// modification event first, but the folder has been deleted and we later
// receive the delete event.
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
// mark it as a delete event
event.Op |= Remove
}
}
if event.Op&Rename == Rename || event.Op&Remove == Remove {
w.Remove(event.Name)
w.mu.Lock()
delete(w.fileExists, event.Name)
w.mu.Unlock()
}
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
w.sendDirectoryChangeEvents(event.Name)
} else {
// Send the event on the Events channel.
select {
case w.Events <- event:
case <-w.done:
break loop
}
}
if event.Op&Remove == Remove {
// Look for a file that may have overwritten this.
// For example, mv f1 f2 will delete f2, then create f2.
if path.isDir {
fileDir := filepath.Clean(event.Name)
w.mu.Lock()
_, found := w.watches[fileDir]
w.mu.Unlock()
if found {
// make sure the directory exists before we watch for changes. When we
// do a recursive watch and perform rm -fr, the parent directory might
// have gone missing; ignore the missing directory and let the
// upcoming delete event remove the watch from the parent directory.
if _, err := os.Lstat(fileDir); err == nil {
w.sendDirectoryChangeEvents(fileDir)
}
}
} else {
filePath := filepath.Clean(event.Name)
if fileInfo, err := os.Lstat(filePath); err == nil {
w.sendFileCreatedEventIfNew(filePath, fileInfo)
}
}
}
// Move to next event
kevents = kevents[1:]
}
}
// cleanup
err := unix.Close(w.kq)
if err != nil {
// The only way the previous loop breaks is if w.done was closed, so we need a non-blocking send to w.Errors.
select {
case w.Errors <- err:
default:
}
}
close(w.Events)
close(w.Errors)
}
// newEvent returns a platform-independent Event based on kqueue Fflags.
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
e.Op |= Remove
}
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
e.Op |= Write
}
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
e.Op |= Rename
}
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
e.Op |= Chmod
}
return e
}
func newCreateEvent(name string) Event {
return Event{Name: name, Op: Create}
}
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
return err
}
for _, fileInfo := range files {
filePath := filepath.Join(dirPath, fileInfo.Name())
filePath, err = w.internalWatch(filePath, fileInfo)
if err != nil {
return err
}
w.mu.Lock()
w.fileExists[filePath] = true
w.mu.Unlock()
}
return nil
}
// sendDirectoryChangeEvents searches the directory for newly created files
// and sends them over the event channel. This functionality is to have
// the BSD version of fsnotify match Linux inotify which provides a
// create event for files created in a watched directory.
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
select {
case w.Errors <- err:
case <-w.done:
return
}
}
// Search for new files
for _, fileInfo := range files {
filePath := filepath.Join(dirPath, fileInfo.Name())
err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
if err != nil {
return
}
}
}
// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
w.mu.Lock()
_, doesExist := w.fileExists[filePath]
w.mu.Unlock()
if !doesExist {
// Send create event
select {
case w.Events <- newCreateEvent(filePath):
case <-w.done:
return
}
}
// like watchDirectoryFiles (but without doing another ReadDir)
filePath, err = w.internalWatch(filePath, fileInfo)
if err != nil {
return err
}
w.mu.Lock()
w.fileExists[filePath] = true
w.mu.Unlock()
return nil
}
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
if fileInfo.IsDir() {
// mimic Linux providing delete events for subdirectories
// but preserve the flags used if currently watching subdirectory
w.mu.Lock()
flags := w.dirFlags[name]
w.mu.Unlock()
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
return w.addWatch(name, flags)
}
// watch file to mimic Linux inotify
return w.addWatch(name, noteAllEvents)
}
// kqueue creates a new kernel event queue and returns a descriptor.
func kqueue() (kq int, err error) {
kq, err = unix.Kqueue()
if kq == -1 {
return kq, err
}
return kq, nil
}
// register events with the queue
func register(kq int, fds []int, flags int, fflags uint32) error {
changes := make([]unix.Kevent_t, len(fds))
for i, fd := range fds {
// SetKevent converts int to the platform-specific types:
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
changes[i].Fflags = fflags
}
// register the events
success, err := unix.Kevent(kq, changes, nil, nil)
if success == -1 {
return err
}
return nil
}
// read retrieves pending events, or waits until an event occurs.
// A timeout of nil blocks indefinitely, while 0 polls the queue.
func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
n, err := unix.Kevent(kq, nil, events, timeout)
if err != nil {
return nil, err
}
return events[0:n], nil
}
// durationToTimespec prepares a timeout value
func durationToTimespec(d time.Duration) unix.Timespec {
return unix.NsecToTimespec(d.Nanoseconds())
}
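// The helpers above compose into a minimal watch loop; a sketch (the path and
// error handling are illustrative):
//
//	kq, _ := kqueue()
//	fd, _ := unix.Open("/tmp/file", openMode, 0700)
//	_ = register(kq, []int{fd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, noteAllEvents)
//	for {
//		kevents, err := read(kq, make([]unix.Kevent_t, 10), &keventWaitTime)
//		if err != nil && err != unix.EINTR {
//			break
//		}
//		for _, kev := range kevents {
//			_ = newEvent("/tmp/file", uint32(kev.Fflags))
//		}
//	}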

vendor/github.com/fsnotify/fsnotify/mkdoc.zsh generated vendored Normal file
View File

@ -0,0 +1,208 @@
#!/usr/bin/env zsh
[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
setopt err_exit no_unset pipefail extended_glob
# Simple script to update the godoc comments on all watchers. Probably took me
# more time to write this than doing it manually, but ah well 🙃
watcher=$(<<EOF
// Watcher watches a set of paths, delivering events on a channel.
//
// A watcher should not be copied (e.g. pass it by pointer, rather than by
// value).
//
// # Linux notes
//
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
// for the number of watches per user, and fs.inotify.max_user_instances
// specifies the maximum number of inotify instances per user. Every Watcher you
// create is an "instance", and every path you add is a "watch".
//
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
// /proc/sys/fs/inotify/max_user_instances
//
// To increase them you can use sysctl or write the value to the /proc file:
//
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
//
// # kqueue notes (macOS, BSD)
//
// kqueue requires opening a file descriptor for every file that's being watched;
// so if you're watching a directory with five files then that's six file
// descriptors. You will run into your system's "max open files" limit faster on
// these platforms.
//
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # macOS notes
//
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
// Settings" until we have a native FSEvents implementation (see [#11]).
//
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
EOF
)
new=$(<<EOF
// NewWatcher creates a new Watcher.
EOF
)
add=$(<<EOF
// Add starts monitoring the path for changes.
//
// A path can only be watched once; attempting to watch it more than once will
// return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted.
//
// A path will remain watched if it gets renamed to somewhere else on the same
// filesystem, but the monitor will get removed if the path gets deleted and
// re-created, or if it's moved to a different filesystem.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file a temporary file will be written to first, and if successful the
// temporary file is moved to the destination, removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
EOF
)
remove=$(<<EOF
// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
EOF
)
close=$(<<EOF
// Close removes all watches and closes the events channel.
EOF
)
watchlist=$(<<EOF
// WatchList returns all paths added with [Add] (and that are not yet removed).
EOF
)
events=$(<<EOF
// Events sends the filesystem change events.
//
// fsnotify can send the following events; a "path" here can refer to a
// file, directory, symbolic link, or special file like a FIFO.
//
// fsnotify.Create A new path was created; this may be followed by one
// or more Write events if data also gets written to a
// file.
//
// fsnotify.Remove A path was removed.
//
// fsnotify.Rename A path was renamed. A rename is always sent with the
// old path as Event.Name, and a Create event will be
// sent with the new name. Renames are only sent for
// paths that are currently watched; e.g. moving an
// unmonitored file into a monitored directory will
// show up as just a Create. Similarly, renaming a file
// to outside a monitored directory will show up as
// only a Rename.
//
// fsnotify.Write A file or named pipe was written to. A Truncate will
// also trigger a Write. A single "write action"
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
// you may get hundreds of Write events, so you
// probably want to wait until you've stopped receiving
// them (see the dedup example in cmd/fsnotify).
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
// when a file is truncated. On Windows it's never
// sent.
EOF
)
errors=$(<<EOF
// Errors sends any errors.
EOF
)
set-cmt() {
local pat=$1
local cmt=$2
IFS=$'\n' local files=($(grep -n $pat backend_*~*_test.go))
for f in $files; do
IFS=':' local fields=($=f)
local file=$fields[1]
local end=$(( $fields[2] - 1 ))
# Find start of comment.
local start=0
IFS=$'\n' local lines=($(head -n$end $file))
for (( i = 1; i <= $#lines; i++ )); do
local line=$lines[-$i]
if ! grep -q '^[[:space:]]*//' <<<$line; then
start=$(( end - (i - 2) ))
break
fi
done
head -n $(( start - 1 )) $file >/tmp/x
print -r -- $cmt >>/tmp/x
tail -n+$(( end + 1 )) $file >>/tmp/x
mv /tmp/x $file
done
}
set-cmt '^type Watcher struct ' $watcher
set-cmt '^func NewWatcher(' $new
set-cmt '^func (w \*Watcher) Add(' $add
set-cmt '^func (w \*Watcher) Remove(' $remove
set-cmt '^func (w \*Watcher) Close(' $close
set-cmt '^func (w \*Watcher) WatchList(' $watchlist
set-cmt '^[[:space:]]*Events *chan Event$' $events
set-cmt '^[[:space:]]*Errors *chan error$' $errors
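# Assumed invocation: run from the package root so the backend_*.go files are
# in the working directory.
#
#   zsh mkdoc.zsh
#
# Each set-cmt call above replaces the comment block directly preceding the
# matched declaration in every backend_*.go file (tests excluded by the
# ~*_test.go glob), writing the files in place.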

View File

@ -1,7 +1,3 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build freebsd || openbsd || netbsd || dragonfly //go:build freebsd || openbsd || netbsd || dragonfly
// +build freebsd openbsd netbsd dragonfly // +build freebsd openbsd netbsd dragonfly

View File

@ -1,7 +1,3 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin //go:build darwin
// +build darwin // +build darwin

View File

@ -1,562 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build windows
// +build windows
package fsnotify
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
"syscall"
"unsafe"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
isClosed bool // Set to true when Close() is first called
mu sync.Mutex // Map access
port syscall.Handle // Handle to completion port
watches watchMap // Map of watches (key: i-number)
input chan *input // Inputs to the reader are sent on this channel
quit chan chan<- error
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
if e != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
}
w := &Watcher{
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
Events: make(chan Event, 50),
Errors: make(chan error),
quit: make(chan chan<- error, 1),
}
go w.readEvents()
return w, nil
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
if w.isClosed {
return nil
}
w.isClosed = true
// Send "quit" message to the reader goroutine
ch := make(chan error)
w.quit <- ch
if err := w.wakeupReader(); err != nil {
return err
}
return <-ch
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
if w.isClosed {
return errors.New("watcher already closed")
}
in := &input{
op: opAddWatch,
path: filepath.Clean(name),
flags: sysFSALLEVENTS,
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
in := &input{
op: opRemoveWatch,
path: filepath.Clean(name),
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
const (
// Options for AddWatch
sysFSONESHOT = 0x80000000
sysFSONLYDIR = 0x1000000
// Events
sysFSACCESS = 0x1
sysFSALLEVENTS = 0xfff
sysFSATTRIB = 0x4
sysFSCLOSE = 0x18
sysFSCREATE = 0x100
sysFSDELETE = 0x200
sysFSDELETESELF = 0x400
sysFSMODIFY = 0x2
sysFSMOVE = 0xc0
sysFSMOVEDFROM = 0x40
sysFSMOVEDTO = 0x80
sysFSMOVESELF = 0x800
// Special events
sysFSIGNORED = 0x8000
sysFSQOVERFLOW = 0x4000
)
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
e.Op |= Create
}
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
e.Op |= Remove
}
if mask&sysFSMODIFY == sysFSMODIFY {
e.Op |= Write
}
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
e.Op |= Rename
}
if mask&sysFSATTRIB == sysFSATTRIB {
e.Op |= Chmod
}
return e
}
const (
opAddWatch = iota
opRemoveWatch
)
const (
provisional uint64 = 1 << (32 + iota)
)
type input struct {
op int
path string
flags uint32
reply chan error
}
type inode struct {
handle syscall.Handle
volume uint32
index uint64
}
type watch struct {
ov syscall.Overlapped
ino *inode // i-number
path string // Directory path
mask uint64 // Directory itself is being watched with these notify flags
names map[string]uint64 // Map of names being watched and their notify flags
rename string // Remembers the old name while renaming a file
buf [4096]byte
}
type indexMap map[uint64]*watch
type watchMap map[uint32]indexMap
func (w *Watcher) wakeupReader() error {
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
if e != nil {
return os.NewSyscallError("PostQueuedCompletionStatus", e)
}
return nil
}
func getDir(pathname string) (dir string, err error) {
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
if e != nil {
return "", os.NewSyscallError("GetFileAttributes", e)
}
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
dir = pathname
} else {
dir, _ = filepath.Split(pathname)
dir = filepath.Clean(dir)
}
return
}
func getIno(path string) (ino *inode, err error) {
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
syscall.FILE_LIST_DIRECTORY,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
nil, syscall.OPEN_EXISTING,
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
if e != nil {
return nil, os.NewSyscallError("CreateFile", e)
}
var fi syscall.ByHandleFileInformation
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
syscall.CloseHandle(h)
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
}
ino = &inode{
handle: h,
volume: fi.VolumeSerialNumber,
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
}
return ino, nil
}
// Must run within the I/O thread.
func (m watchMap) get(ino *inode) *watch {
if i := m[ino.volume]; i != nil {
return i[ino.index]
}
return nil
}
// Must run within the I/O thread.
func (m watchMap) set(ino *inode, watch *watch) {
i := m[ino.volume]
if i == nil {
i = make(indexMap)
m[ino.volume] = i
}
i[ino.index] = watch
}
// Must run within the I/O thread.
func (w *Watcher) addWatch(pathname string, flags uint64) error {
dir, err := getDir(pathname)
if err != nil {
return err
}
if flags&sysFSONLYDIR != 0 && pathname != dir {
return nil
}
ino, err := getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watchEntry := w.watches.get(ino)
w.mu.Unlock()
if watchEntry == nil {
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
syscall.CloseHandle(ino.handle)
return os.NewSyscallError("CreateIoCompletionPort", e)
}
watchEntry = &watch{
ino: ino,
path: dir,
names: make(map[string]uint64),
}
w.mu.Lock()
w.watches.set(ino, watchEntry)
w.mu.Unlock()
flags |= provisional
} else {
syscall.CloseHandle(ino.handle)
}
if pathname == dir {
watchEntry.mask |= flags
} else {
watchEntry.names[filepath.Base(pathname)] |= flags
}
if err = w.startRead(watchEntry); err != nil {
return err
}
if pathname == dir {
watchEntry.mask &= ^provisional
} else {
watchEntry.names[filepath.Base(pathname)] &= ^provisional
}
return nil
}
// Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error {
dir, err := getDir(pathname)
if err != nil {
return err
}
ino, err := getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watch := w.watches.get(ino)
w.mu.Unlock()
if watch == nil {
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
}
if pathname == dir {
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
watch.mask = 0
} else {
name := filepath.Base(pathname)
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
return w.startRead(watch)
}
// Must run within the I/O thread.
func (w *Watcher) deleteWatch(watch *watch) {
for name, mask := range watch.names {
if mask&provisional == 0 {
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
}
delete(watch.names, name)
}
if watch.mask != 0 {
if watch.mask&provisional == 0 {
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
}
watch.mask = 0
}
}
// Must run within the I/O thread.
func (w *Watcher) startRead(watch *watch) error {
if e := syscall.CancelIo(watch.ino.handle); e != nil {
w.Errors <- os.NewSyscallError("CancelIo", e)
w.deleteWatch(watch)
}
mask := toWindowsFlags(watch.mask)
for _, m := range watch.names {
mask |= toWindowsFlags(m)
}
if mask == 0 {
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
w.Errors <- os.NewSyscallError("CloseHandle", e)
}
w.mu.Lock()
delete(w.watches[watch.ino.volume], watch.ino.index)
w.mu.Unlock()
return nil
}
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
if e != nil {
err := os.NewSyscallError("ReadDirectoryChanges", e)
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
// Watched directory was probably removed
if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
if watch.mask&sysFSONESHOT != 0 {
watch.mask = 0
}
}
err = nil
}
w.deleteWatch(watch)
w.startRead(watch)
return err
}
return nil
}
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Events channel.
// Entry point to the I/O thread.
func (w *Watcher) readEvents() {
var (
n, key uint32
ov *syscall.Overlapped
)
runtime.LockOSThread()
for {
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
watch := (*watch)(unsafe.Pointer(ov))
if watch == nil {
select {
case ch := <-w.quit:
w.mu.Lock()
var indexes []indexMap
for _, index := range w.watches {
indexes = append(indexes, index)
}
w.mu.Unlock()
for _, index := range indexes {
for _, watch := range index {
w.deleteWatch(watch)
w.startRead(watch)
}
}
var err error
if e := syscall.CloseHandle(w.port); e != nil {
err = os.NewSyscallError("CloseHandle", e)
}
close(w.Events)
close(w.Errors)
ch <- err
return
case in := <-w.input:
switch in.op {
case opAddWatch:
in.reply <- w.addWatch(in.path, uint64(in.flags))
case opRemoveWatch:
in.reply <- w.remWatch(in.path)
}
default:
}
continue
}
switch e {
case syscall.ERROR_MORE_DATA:
if watch == nil {
w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
} else {
// The i/o succeeded but the buffer is full.
// In theory we should be building up a full packet.
// In practice we can get away with just carrying on.
n = uint32(unsafe.Sizeof(watch.buf))
}
case syscall.ERROR_ACCESS_DENIED:
// Watched directory was probably removed
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
w.deleteWatch(watch)
w.startRead(watch)
continue
case syscall.ERROR_OPERATION_ABORTED:
// CancelIo was called on this handle
continue
default:
w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
continue
case nil:
}
var offset uint32
for {
if n == 0 {
w.Events <- newEvent("", sysFSQOVERFLOW)
w.Errors <- errors.New("short read in readEvents()")
break
}
// Point "raw" to the event in the buffer
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
fullname := filepath.Join(watch.path, name)
var mask uint64
switch raw.Action {
case syscall.FILE_ACTION_REMOVED:
mask = sysFSDELETESELF
case syscall.FILE_ACTION_MODIFIED:
mask = sysFSMODIFY
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
watch.rename = name
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
if watch.names[watch.rename] != 0 {
watch.names[name] |= watch.names[watch.rename]
delete(watch.names, watch.rename)
mask = sysFSMOVESELF
}
}
sendNameEvent := func() {
if w.sendEvent(fullname, watch.names[name]&mask) {
if watch.names[name]&sysFSONESHOT != 0 {
delete(watch.names, name)
}
}
}
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
sendNameEvent()
}
if raw.Action == syscall.FILE_ACTION_REMOVED {
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
if watch.mask&sysFSONESHOT != 0 {
watch.mask = 0
}
}
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
fullname = filepath.Join(watch.path, watch.rename)
sendNameEvent()
}
// Move to the next event in the buffer
if raw.NextEntryOffset == 0 {
break
}
offset += raw.NextEntryOffset
// Error!
if offset >= n {
w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
break
}
}
if err := w.startRead(watch); err != nil {
w.Errors <- err
}
}
}
func (w *Watcher) sendEvent(name string, mask uint64) bool {
if mask == 0 {
return false
}
event := newEvent(name, uint32(mask))
select {
case ch := <-w.quit:
w.quit <- ch
case w.Events <- event:
}
return true
}
func toWindowsFlags(mask uint64) uint32 {
var m uint32
if mask&sysFSACCESS != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
}
if mask&sysFSMODIFY != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
}
if mask&sysFSATTRIB != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
}
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
}
return m
}
func toFSnotifyFlags(action uint32) uint64 {
switch action {
case syscall.FILE_ACTION_ADDED:
return sysFSCREATE
case syscall.FILE_ACTION_REMOVED:
return sysFSDELETE
case syscall.FILE_ACTION_MODIFIED:
return sysFSMODIFY
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
return sysFSMOVEDFROM
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
return sysFSMOVEDTO
}
return 0
}
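As a quick sanity check of the two mappings above, here is a sketch (using only names defined in this file) converting a Windows action back into an fsnotify event:

	mask := toFSnotifyFlags(syscall.FILE_ACTION_ADDED) // sysFSCREATE
	ev := newEvent("new.txt", uint32(mask))
	// ev.Op&Create != 0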

6
vendor/github.com/go-logr/stdr/README.md generated vendored Normal file
View File

@ -0,0 +1,6 @@
# Minimal Go logging using logr and Go's standard library
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr)
This package implements the [logr interface](https://github.com/go-logr/logr)
in terms of Go's standard [log package](https://pkg.go.dev/log).
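A minimal usage sketch (assuming only the New and SetVerbosity entry points shown in the source below):

	package main

	import (
		"log"
		"os"

		"github.com/go-logr/stdr"
	)

	func main() {
		stdr.SetVerbosity(1) // enable V(0) and V(1) logs
		logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags))
		logger.Info("hello", "key", "value")
		logger.V(1).Info("verbose detail")
	}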

170
vendor/github.com/go-logr/stdr/stdr.go generated vendored Normal file
View File

@ -0,0 +1,170 @@
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package stdr implements github.com/go-logr/logr.Logger in terms of
// Go's standard log package.
package stdr
import (
"log"
"os"
"github.com/go-logr/logr"
"github.com/go-logr/logr/funcr"
)
// The global verbosity level. See SetVerbosity().
var globalVerbosity int
// SetVerbosity sets the global level against which all info logs will be
// compared. If this is greater than or equal to the "V" of the logger, the
// message will be logged. A higher value here means more logs will be written.
// The previous verbosity value is returned. This is not concurrent-safe -
// callers must be sure to call it from only one goroutine.
func SetVerbosity(v int) int {
old := globalVerbosity
globalVerbosity = v
return old
}
// New returns a logr.Logger which is implemented by Go's standard log package,
// or something like it. If std is nil, this will use a default logger
// instead.
//
// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
func New(std StdLogger) logr.Logger {
return NewWithOptions(std, Options{})
}
// NewWithOptions returns a logr.Logger which is implemented by Go's standard
// log package, or something like it. See New for details.
func NewWithOptions(std StdLogger, opts Options) logr.Logger {
if std == nil {
// Go's log.Default() is only available in 1.16 and higher.
std = log.New(os.Stderr, "", log.LstdFlags)
}
if opts.Depth < 0 {
opts.Depth = 0
}
fopts := funcr.Options{
LogCaller: funcr.MessageClass(opts.LogCaller),
}
sl := &logger{
Formatter: funcr.NewFormatter(fopts),
std: std,
}
// For skipping our own logger.Info/Error.
sl.Formatter.AddCallDepth(1 + opts.Depth)
return logr.New(sl)
}
// Options carries parameters which influence the way logs are generated.
type Options struct {
// Depth biases the assumed number of call frames to the "true" caller.
// This is useful when the calling code calls a function which then calls
// stdr (e.g. a logging shim to another API). Values less than zero will
// be treated as zero.
Depth int
// LogCaller tells stdr to add a "caller" key to some or all log lines.
// Go's log package has options to log this natively, too.
LogCaller MessageClass
// TODO: add an option to log the date/time
}
// MessageClass indicates which category or categories of messages to consider.
type MessageClass int
const (
// None ignores all message classes.
None MessageClass = iota
// All considers all message classes.
All
// Info only considers info messages.
Info
// Error only considers error messages.
Error
)
// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
// this adapter.
type StdLogger interface {
// Output is the same as log.Output and log.Logger.Output.
Output(calldepth int, logline string) error
}
type logger struct {
funcr.Formatter
std StdLogger
}
var _ logr.LogSink = &logger{}
var _ logr.CallDepthLogSink = &logger{}
func (l logger) Enabled(level int) bool {
return globalVerbosity >= level
}
func (l logger) Info(level int, msg string, kvList ...interface{}) {
prefix, args := l.FormatInfo(level, msg, kvList)
if prefix != "" {
args = prefix + ": " + args
}
_ = l.std.Output(l.Formatter.GetDepth()+1, args)
}
func (l logger) Error(err error, msg string, kvList ...interface{}) {
prefix, args := l.FormatError(err, msg, kvList)
if prefix != "" {
args = prefix + ": " + args
}
_ = l.std.Output(l.Formatter.GetDepth()+1, args)
}
func (l logger) WithName(name string) logr.LogSink {
l.Formatter.AddName(name)
return &l
}
func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
l.Formatter.AddValues(kvList)
return &l
}
func (l logger) WithCallDepth(depth int) logr.LogSink {
l.Formatter.AddCallDepth(depth)
return &l
}
// Underlier exposes access to the underlying logging implementation. Since
// callers only have a logr.Logger, they have to know which implementation is
// in use, so this interface is less of an abstraction and more of a way to test
// type conversion.
type Underlier interface {
GetUnderlying() StdLogger
}
// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
// is itself an interface, the result may or may not be a Go log.Logger.
func (l logger) GetUnderlying() StdLogger {
return l.std
}
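A short sketch of the Underlier hook described above (variable names are illustrative; logr.Logger's GetSink accessor is assumed):

	logger := stdr.NewWithOptions(nil, stdr.Options{LogCaller: stdr.All})
	if u, ok := logger.GetSink().(stdr.Underlier); ok {
		std := u.GetUnderlying() // the StdLogger in use, here the default *log.Logger
		_ = std
	}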

View File

@ -1,23 +0,0 @@
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
package(default_visibility = ["//visibility:public"])
proto_library(
name = "internal_proto",
srcs = ["errors.proto"],
deps = ["@com_google_protobuf//:any_proto"],
)
go_proto_library(
name = "internal_go_proto",
importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
proto = ":internal_proto",
)
go_library(
name = "go_default_library",
embed = [":internal_go_proto"],
importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
)

View File

@ -1,189 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: internal/errors.proto
package internal
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
any "github.com/golang/protobuf/ptypes/any"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Error is the generic error returned from unary RPCs.
type Error struct {
Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
// This is to make the error more compatible with users that expect errors to be Status objects:
// https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
// It should be the exact same message as the Error field.
Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"`
Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
Details []*any.Any `protobuf:"bytes,4,rep,name=details,proto3" json:"details,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Error) Reset() { *m = Error{} }
func (m *Error) String() string { return proto.CompactTextString(m) }
func (*Error) ProtoMessage() {}
func (*Error) Descriptor() ([]byte, []int) {
return fileDescriptor_9b093362ca6d1e03, []int{0}
}
func (m *Error) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Error.Unmarshal(m, b)
}
func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Error.Marshal(b, m, deterministic)
}
func (m *Error) XXX_Merge(src proto.Message) {
xxx_messageInfo_Error.Merge(m, src)
}
func (m *Error) XXX_Size() int {
return xxx_messageInfo_Error.Size(m)
}
func (m *Error) XXX_DiscardUnknown() {
xxx_messageInfo_Error.DiscardUnknown(m)
}
var xxx_messageInfo_Error proto.InternalMessageInfo
func (m *Error) GetError() string {
if m != nil {
return m.Error
}
return ""
}
func (m *Error) GetCode() int32 {
if m != nil {
return m.Code
}
return 0
}
func (m *Error) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *Error) GetDetails() []*any.Any {
if m != nil {
return m.Details
}
return nil
}
// StreamError is a response type which is returned when
// streaming rpc returns an error.
type StreamError struct {
GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"`
HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"`
Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"`
Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *StreamError) Reset() { *m = StreamError{} }
func (m *StreamError) String() string { return proto.CompactTextString(m) }
func (*StreamError) ProtoMessage() {}
func (*StreamError) Descriptor() ([]byte, []int) {
return fileDescriptor_9b093362ca6d1e03, []int{1}
}
func (m *StreamError) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamError.Unmarshal(m, b)
}
func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_StreamError.Marshal(b, m, deterministic)
}
func (m *StreamError) XXX_Merge(src proto.Message) {
xxx_messageInfo_StreamError.Merge(m, src)
}
func (m *StreamError) XXX_Size() int {
return xxx_messageInfo_StreamError.Size(m)
}
func (m *StreamError) XXX_DiscardUnknown() {
xxx_messageInfo_StreamError.DiscardUnknown(m)
}
var xxx_messageInfo_StreamError proto.InternalMessageInfo
func (m *StreamError) GetGrpcCode() int32 {
if m != nil {
return m.GrpcCode
}
return 0
}
func (m *StreamError) GetHttpCode() int32 {
if m != nil {
return m.HttpCode
}
return 0
}
func (m *StreamError) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *StreamError) GetHttpStatus() string {
if m != nil {
return m.HttpStatus
}
return ""
}
func (m *StreamError) GetDetails() []*any.Any {
if m != nil {
return m.Details
}
return nil
}
func init() {
proto.RegisterType((*Error)(nil), "grpc.gateway.runtime.Error")
proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError")
}
func init() { proto.RegisterFile("internal/errors.proto", fileDescriptor_9b093362ca6d1e03) }
var fileDescriptor_9b093362ca6d1e03 = []byte{
// 252 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xc1, 0x4a, 0xc4, 0x30,
0x10, 0x86, 0x89, 0xbb, 0x75, 0xdb, 0xe9, 0x2d, 0x54, 0x88, 0xee, 0xc1, 0xb2, 0xa7, 0x9e, 0x52,
0xd0, 0x27, 0xd0, 0xc5, 0x17, 0xe8, 0xde, 0xbc, 0x2c, 0xd9, 0xdd, 0x31, 0x16, 0xda, 0xa4, 0x24,
0x53, 0xa4, 0xf8, 0x56, 0x3e, 0xa1, 0x24, 0xa5, 0xb0, 0x27, 0xf1, 0xd6, 0xf9, 0xfb, 0xcf, 0x7c,
0x1f, 0x81, 0xbb, 0xd6, 0x10, 0x3a, 0xa3, 0xba, 0x1a, 0x9d, 0xb3, 0xce, 0xcb, 0xc1, 0x59, 0xb2,
0xbc, 0xd0, 0x6e, 0x38, 0x4b, 0xad, 0x08, 0xbf, 0xd4, 0x24, 0xdd, 0x68, 0xa8, 0xed, 0xf1, 0xe1,
0x5e, 0x5b, 0xab, 0x3b, 0xac, 0x63, 0xe7, 0x34, 0x7e, 0xd4, 0xca, 0x4c, 0xf3, 0xc2, 0xee, 0x1b,
0x92, 0xb7, 0x70, 0x80, 0x17, 0x90, 0xc4, 0x4b, 0x82, 0x95, 0xac, 0xca, 0x9a, 0x79, 0xe0, 0x1c,
0xd6, 0x67, 0x7b, 0x41, 0x71, 0x53, 0xb2, 0x2a, 0x69, 0xe2, 0x37, 0x17, 0xb0, 0xe9, 0xd1, 0x7b,
0xa5, 0x51, 0xac, 0x62, 0x77, 0x19, 0xb9, 0x84, 0xcd, 0x05, 0x49, 0xb5, 0x9d, 0x17, 0xeb, 0x72,
0x55, 0xe5, 0x4f, 0x85, 0x9c, 0xc9, 0x72, 0x21, 0xcb, 0x17, 0x33, 0x35, 0x4b, 0x69, 0xf7, 0xc3,
0x20, 0x3f, 0x90, 0x43, 0xd5, 0xcf, 0x0e, 0x5b, 0xc8, 0x82, 0xff, 0x31, 0x22, 0x59, 0x44, 0xa6,
0x21, 0xd8, 0x07, 0xec, 0x16, 0xb2, 0x4f, 0xa2, 0xe1, 0x78, 0xe5, 0x93, 0x86, 0x60, 0xff, 0xb7,
0xd3, 0x23, 0xe4, 0x71, 0xcd, 0x93, 0xa2, 0x31, 0x78, 0x85, 0xbf, 0x10, 0xa2, 0x43, 0x4c, 0xae,
0xa5, 0x93, 0x7f, 0x48, 0xbf, 0xc2, 0x7b, 0xba, 0xbc, 0xfd, 0xe9, 0x36, 0x56, 0x9e, 0x7f, 0x03,
0x00, 0x00, 0xff, 0xff, 0xde, 0x72, 0x6b, 0x83, 0x8e, 0x01, 0x00, 0x00,
}

View File

@ -1,26 +0,0 @@
syntax = "proto3";
package grpc.gateway.runtime;
option go_package = "internal";
import "google/protobuf/any.proto";
// Error is the generic error returned from unary RPCs.
message Error {
string error = 1;
// This is to make the error more compatible with users that expect errors to be Status objects:
// https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
// It should be the exact same message as the Error field.
int32 code = 2;
string message = 3;
repeated google.protobuf.Any details = 4;
}
// StreamError is a response type which is returned when
// streaming rpc returns an error.
message StreamError {
int32 grpc_code = 1;
int32 http_code = 2;
string message = 3;
string http_status = 4;
repeated google.protobuf.Any details = 5;
}

View File

@ -1,85 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
package(default_visibility = ["//visibility:public"])
go_library(
name = "go_default_library",
srcs = [
"context.go",
"convert.go",
"doc.go",
"errors.go",
"fieldmask.go",
"handler.go",
"marshal_httpbodyproto.go",
"marshal_json.go",
"marshal_jsonpb.go",
"marshal_proto.go",
"marshaler.go",
"marshaler_registry.go",
"mux.go",
"pattern.go",
"proto2_convert.go",
"proto_errors.go",
"query.go",
],
importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime",
deps = [
"//internal:go_default_library",
"//utilities:go_default_library",
"@com_github_golang_protobuf//descriptor:go_default_library_gen",
"@com_github_golang_protobuf//jsonpb:go_default_library_gen",
"@com_github_golang_protobuf//proto:go_default_library",
"@go_googleapis//google/api:httpbody_go_proto",
"@io_bazel_rules_go//proto/wkt:any_go_proto",
"@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
"@io_bazel_rules_go//proto/wkt:duration_go_proto",
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//grpclog:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
],
)
go_test(
name = "go_default_test",
size = "small",
srcs = [
"context_test.go",
"convert_test.go",
"errors_test.go",
"fieldmask_test.go",
"handler_test.go",
"marshal_httpbodyproto_test.go",
"marshal_json_test.go",
"marshal_jsonpb_test.go",
"marshal_proto_test.go",
"marshaler_registry_test.go",
"mux_test.go",
"pattern_test.go",
"query_test.go",
],
embed = [":go_default_library"],
deps = [
"//internal:go_default_library",
"//runtime/internal/examplepb:go_default_library",
"//utilities:go_default_library",
"@com_github_golang_protobuf//jsonpb:go_default_library_gen",
"@com_github_golang_protobuf//proto:go_default_library",
"@com_github_golang_protobuf//ptypes:go_default_library_gen",
"@go_googleapis//google/api:httpbody_go_proto",
"@go_googleapis//google/rpc:errdetails_go_proto",
"@io_bazel_rules_go//proto/wkt:duration_go_proto",
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
"@io_bazel_rules_go//proto/wkt:struct_go_proto",
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
],
)

View File

@ -1,186 +0,0 @@
package runtime
import (
"context"
"io"
"net/http"
"strings"
"github.com/grpc-ecosystem/grpc-gateway/internal"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
func HTTPStatusFromCode(code codes.Code) int {
switch code {
case codes.OK:
return http.StatusOK
case codes.Canceled:
return http.StatusRequestTimeout
case codes.Unknown:
return http.StatusInternalServerError
case codes.InvalidArgument:
return http.StatusBadRequest
case codes.DeadlineExceeded:
return http.StatusGatewayTimeout
case codes.NotFound:
return http.StatusNotFound
case codes.AlreadyExists:
return http.StatusConflict
case codes.PermissionDenied:
return http.StatusForbidden
case codes.Unauthenticated:
return http.StatusUnauthorized
case codes.ResourceExhausted:
return http.StatusTooManyRequests
case codes.FailedPrecondition:
// Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
return http.StatusBadRequest
case codes.Aborted:
return http.StatusConflict
case codes.OutOfRange:
return http.StatusBadRequest
case codes.Unimplemented:
return http.StatusNotImplemented
case codes.Internal:
return http.StatusInternalServerError
case codes.Unavailable:
return http.StatusServiceUnavailable
case codes.DataLoss:
return http.StatusInternalServerError
}
grpclog.Infof("Unknown gRPC error code: %v", code)
return http.StatusInternalServerError
}
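// For example, HTTPStatusFromCode(codes.NotFound) returns
// http.StatusNotFound (404) and HTTPStatusFromCode(codes.Unavailable)
// returns http.StatusServiceUnavailable (503).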
var (
// HTTPError replies to the request with an error.
//
// HTTPError is called:
// - From generated per-endpoint gateway handler code, when calling the backend results in an error.
// - From gateway runtime code, when forwarding the response message results in an error.
//
// The default value for HTTPError calls the custom error handler configured on the ServeMux via the
// WithProtoErrorHandler serve option if that option was used, calling GlobalHTTPErrorHandler otherwise.
//
// To customize the error handling of a particular ServeMux instance, use the WithProtoErrorHandler
// serve option.
//
// To customize the error format for all ServeMux instances not using the WithProtoErrorHandler serve
// option, set GlobalHTTPErrorHandler to a custom function.
//
// Setting this variable directly to customize error format is deprecated.
HTTPError = MuxOrGlobalHTTPError
// GlobalHTTPErrorHandler is the HTTPError handler for all ServeMux instances not using the
// WithProtoErrorHandler serve option.
//
// You can set a custom function to this variable to customize error format.
GlobalHTTPErrorHandler = DefaultHTTPError
// OtherErrorHandler handles gateway errors from parsing and routing client requests for all
// ServeMux instances not using the WithProtoErrorHandler serve option.
//
// It returns the following error codes: StatusMethodNotAllowed StatusNotFound StatusBadRequest
//
// To customize parsing and routing error handling of a particular ServeMux instance, use the
// WithProtoErrorHandler serve option.
//
// To customize parsing and routing error handling of all ServeMux instances not using the
// WithProtoErrorHandler serve option, set a custom function to this variable.
OtherErrorHandler = DefaultOtherErrorHandler
)
// MuxOrGlobalHTTPError uses the mux-configured error handler, falling back to GlobalHTTPErrorHandler.
func MuxOrGlobalHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
if mux.protoErrorHandler != nil {
mux.protoErrorHandler(ctx, mux, marshaler, w, r, err)
} else {
GlobalHTTPErrorHandler(ctx, mux, marshaler, w, r, err)
}
}
// DefaultHTTPError is the default implementation of HTTPError.
// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
// Otherwise, it replies with http.StatusInternalServerError.
//
// The response body returned by this function is a JSON object,
// which contains a member whose key is "error" and whose value is err.Error().
func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
const fallback = `{"error": "failed to marshal error message"}`
s, ok := status.FromError(err)
if !ok {
s = status.New(codes.Unknown, err.Error())
}
w.Header().Del("Trailer")
w.Header().Del("Transfer-Encoding")
contentType := marshaler.ContentType()
// Check the marshaler at run time in order to keep backwards compatibility.
// An interface param needs to be added to the ContentType() function on
// the Marshaler interface to be able to remove this check.
if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
pb := s.Proto()
contentType = typeMarshaler.ContentTypeFromMessage(pb)
}
w.Header().Set("Content-Type", contentType)
body := &internal.Error{
Error: s.Message(),
Message: s.Message(),
Code: int32(s.Code()),
Details: s.Proto().GetDetails(),
}
buf, merr := marshaler.Marshal(body)
if merr != nil {
grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
w.WriteHeader(http.StatusInternalServerError)
if _, err := io.WriteString(w, fallback); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
return
}
md, ok := ServerMetadataFromContext(ctx)
if !ok {
grpclog.Infof("Failed to extract ServerMetadata from context")
}
handleForwardResponseServerMetadata(w, mux, md)
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
// Unless the request includes a TE header field indicating "trailers"
// is acceptable, as described in Section 4.3, a server SHOULD NOT
// generate trailer fields that it believes are necessary for the user
// agent to receive.
var wantsTrailers bool
if te := r.Header.Get("TE"); strings.Contains(strings.ToLower(te), "trailers") {
wantsTrailers = true
handleForwardResponseTrailerHeader(w, md)
w.Header().Set("Transfer-Encoding", "chunked")
}
st := HTTPStatusFromCode(s.Code())
w.WriteHeader(st)
if _, err := w.Write(buf); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
if wantsTrailers {
handleForwardResponseTrailer(w, md)
}
}
// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
// It simply writes a string representation of the given error into "w".
func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
http.Error(w, msg, code)
}
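A hedged sketch of overriding the package-level handler described above (the handler body is illustrative, not a recommended format; note err.Error() is written without JSON escaping, and the context, http, runtime, and grpc status packages are assumed imported):

	runtime.GlobalHTTPErrorHandler = func(ctx context.Context, mux *runtime.ServeMux,
		marshaler runtime.Marshaler, w http.ResponseWriter, r *http.Request, err error) {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(runtime.HTTPStatusFromCode(status.Code(err)))
		_, _ = w.Write([]byte(`{"error":"` + err.Error() + `"}`))
	}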

View File

@ -1,89 +0,0 @@
package runtime
import (
"encoding/json"
"io"
"strings"
descriptor2 "github.com/golang/protobuf/descriptor"
"github.com/golang/protobuf/protoc-gen-go/descriptor"
"google.golang.org/genproto/protobuf/field_mask"
)
func translateName(name string, md *descriptor.DescriptorProto) (string, *descriptor.DescriptorProto) {
// TODO - should really gate this with a test that the marshaller has used json names
if md != nil {
for _, f := range md.Field {
if f.JsonName != nil && f.Name != nil && *f.JsonName == name {
var subType *descriptor.DescriptorProto
// If the field has a TypeName then we retrieve the nested type for translating the embedded message names.
if f.TypeName != nil {
typeSplit := strings.Split(*f.TypeName, ".")
typeName := typeSplit[len(typeSplit)-1]
for _, t := range md.NestedType {
if typeName == *t.Name {
subType = t
}
}
}
return *f.Name, subType
}
}
}
return name, nil
}
// FieldMaskFromRequestBody creates a FieldMask containing all complete field paths found in the JSON body.
func FieldMaskFromRequestBody(r io.Reader, md *descriptor.DescriptorProto) (*field_mask.FieldMask, error) {
fm := &field_mask.FieldMask{}
var root interface{}
if err := json.NewDecoder(r).Decode(&root); err != nil {
if err == io.EOF {
return fm, nil
}
return nil, err
}
queue := []fieldMaskPathItem{{node: root, md: md}}
for len(queue) > 0 {
// dequeue an item
item := queue[0]
queue = queue[1:]
if m, ok := item.node.(map[string]interface{}); ok {
// if the item is an object, then enqueue all of its children
for k, v := range m {
protoName, subMd := translateName(k, item.md)
if subMsg, ok := v.(descriptor2.Message); ok {
_, subMd = descriptor2.ForMessage(subMsg)
}
var path string
if item.path == "" {
path = protoName
} else {
path = item.path + "." + protoName
}
queue = append(queue, fieldMaskPathItem{path: path, node: v, md: subMd})
}
} else if len(item.path) > 0 {
// otherwise, it's a leaf node so print its path
fm.Paths = append(fm.Paths, item.path)
}
}
return fm, nil
}
// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
type fieldMaskPathItem struct {
// the list of prior fields leading up to node connected by dots
path string
// the generic decoded JSON object for the current item, inspected for further path extraction
node interface{}
// descriptor for parent message
md *descriptor.DescriptorProto
}
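For intuition, a sketch of the traversal above (nil descriptor, so no JSON-name translation; strings and the runtime package are assumed imported):

	body := strings.NewReader(`{"name":"x","address":{"city":"y"}}`)
	fm, _ := runtime.FieldMaskFromRequestBody(body, nil)
	// fm.Paths contains "name" and "address.city" (map iteration order varies)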

View File

@ -1,106 +0,0 @@
package runtime
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/ptypes/any"
"github.com/grpc-ecosystem/grpc-gateway/internal"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
// StreamErrorHandlerFunc accepts an error as a gRPC error generated via the status package and translates it into
// a proto struct used to represent an error at the end of a stream.
type StreamErrorHandlerFunc func(context.Context, error) *StreamError
// StreamError is the payload for the final message in a server stream in the event that the server returns an
// error after a response message has already been sent.
type StreamError internal.StreamError
// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request.
type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
// DefaultHTTPProtoErrorHandler is an implementation of HTTPError.
// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
// Otherwise, it replies with http.StatusInternalServerError.
//
// The response body returned by this function is a Status message marshaled by a Marshaler.
//
// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead.
func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
// return Internal when Marshal failed
const fallback = `{"code": 13, "message": "failed to marshal error message"}`
s, ok := status.FromError(err)
if !ok {
s = status.New(codes.Unknown, err.Error())
}
w.Header().Del("Trailer")
contentType := marshaler.ContentType()
// Check the marshaler at run time in order to keep backwards compatibility.
// An interface param needs to be added to the ContentType() function on
// the Marshaler interface to be able to remove this check.
if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
pb := s.Proto()
contentType = typeMarshaler.ContentTypeFromMessage(pb)
}
w.Header().Set("Content-Type", contentType)
buf, merr := marshaler.Marshal(s.Proto())
if merr != nil {
grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr)
w.WriteHeader(http.StatusInternalServerError)
if _, err := io.WriteString(w, fallback); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
return
}
md, ok := ServerMetadataFromContext(ctx)
if !ok {
grpclog.Infof("Failed to extract ServerMetadata from context")
}
handleForwardResponseServerMetadata(w, mux, md)
handleForwardResponseTrailerHeader(w, md)
st := HTTPStatusFromCode(s.Code())
w.WriteHeader(st)
if _, err := w.Write(buf); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
handleForwardResponseTrailer(w, md)
}
// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via
// default logic.
//
// It extracts the gRPC status from err if possible. The fields of the status are
// used to populate the returned StreamError, and the HTTP status code is derived
// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a
// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code.
func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError {
grpcCode := codes.Unknown
grpcMessage := err.Error()
var grpcDetails []*any.Any
if s, ok := status.FromError(err); ok {
grpcCode = s.Code()
grpcMessage = s.Message()
grpcDetails = s.Proto().GetDetails()
}
httpCode := HTTPStatusFromCode(grpcCode)
return &StreamError{
GrpcCode: int32(grpcCode),
HttpCode: int32(httpCode),
Message: grpcMessage,
HttpStatus: http.StatusText(httpCode),
Details: grpcDetails,
}
}
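// For example, given err := status.Error(codes.NotFound, "no such row"),
// DefaultHTTPStreamErrorHandler returns a *StreamError with GrpcCode 5,
// HttpCode 404, Message "no such row", and HttpStatus "Not Found".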

View File

@ -1,406 +0,0 @@
package runtime
import (
"encoding/base64"
"fmt"
"net/url"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc/grpclog"
)
var valuesKeyRegexp = regexp.MustCompile("^(.*)\\[(.*)\\]$")
var currentQueryParser QueryParameterParser = &defaultQueryParser{}
// QueryParameterParser defines interface for all query parameter parsers
type QueryParameterParser interface {
Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
}
// PopulateQueryParameters parses query parameters
// into "msg" using current query parser
func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
return currentQueryParser.Parse(msg, values, filter)
}
type defaultQueryParser struct{}
// Parse populates "values" into "msg".
// A value is ignored if its key starts with one of the elements in "filter".
func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
for key, values := range values {
match := valuesKeyRegexp.FindStringSubmatch(key)
if len(match) == 3 {
key = match[1]
values = append([]string{match[2]}, values...)
}
fieldPath := strings.Split(key, ".")
if filter.HasCommonPrefix(fieldPath) {
continue
}
if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
return err
}
}
return nil
}
// PopulateFieldFromPath sets a value in a nested Protobuf structure.
// It instantiates missing protobuf fields as it goes.
func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
fieldPath := strings.Split(fieldPathString, ".")
return populateFieldValueFromPath(msg, fieldPath, []string{value})
}
func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
m := reflect.ValueOf(msg)
if m.Kind() != reflect.Ptr {
return fmt.Errorf("unexpected type %T: %v", msg, msg)
}
var props *proto.Properties
m = m.Elem()
for i, fieldName := range fieldPath {
isLast := i == len(fieldPath)-1
if !isLast && m.Kind() != reflect.Struct {
return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
}
var f reflect.Value
var err error
f, props, err = fieldByProtoName(m, fieldName)
if err != nil {
return err
} else if !f.IsValid() {
grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
return nil
}
switch f.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
if !isLast {
return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
}
m = f
case reflect.Slice:
if !isLast {
return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
}
// Handle []byte
if f.Type().Elem().Kind() == reflect.Uint8 {
m = f
break
}
return populateRepeatedField(f, values, props)
case reflect.Ptr:
if f.IsNil() {
m = reflect.New(f.Type().Elem())
f.Set(m.Convert(f.Type()))
}
m = f.Elem()
continue
case reflect.Struct:
m = f
continue
case reflect.Map:
if !isLast {
return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
}
return populateMapField(f, values, props)
default:
return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
}
}
switch len(values) {
case 0:
return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
case 1:
default:
grpclog.Infof("too many field values: %s", strings.Join(fieldPath, "."))
}
return populateField(m, values[0], props)
}
// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
// "m" must be a struct value. It returns zero reflect.Value if no such field found.
func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
props := proto.GetProperties(m.Type())
// look up field name in oneof map
for _, op := range props.OneofTypes {
if name == op.Prop.OrigName || name == op.Prop.JSONName {
v := reflect.New(op.Type.Elem())
field := m.Field(op.Field)
if !field.IsNil() {
return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
}
field.Set(v)
return v.Elem().Field(0), op.Prop, nil
}
}
for _, p := range props.Prop {
if p.OrigName == name {
return m.FieldByName(p.Name), p, nil
}
if p.JSONName == name {
return m.FieldByName(p.Name), p, nil
}
}
return reflect.Value{}, nil, nil
}
func populateMapField(f reflect.Value, values []string, props *proto.Properties) error {
if len(values) != 2 {
return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name)
}
key, value := values[0], values[1]
keyType := f.Type().Key()
valueType := f.Type().Elem()
if f.IsNil() {
f.Set(reflect.MakeMap(f.Type()))
}
keyConv, ok := convFromType[keyType.Kind()]
if !ok {
return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name)
}
valueConv, ok := convFromType[valueType.Kind()]
if !ok {
return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name)
}
keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)})
if err := keyV[1].Interface(); err != nil {
return err.(error)
}
valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)})
if err := valueV[1].Interface(); err != nil {
return err.(error)
}
f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType))
return nil
}
func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
elemType := f.Type().Elem()
// is the destination field a slice of an enumeration type?
if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
return populateFieldEnumRepeated(f, values, enumValMap)
}
conv, ok := convFromType[elemType.Kind()]
if !ok {
return fmt.Errorf("unsupported field type %s", elemType)
}
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
for i, v := range values {
result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
if err := result[1].Interface(); err != nil {
return err.(error)
}
f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
}
return nil
}
func populateField(f reflect.Value, value string, props *proto.Properties) error {
i := f.Addr().Interface()
// Handle protobuf well known types
var name string
switch m := i.(type) {
case interface{ XXX_WellKnownType() string }:
name = m.XXX_WellKnownType()
case proto.Message:
const wktPrefix = "google.protobuf."
if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) {
name = fullName[len(wktPrefix):]
}
}
switch name {
case "Timestamp":
if value == "null" {
f.FieldByName("Seconds").SetInt(0)
f.FieldByName("Nanos").SetInt(0)
return nil
}
t, err := time.Parse(time.RFC3339Nano, value)
if err != nil {
return fmt.Errorf("bad Timestamp: %v", err)
}
f.FieldByName("Seconds").SetInt(int64(t.Unix()))
f.FieldByName("Nanos").SetInt(int64(t.Nanosecond()))
return nil
case "Duration":
if value == "null" {
f.FieldByName("Seconds").SetInt(0)
f.FieldByName("Nanos").SetInt(0)
return nil
}
d, err := time.ParseDuration(value)
if err != nil {
return fmt.Errorf("bad Duration: %v", err)
}
ns := d.Nanoseconds()
s := ns / 1e9
ns %= 1e9
f.FieldByName("Seconds").SetInt(s)
f.FieldByName("Nanos").SetInt(ns)
return nil
case "DoubleValue":
fallthrough
case "FloatValue":
float64Val, err := strconv.ParseFloat(value, 64)
if err != nil {
return fmt.Errorf("bad DoubleValue: %s", value)
}
f.FieldByName("Value").SetFloat(float64Val)
return nil
case "Int64Value":
fallthrough
case "Int32Value":
int64Val, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmt.Errorf("bad DoubleValue: %s", value)
}
f.FieldByName("Value").SetInt(int64Val)
return nil
case "UInt64Value":
fallthrough
case "UInt32Value":
uint64Val, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return fmt.Errorf("bad DoubleValue: %s", value)
}
f.FieldByName("Value").SetUint(uint64Val)
return nil
case "BoolValue":
if value == "true" {
f.FieldByName("Value").SetBool(true)
} else if value == "false" {
f.FieldByName("Value").SetBool(false)
} else {
return fmt.Errorf("bad BoolValue: %s", value)
}
return nil
case "StringValue":
f.FieldByName("Value").SetString(value)
return nil
case "BytesValue":
bytesVal, err := base64.StdEncoding.DecodeString(value)
if err != nil {
return fmt.Errorf("bad BytesValue: %s", value)
}
f.FieldByName("Value").SetBytes(bytesVal)
return nil
case "FieldMask":
p := f.FieldByName("Paths")
for _, v := range strings.Split(value, ",") {
if v != "" {
p.Set(reflect.Append(p, reflect.ValueOf(v)))
}
}
return nil
}
// Handle Time and Duration stdlib types
switch t := i.(type) {
case *time.Time:
pt, err := time.Parse(time.RFC3339Nano, value)
if err != nil {
return fmt.Errorf("bad Timestamp: %v", err)
}
*t = pt
return nil
case *time.Duration:
d, err := time.ParseDuration(value)
if err != nil {
return fmt.Errorf("bad Duration: %v", err)
}
*t = d
return nil
}
// is the destination field an enumeration type?
if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
return populateFieldEnum(f, value, enumValMap)
}
conv, ok := convFromType[f.Kind()]
if !ok {
return fmt.Errorf("field type %T is not supported in query parameters", i)
}
result := conv.Call([]reflect.Value{reflect.ValueOf(value)})
if err := result[1].Interface(); err != nil {
return err.(error)
}
f.Set(result[0].Convert(f.Type()))
return nil
}
func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) {
// see if it's an enumeration string
if enumVal, ok := enumValMap[value]; ok {
return reflect.ValueOf(enumVal).Convert(t), nil
}
// check for an integer that matches an enumeration value
eVal, err := strconv.Atoi(value)
if err != nil {
return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
}
for _, v := range enumValMap {
if v == int32(eVal) {
return reflect.ValueOf(eVal).Convert(t), nil
}
}
return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
}
func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error {
cval, err := convertEnum(value, f.Type(), enumValMap)
if err != nil {
return err
}
f.Set(cval)
return nil
}
func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error {
elemType := f.Type().Elem()
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
for i, v := range values {
result, err := convertEnum(v, elemType, enumValMap)
if err != nil {
return err
}
f.Index(i).Set(result)
}
return nil
}
var (
convFromType = map[reflect.Kind]reflect.Value{
reflect.String: reflect.ValueOf(String),
reflect.Bool: reflect.ValueOf(Bool),
reflect.Float64: reflect.ValueOf(Float64),
reflect.Float32: reflect.ValueOf(Float32),
reflect.Int64: reflect.ValueOf(Int64),
reflect.Int32: reflect.ValueOf(Int32),
reflect.Uint64: reflect.ValueOf(Uint64),
reflect.Uint32: reflect.ValueOf(Uint32),
reflect.Slice: reflect.ValueOf(Bytes),
}
)
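A usage sketch (pb.GetRequest is a hypothetical generated message; the empty DoubleArray filters nothing; net/url plus the runtime and utilities packages are assumed imported):

	values := url.Values{"name": {"projects/1"}, "page_size": {"10"}}
	var req pb.GetRequest
	if err := runtime.PopulateQueryParameters(&req, values, utilities.NewDoubleArray(nil)); err != nil {
		// handle the parse error
	}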

View File

@ -0,0 +1,35 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
package(default_visibility = ["//visibility:public"])
go_library(
name = "httprule",
srcs = [
"compile.go",
"parse.go",
"types.go",
],
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule",
deps = ["//utilities"],
)
go_test(
name = "httprule_test",
size = "small",
srcs = [
"compile_test.go",
"parse_test.go",
"types_test.go",
],
embed = [":httprule"],
deps = [
"//utilities",
"@com_github_golang_glog//:glog",
],
)
alias(
name = "go_default_library",
actual = ":httprule",
visibility = ["//:__subpackages__"],
)

View File

@ -0,0 +1,121 @@
package httprule
import (
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)
const (
opcodeVersion = 1
)
// Template is a compiled representation of path templates.
type Template struct {
// Version is the version number of the format.
Version int
// OpCodes is a sequence of operations.
OpCodes []int
// Pool is a constant pool
Pool []string
// Verb is a VERB part in the template.
Verb string
// Fields is a list of field paths bound in this template.
Fields []string
// Original template (example: /v1/a_bit_of_everything)
Template string
}
// Compiler compiles the parsed representation of a path template into marshallable operations.
// They can be unmarshalled by runtime.NewPattern.
type Compiler interface {
Compile() Template
}
type op struct {
// code is the opcode of the operation
code utilities.OpCode
// str is a string operand of the code.
// num is ignored if str is not empty.
str string
// num is a numeric operand of the code.
num int
}
func (w wildcard) compile() []op {
return []op{
{code: utilities.OpPush},
}
}
func (w deepWildcard) compile() []op {
return []op{
{code: utilities.OpPushM},
}
}
func (l literal) compile() []op {
return []op{
{
code: utilities.OpLitPush,
str: string(l),
},
}
}
func (v variable) compile() []op {
var ops []op
for _, s := range v.segments {
ops = append(ops, s.compile()...)
}
ops = append(ops, op{
code: utilities.OpConcatN,
num: len(v.segments),
}, op{
code: utilities.OpCapture,
str: v.path,
})
return ops
}
func (t template) Compile() Template {
var rawOps []op
for _, s := range t.segments {
rawOps = append(rawOps, s.compile()...)
}
var (
ops []int
pool []string
fields []string
)
consts := make(map[string]int)
for _, op := range rawOps {
ops = append(ops, int(op.code))
if op.str == "" {
ops = append(ops, op.num)
} else {
// eof segment literal represents the "/" path pattern
if op.str == eof {
op.str = ""
}
if _, ok := consts[op.str]; !ok {
consts[op.str] = len(pool)
pool = append(pool, op.str)
}
ops = append(ops, consts[op.str])
}
if op.code == utilities.OpCapture {
fields = append(fields, op.str)
}
}
return Template{
Version: opcodeVersion,
OpCodes: ops,
Pool: pool,
Verb: t.verb,
Fields: fields,
Template: t.template,
}
}
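For example (a worked trace of the loop above, not the output of a tool): compiling the parsed form of "/v1/{name=shelves/*}" conceptually emits

	OpLitPush "v1", OpLitPush "shelves", OpPush, OpConcatN 2, OpCapture "name"

so Pool becomes ["v1", "shelves", "name"] and Fields is ["name"].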

View File

@ -0,0 +1,11 @@
// +build gofuzz
package httprule
func Fuzz(data []byte) int {
_, err := Parse(string(data))
if err != nil {
return 0
}
// A successfully parsed input is more interesting to the fuzzer.
return 1
}

View File

@ -0,0 +1,368 @@
package httprule
import (
"fmt"
"strings"
)
// InvalidTemplateError indicates that the path template is not valid.
type InvalidTemplateError struct {
tmpl string
msg string
}
func (e InvalidTemplateError) Error() string {
return fmt.Sprintf("%s: %s", e.msg, e.tmpl)
}
// Parse parses the string representation of a path template.
func Parse(tmpl string) (Compiler, error) {
if !strings.HasPrefix(tmpl, "/") {
return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"}
}
tokens, verb := tokenize(tmpl[1:])
p := parser{tokens: tokens}
segs, err := p.topLevelSegments()
if err != nil {
return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()}
}
return template{
segments: segs,
verb: verb,
template: tmpl,
}, nil
}
func tokenize(path string) (tokens []string, verb string) {
if path == "" {
return []string{eof}, ""
}
const (
init = iota
field
nested
)
st := init
for path != "" {
var idx int
switch st {
case init:
idx = strings.IndexAny(path, "/{")
case field:
idx = strings.IndexAny(path, ".=}")
case nested:
idx = strings.IndexAny(path, "/}")
}
if idx < 0 {
tokens = append(tokens, path)
break
}
switch r := path[idx]; r {
case '/', '.':
case '{':
st = field
case '=':
st = nested
case '}':
st = init
}
if idx == 0 {
tokens = append(tokens, path[idx:idx+1])
} else {
tokens = append(tokens, path[:idx], path[idx:idx+1])
}
path = path[idx+1:]
}
l := len(tokens)
// See
// https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693;
// although normal and backwards-compat logic here is to use the last index
// of a colon, if the final segment is a variable followed by a colon, the
// part following the colon must be a verb. Hence if the previous token is
// an end var marker, we switch the index we're looking for to Index instead
// of LastIndex, so that we correctly grab the remaining part of the path as
// the verb.
var penultimateTokenIsEndVar bool
switch l {
case 0, 1:
// Not enough to be variable so skip this logic and don't result in an
// invalid index
default:
penultimateTokenIsEndVar = tokens[l-2] == "}"
}
t := tokens[l-1]
var idx int
if penultimateTokenIsEndVar {
idx = strings.Index(t, ":")
} else {
idx = strings.LastIndex(t, ":")
}
if idx == 0 {
tokens, verb = tokens[:l-1], t[1:]
} else if idx > 0 {
tokens[l-1], verb = t[:idx], t[idx+1:]
}
tokens = append(tokens, eof)
return tokens, verb
}
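// For example (a worked trace of the loop above):
//
//	tokenize("v1/{name=messages/*}:cancel")
//
// returns tokens ["v1", "/", "{", "name", "=", "messages", "/", "*", "}", eof]
// and verb "cancel".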
// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto.
type parser struct {
tokens []string
accepted []string
}
// topLevelSegments is the target of this parser.
func (p *parser) topLevelSegments() ([]segment, error) {
if _, err := p.accept(typeEOF); err == nil {
p.tokens = p.tokens[:0]
return []segment{literal(eof)}, nil
}
segs, err := p.segments()
if err != nil {
return nil, err
}
if _, err := p.accept(typeEOF); err != nil {
return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, ""))
}
return segs, nil
}
func (p *parser) segments() ([]segment, error) {
s, err := p.segment()
if err != nil {
return nil, err
}
segs := []segment{s}
for {
if _, err := p.accept("/"); err != nil {
return segs, nil
}
s, err := p.segment()
if err != nil {
return segs, err
}
segs = append(segs, s)
}
}
func (p *parser) segment() (segment, error) {
if _, err := p.accept("*"); err == nil {
return wildcard{}, nil
}
if _, err := p.accept("**"); err == nil {
return deepWildcard{}, nil
}
if l, err := p.literal(); err == nil {
return l, nil
}
v, err := p.variable()
if err != nil {
return nil, fmt.Errorf("segment neither wildcards, literal or variable: %v", err)
}
return v, err
}
func (p *parser) literal() (segment, error) {
lit, err := p.accept(typeLiteral)
if err != nil {
return nil, err
}
return literal(lit), nil
}
func (p *parser) variable() (segment, error) {
if _, err := p.accept("{"); err != nil {
return nil, err
}
path, err := p.fieldPath()
if err != nil {
return nil, err
}
var segs []segment
if _, err := p.accept("="); err == nil {
segs, err = p.segments()
if err != nil {
return nil, fmt.Errorf("invalid segment in variable %q: %v", path, err)
}
} else {
segs = []segment{wildcard{}}
}
if _, err := p.accept("}"); err != nil {
return nil, fmt.Errorf("unterminated variable segment: %s", path)
}
return variable{
path: path,
segments: segs,
}, nil
}
func (p *parser) fieldPath() (string, error) {
c, err := p.accept(typeIdent)
if err != nil {
return "", err
}
components := []string{c}
for {
if _, err = p.accept("."); err != nil {
return strings.Join(components, "."), nil
}
c, err := p.accept(typeIdent)
if err != nil {
return "", fmt.Errorf("invalid field path component: %v", err)
}
components = append(components, c)
}
}
// A termType is a type of terminal symbols.
type termType string
// These constants define some of the valid values of termType.
// They improve readability of parse functions.
//
// You can also use "/", "*", "**", "." or "=" as valid values.
const (
typeIdent = termType("ident")
typeLiteral = termType("literal")
typeEOF = termType("$")
)
const (
// eof is the terminal symbol which always appears at the end of token sequence.
eof = "\u0000"
)
// accept tries to accept a token in "p".
// This function consumes a token and returns it if it matches the specified "term".
// If it doesn't match, the function does not consume any tokens and returns an error.
func (p *parser) accept(term termType) (string, error) {
t := p.tokens[0]
switch term {
case "/", "*", "**", ".", "=", "{", "}":
if t != string(term) && t != "/" {
return "", fmt.Errorf("expected %q but got %q", term, t)
}
case typeEOF:
if t != eof {
return "", fmt.Errorf("expected EOF but got %q", t)
}
case typeIdent:
if err := expectIdent(t); err != nil {
return "", err
}
case typeLiteral:
if err := expectPChars(t); err != nil {
return "", err
}
default:
return "", fmt.Errorf("unknown termType %q", term)
}
p.tokens = p.tokens[1:]
p.accepted = append(p.accepted, t)
return t, nil
}
// expectPChars determines if "t" consists of only pchars defined in RFC3986.
//
// https://www.ietf.org/rfc/rfc3986.txt, P.49
// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
// / "*" / "+" / "," / ";" / "="
// pct-encoded = "%" HEXDIG HEXDIG
func expectPChars(t string) error {
const (
init = iota
pct1
pct2
)
st := init
for _, r := range t {
if st != init {
if !isHexDigit(r) {
return fmt.Errorf("invalid hexdigit: %c(%U)", r, r)
}
switch st {
case pct1:
st = pct2
case pct2:
st = init
}
continue
}
// unreserved
switch {
case 'A' <= r && r <= 'Z':
continue
case 'a' <= r && r <= 'z':
continue
case '0' <= r && r <= '9':
continue
}
switch r {
case '-', '.', '_', '~':
// unreserved
case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=':
// sub-delims
case ':', '@':
// rest of pchar
case '%':
// pct-encoded
st = pct1
default:
return fmt.Errorf("invalid character in path segment: %q(%U)", r, r)
}
}
if st != init {
return fmt.Errorf("invalid percent-encoding in %q", t)
}
return nil
}
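// For instance (worked examples of the rules above): expectPChars("user%2Fdata-1.txt")
// returns nil; expectPChars("a b") fails on the space; and expectPChars("50%")
// fails because the percent-encoding is truncated.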
// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*).
func expectIdent(ident string) error {
if ident == "" {
return fmt.Errorf("empty identifier")
}
for pos, r := range ident {
switch {
case '0' <= r && r <= '9':
if pos == 0 {
return fmt.Errorf("identifier starting with digit: %s", ident)
}
continue
case 'A' <= r && r <= 'Z':
continue
case 'a' <= r && r <= 'z':
continue
case r == '_':
continue
default:
return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident)
}
}
return nil
}
func isHexDigit(r rune) bool {
switch {
case '0' <= r && r <= '9':
return true
case 'A' <= r && r <= 'F':
return true
case 'a' <= r && r <= 'f':
return true
}
return false
}

Some files were not shown because too many files have changed in this diff.