Merge pull request #37 from ceph/devel

Sync rhs:devel with ceph:devel
This commit is contained in:
OpenShift Merge Robot 2021-10-18 17:10:48 +02:00 committed by GitHub
commit ddf7f43655
52 changed files with 3222 additions and 465 deletions

View File

@ -0,0 +1,20 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package kubernetes contains functions to obtain standard and recommended
// deployment artifacts for Kubernetes. These artifacts can be used by
// automation tools that want to deploy Ceph-CSI.
package kubernetes

View File

@ -0,0 +1,74 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"bytes"
_ "embed"
"fmt"
"text/template"
"github.com/ghodss/yaml"
v1 "k8s.io/api/core/v1"
)
//go:embed csi-config-map.yaml
var csiConfigMap string
type CSIConfigMapValues struct {
Name string
}
var CSIConfigMapDefaults = CSIConfigMapValues{
Name: "ceph-csi-config",
}
// NewCSIConfigMap takes a name from the CSIConfigMapValues struct and replaces
// the value in the template. A ConfigMap object is returned which can be
// created in the Kubernetes cluster.
func NewCSIConfigMap(values CSIConfigMapValues) (*v1.ConfigMap, error) {
data, err := NewCSIConfigMapYAML(values)
if err != nil {
return nil, err
}
cm := &v1.ConfigMap{}
err = yaml.Unmarshal([]byte(data), cm)
if err != nil {
return nil, fmt.Errorf("failed to convert YAML to %T: %w", cm, err)
}
return cm, nil
}
// NewCSIConfigMapYAML takes a name from the CSIConfigMapValues struct and
// replaces the value in the template. A ConfigMap object in YAML is returned
// which can be created in the Kubernetes cluster.
func NewCSIConfigMapYAML(values CSIConfigMapValues) (string, error) {
var buf bytes.Buffer
tmpl, err := template.New("CSIConfigMap").Parse(csiConfigMap)
if err != nil {
return "", fmt.Errorf("failed to parse template: %w", err)
}
err = tmpl.Execute(&buf, values)
if err != nil {
return "", fmt.Errorf("failed to replace values in template: %w", err)
}
return buf.String(), nil
}
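
These helpers are exported so that automation tools can consume the ConfigMap artifact directly. A minimal usage sketch, assuming the api module import path `github.com/ceph/ceph-csi/api/deploy/kubernetes/rbd` (the same path the yamlgen tool imports later in this change); the standalone `main` package and the custom name below are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/ceph/ceph-csi/api/deploy/kubernetes/rbd"
)

func main() {
	// Start from the packaged defaults and override the ConfigMap name
	// (the name used here is just an example).
	values := rbd.CSIConfigMapDefaults
	values.Name = "my-ceph-csi-config"

	// Render the templated YAML, e.g. for tools that apply manifests as text.
	yamlData, err := rbd.NewCSIConfigMapYAML(values)
	if err != nil {
		panic(err)
	}
	fmt.Println(yamlData)

	// Or obtain a typed *v1.ConfigMap for programmatic use.
	cm, err := rbd.NewCSIConfigMap(values)
	if err != nil {
		panic(err)
	}
	fmt.Println(cm.Name)
}
```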

View File

@ -0,0 +1,8 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: "{{ .Name }}"
data:
config.json: |-
[]

View File

@ -0,0 +1,38 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestNewCSIConfigMap(t *testing.T) {
cm, err := NewCSIConfigMap(CSIConfigMapDefaults)
require.NoError(t, err)
require.NotNil(t, cm)
require.Equal(t, cm.Name, CSIConfigMapDefaults.Name)
}
func TestNewCSIConfigMapYAML(t *testing.T) {
yaml, err := NewCSIConfigMapYAML(CSIConfigMapDefaults)
require.NoError(t, err)
require.NotEqual(t, "", yaml)
}

View File

@ -0,0 +1,74 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"bytes"
_ "embed"
"fmt"
"text/template"
"github.com/ghodss/yaml"
storagev1 "k8s.io/api/storage/v1"
)
//go:embed csidriver.yaml
var csiDriver string
type CSIDriverValues struct {
Name string
}
var CSIDriverDefaults = CSIDriverValues{
Name: "rbd.csi.ceph.com",
}
// NewCSIDriver takes a driver name from the CSIDriverValues struct and replaces
// the value in the template. A CSIDriver object is returned which can be
// created in the Kubernetes cluster.
func NewCSIDriver(values CSIDriverValues) (*storagev1.CSIDriver, error) {
data, err := NewCSIDriverYAML(values)
if err != nil {
return nil, err
}
driver := &storagev1.CSIDriver{}
err = yaml.Unmarshal([]byte(data), driver)
if err != nil {
return nil, fmt.Errorf("failed to convert YAML to %T: %w", driver, err)
}
return driver, nil
}
// NewCSIDriverYAML takes a driver name from the CSIDriverValues struct and replaces
// the value in the template. A CSIDriver object in YAML is returned which can be
// created in the Kubernetes cluster.
func NewCSIDriverYAML(values CSIDriverValues) (string, error) {
var buf bytes.Buffer
tmpl, err := template.New("CSIDriver").Parse(csiDriver)
if err != nil {
return "", fmt.Errorf("failed to parse template: %w", err)
}
err = tmpl.Execute(&buf, values)
if err != nil {
return "", fmt.Errorf("failed to replace values in template: %w", err)
}
return buf.String(), nil
}
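
The CSIDriver helpers return a typed object that can be handed straight to client-go. A minimal sketch, assuming the same api module import path and an in-cluster `*rest.Config`; this is illustrative and not part of the change:

```go
package main

import (
	"context"

	"github.com/ceph/ceph-csi/api/deploy/kubernetes/rbd"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Build the CSIDriver object from the defaults (driver name "rbd.csi.ceph.com").
	driver, err := rbd.NewCSIDriver(rbd.CSIDriverDefaults)
	if err != nil {
		panic(err)
	}

	// Assumes the process runs inside a Kubernetes cluster; any *rest.Config works.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Register the CSIDriver object with the API server.
	_, err = clientset.StorageV1().CSIDrivers().Create(context.TODO(), driver, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
}
```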

View File

@ -0,0 +1,10 @@
---
# if Kubernetes version is less than 1.18 change
# apiVersion to storage.k8s.io/v1beta1
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: "{{ .Name }}"
spec:
attachRequired: true
podInfoOnMount: false

View File

@ -0,0 +1,38 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestNewCSIDriver(t *testing.T) {
driver, err := NewCSIDriver(CSIDriverDefaults)
require.NoError(t, err)
require.NotNil(t, driver)
require.Equal(t, driver.Name, CSIDriverDefaults.Name)
}
func TestNewCSIDriverYAML(t *testing.T) {
yaml, err := NewCSIDriverYAML(CSIDriverDefaults)
require.NoError(t, err)
require.NotEqual(t, "", yaml)
}

View File

@ -6,4 +6,5 @@ require (
github.com/ghodss/yaml v1.0.0 github.com/ghodss/yaml v1.0.0
github.com/openshift/api v0.0.0-20210927171657-636513e97fda github.com/openshift/api v0.0.0-20210927171657-636513e97fda
github.com/stretchr/testify v1.7.0 github.com/stretchr/testify v1.7.0
k8s.io/api v0.22.1
) )

View File

@ -59,5 +59,5 @@ CSI_NODE_DRIVER_REGISTRAR_VERSION=v2.3.0
# - enable CEPH_CSI_RUN_ALL_TESTS when running tests with if it has root # - enable CEPH_CSI_RUN_ALL_TESTS when running tests with if it has root
# permissions on the host # permissions on the host
#CEPH_CSI_RUN_ALL_TESTS=true #CEPH_CSI_RUN_ALL_TESTS=true
E2E_TIMEOUT=90m E2E_TIMEOUT=120m
DEPLOY_TIMEOUT=10 DEPLOY_TIMEOUT=10

View File

@ -13,7 +13,16 @@
# limitations under the License. # limitations under the License.
.PHONY: all .PHONY: all
all: scc.yaml all: \
scc.yaml \
rbd/kubernetes/csidriver.yaml \
rbd/kubernetes/csi-config-map.yaml
scc.yaml: ../api/deploy/ocp/scc.yaml ../api/deploy/ocp/scc.go scc.yaml: ../api/deploy/ocp/scc.yaml ../api/deploy/ocp/scc.go
$(MAKE) -C ../tools generate-deploy $(MAKE) -C ../tools generate-deploy
rbd/kubernetes/csidriver.yaml: ../api/deploy/kubernetes/rbd/csidriver.yaml ../api/deploy/kubernetes/rbd/csidriver.go
$(MAKE) -C ../tools generate-deploy
rbd/kubernetes/csi-config-map.yaml: ../api/deploy/kubernetes/rbd/csidriver.*
$(MAKE) -C ../tools generate-deploy

View File

@ -1,8 +1,15 @@
#
# /!\ DO NOT MODIFY THIS FILE
#
# This file has been automatically generated by Ceph-CSI yamlgen.
# The source for the contents can be found in the api/deploy directory, make
# your modifications there.
#
--- ---
apiVersion: v1 apiVersion: v1
kind: ConfigMap kind: ConfigMap
metadata:
name: "ceph-csi-config"
data: data:
config.json: |- config.json: |-
[] []
metadata:
name: ceph-csi-config

View File

@ -1,10 +1,17 @@
#
# /!\ DO NOT MODIFY THIS FILE
#
# This file has been automatically generated by Ceph-CSI yamlgen.
# The source for the contents can be found in the api/deploy directory, make
# your modifications there.
#
--- ---
# if Kubernetes version is less than 1.18 change # if Kubernetes version is less than 1.18 change
# apiVersion to storage.k8s.io/v1beta1 # apiVersion to storage.k8s.io/v1beta1
apiVersion: storage.k8s.io/v1 apiVersion: storage.k8s.io/v1
kind: CSIDriver kind: CSIDriver
metadata: metadata:
name: rbd.csi.ceph.com name: "rbd.csi.ceph.com"
spec: spec:
attachRequired: true attachRequired: true
podInfoOnMount: false podInfoOnMount: false

View File

@ -1,4 +1,3 @@
---
# #
# /!\ DO NOT MODIFY THIS FILE # /!\ DO NOT MODIFY THIS FILE
# #

View File

@ -6,6 +6,7 @@
- [Create RBD static PV](#create-rbd-static-pv) - [Create RBD static PV](#create-rbd-static-pv)
- [RBD Volume Attributes in PV](#rbd-volume-attributes-in-pv) - [RBD Volume Attributes in PV](#rbd-volume-attributes-in-pv)
- [Create RBD static PVC](#create-rbd-static-pvc) - [Create RBD static PVC](#create-rbd-static-pvc)
- [Resize RBD image](#resize-rbd-image)
- [CephFS static PVC](#cephfs-static-pvc) - [CephFS static PVC](#cephfs-static-pvc)
- [Create CephFS subvolume](#create-cephfs-subvolume) - [Create CephFS subvolume](#create-cephfs-subvolume)
- [Create CephFS static PV](#create-cephfs-static-pv) - [Create CephFS static PV](#create-cephfs-static-pv)
@ -124,6 +125,26 @@ $ kubectl create -f fs-static-pvc.yaml
persistentvolumeclaim/fs-static-pvc created persistentvolumeclaim/fs-static-pvc created
``` ```
### Resize RBD image
Resize the RBD image in the Ceph cluster:
```console
rbd resize static-image --size=2048 --pool=replicapool
```
Once the RBD image has been resized in the Ceph cluster, update the PV size and
the PVC size to match the size of the RBD image.
Now scale down the application pod that is using the static PVC, then scale it
back up to resize the filesystem.
**Note** If you have mounted the same static PVC in multiple application pods,
scale down all of the application pods, make sure no application pod using the
static PVC is running on the node, and then scale all of the application pods
up again (this triggers a `NodeStageVolumeRequest`, which resizes the
filesystem for the static volume).
**Note** deleting PV and PVC does not removed the backend rbd image, user need to **Note** deleting PV and PVC does not removed the backend rbd image, user need to
manually delete the rbd image if required manually delete the rbd image if required

View File

@ -8,6 +8,7 @@ import (
"strings" "strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
) )
@ -48,7 +49,7 @@ func validateRBDStaticMigrationPVDeletion(f *framework.Framework, appPath, scNam
} }
opt["migration"] = "true" opt["migration"] = "true"
opt["monitors"] = mon opt["clusterID"] = getMonsHash(mon)
opt["imageFeatures"] = staticPVImageFeature opt["imageFeatures"] = staticPVImageFeature
opt["pool"] = defaultRBDPool opt["pool"] = defaultRBDPool
opt["staticVolume"] = strconv.FormatBool(true) opt["staticVolume"] = strconv.FormatBool(true)
@ -114,3 +115,32 @@ func composeIntreeMigVolID(mons, rbdImageName string) string {
return strings.Join(vhSlice, "_") return strings.Join(vhSlice, "_")
} }
// generateClusterIDConfigMapForMigration retrieves the monitors and generates a hash value
// which is used as the clusterID in the custom configmap. This function also recreates the
// RBD CSI pods once the custom configmap has been created.
func generateClusterIDConfigMapForMigration(f *framework.Framework, c kubernetes.Interface) error {
// create monitors hash by fetching monitors from the cluster.
mons, err := getMons(rookNamespace, c)
if err != nil {
return fmt.Errorf("failed to get monitors %w", err)
}
mon := strings.Join(mons, ",")
inClusterID := getMonsHash(mon)
clusterInfo := map[string]map[string]string{}
clusterInfo[inClusterID] = map[string]string{}
// create custom configmap
err = createCustomConfigMap(f.ClientSet, rbdDirPath, clusterInfo)
if err != nil {
return fmt.Errorf("failed to create configmap with error %w", err)
}
// restart csi pods for the configmap to take effect.
err = recreateCSIRBDPods(f)
if err != nil {
return fmt.Errorf("failed to recreate rbd csi pods with error %w", err)
}
return nil
}

View File

@ -61,6 +61,7 @@ var (
defaultCloneCount = 10 defaultCloneCount = 10
nbdMapOptions = "debug-rbd=20" nbdMapOptions = "debug-rbd=20"
e2eDefaultCephLogStrategy = "preserve"
) )
func deployRBDPlugin() { func deployRBDPlugin() {
@ -366,32 +367,17 @@ var _ = Describe("RBD", func() {
} }
}) })
} }
// todo: may be remove the below deletion test later once the migration nodestage tests are adjusted
// also to have deletion validation through the same.
By("validate RBD migration+static Block PVC Deletion", func() { By("validate RBD migration+static Block PVC Deletion", func() {
// create monitors hash by fetching monitors from the cluster. err := generateClusterIDConfigMapForMigration(f, c)
mons, err := getMons(rookNamespace, c)
if err != nil { if err != nil {
e2elog.Failf("failed to get monitors %v", err) e2elog.Failf("failed to generate clusterID configmap with error %v", err)
}
mon := strings.Join(mons, ",")
inClusterID := getMonsHash(mon)
clusterInfo := map[string]map[string]string{}
clusterInfo[inClusterID] = map[string]string{}
// create custom configmap
err = createCustomConfigMap(f.ClientSet, rbdDirPath, clusterInfo)
if err != nil {
e2elog.Failf("failed to create configmap with error %v", err)
} }
err = createRBDStorageClass(f.ClientSet, f, "migrationsc", nil, nil, deletePolicy) err = createRBDStorageClass(f.ClientSet, f, "migrationsc", nil, nil, deletePolicy)
if err != nil { if err != nil {
e2elog.Failf("failed to create storageclass with error %v", err) e2elog.Failf("failed to create storageclass with error %v", err)
} }
// restart csi pods for the configmap to take effect.
err = recreateCSIRBDPods(f)
if err != nil {
e2elog.Failf("failed to recreate rbd csi pods with error %v", err)
}
err = validateRBDStaticMigrationPVDeletion(f, rawAppPath, "migrationsc", true) err = validateRBDStaticMigrationPVDeletion(f, rawAppPath, "migrationsc", true)
if err != nil { if err != nil {
e2elog.Failf("failed to validate rbd migrated static block pv with error %v", err) e2elog.Failf("failed to validate rbd migrated static block pv with error %v", err)
@ -479,6 +465,7 @@ var _ = Describe("RBD", func() {
map[string]string{ map[string]string{
"mounter": "rbd-nbd", "mounter": "rbd-nbd",
"mapOptions": nbdMapOptions, "mapOptions": nbdMapOptions,
"cephLogStrategy": e2eDefaultCephLogStrategy,
}, },
deletePolicy) deletePolicy)
if err != nil { if err != nil {
@ -515,6 +502,7 @@ var _ = Describe("RBD", func() {
map[string]string{ map[string]string{
"mounter": "rbd-nbd", "mounter": "rbd-nbd",
"mapOptions": nbdMapOptions, "mapOptions": nbdMapOptions,
"cephLogStrategy": e2eDefaultCephLogStrategy,
}, },
deletePolicy) deletePolicy)
if err != nil { if err != nil {
@ -560,6 +548,7 @@ var _ = Describe("RBD", func() {
map[string]string{ map[string]string{
"mounter": "rbd-nbd", "mounter": "rbd-nbd",
"mapOptions": nbdMapOptions, "mapOptions": nbdMapOptions,
"cephLogStrategy": e2eDefaultCephLogStrategy,
}, },
deletePolicy) deletePolicy)
if err != nil { if err != nil {
@ -714,6 +703,7 @@ var _ = Describe("RBD", func() {
map[string]string{ map[string]string{
"mounter": "rbd-nbd", "mounter": "rbd-nbd",
"mapOptions": nbdMapOptions, "mapOptions": nbdMapOptions,
"cephLogStrategy": e2eDefaultCephLogStrategy,
"encrypted": "true", "encrypted": "true",
}, },
deletePolicy) deletePolicy)
@ -1072,6 +1062,7 @@ var _ = Describe("RBD", func() {
"imageFeatures": "layering,journaling,exclusive-lock", "imageFeatures": "layering,journaling,exclusive-lock",
"mounter": "rbd-nbd", "mounter": "rbd-nbd",
"mapOptions": nbdMapOptions, "mapOptions": nbdMapOptions,
"cephLogStrategy": e2eDefaultCephLogStrategy,
}, },
deletePolicy) deletePolicy)
if err != nil { if err != nil {
@ -1611,21 +1602,45 @@ var _ = Describe("RBD", func() {
}) })
By("validate RBD migration+static FileSystem PVC", func() { By("validate RBD migration+static FileSystem PVC", func() {
err := validateRBDStaticMigrationPV(f, appPath, false) err := generateClusterIDConfigMapForMigration(f, c)
if err != nil {
e2elog.Failf("failed to generate clusterID configmap with error %v", err)
}
err = validateRBDStaticMigrationPV(f, appPath, false)
if err != nil { if err != nil {
e2elog.Failf("failed to validate rbd migrated static pv with error %v", err) e2elog.Failf("failed to validate rbd migrated static pv with error %v", err)
} }
// validate created backend rbd images // validate created backend rbd images
validateRBDImageCount(f, 0, defaultRBDPool) validateRBDImageCount(f, 0, defaultRBDPool)
err = deleteConfigMap(rbdDirPath)
if err != nil {
e2elog.Failf("failed to delete configmap with error %v", err)
}
err = createConfigMap(rbdDirPath, f.ClientSet, f)
if err != nil {
e2elog.Failf("failed to create configmap with error %v", err)
}
}) })
By("validate RBD migration+static Block PVC", func() { By("validate RBD migration+static Block PVC", func() {
err := validateRBDStaticMigrationPV(f, rawAppPath, true) err := generateClusterIDConfigMapForMigration(f, c)
if err != nil {
e2elog.Failf("failed to generate clusterID configmap with error %v", err)
}
err = validateRBDStaticMigrationPV(f, rawAppPath, true)
if err != nil { if err != nil {
e2elog.Failf("failed to validate rbd migrated static block pv with error %v", err) e2elog.Failf("failed to validate rbd migrated static block pv with error %v", err)
} }
// validate created backend rbd images // validate created backend rbd images
validateRBDImageCount(f, 0, defaultRBDPool) validateRBDImageCount(f, 0, defaultRBDPool)
err = deleteConfigMap(rbdDirPath)
if err != nil {
e2elog.Failf("failed to delete configmap with error %v", err)
}
err = createConfigMap(rbdDirPath, f.ClientSet, f)
if err != nil {
e2elog.Failf("failed to create configmap with error %v", err)
}
}) })
By("validate failure of RBD static PVC without imageFeatures parameter", func() { By("validate failure of RBD static PVC without imageFeatures parameter", func() {

View File

@ -14,6 +14,7 @@ import (
const ( const (
staticPVSize = "4Gi" staticPVSize = "4Gi"
staticPVNewSize = "8Gi"
staticPVImageFeature = "layering" staticPVImageFeature = "layering"
monsPrefix = "mons-" monsPrefix = "mons-"
imagePrefix = "image-" imagePrefix = "image-"
@ -175,6 +176,11 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock, checkI
return err return err
} }
app.Labels = make(map[string]string)
app.Labels[appKey] = appLabel
appOpt := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", appKey, appLabel),
}
app.Namespace = namespace app.Namespace = namespace
app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcName app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcName
if checkImgFeat { if checkImgFeat {
@ -191,6 +197,14 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock, checkI
return err return err
} }
// resize image only if the image is already mounted and formatted
if !checkImgFeat {
err = validateRBDStaticResize(f, app, &appOpt, pvc, rbdImageName)
if err != nil {
return err
}
}
err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}) err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{})
if err != nil { if err != nil {
return fmt.Errorf("failed to delete pvc: %w", err) return fmt.Errorf("failed to delete pvc: %w", err)
@ -242,7 +256,7 @@ func validateRBDStaticMigrationPV(f *framework.Framework, appPath string, isBloc
} }
opt["migration"] = "true" opt["migration"] = "true"
opt["monitors"] = mon opt["clusterID"] = getMonsHash(mon)
opt["imageFeatures"] = staticPVImageFeature opt["imageFeatures"] = staticPVImageFeature
opt["pool"] = defaultRBDPool opt["pool"] = defaultRBDPool
opt["staticVolume"] = strconv.FormatBool(true) opt["staticVolume"] = strconv.FormatBool(true)
@ -473,3 +487,36 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) erro
return nil return nil
} }
func validateRBDStaticResize(
f *framework.Framework,
app *v1.Pod,
appOpt *metav1.ListOptions,
pvc *v1.PersistentVolumeClaim,
rbdImageName string) error {
// resize rbd image
size := staticPVNewSize
cmd := fmt.Sprintf(
"rbd resize %s --size=%s %s",
rbdImageName,
size,
rbdOptions(defaultRBDPool))
_, _, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
if err != nil {
return err
}
err = createApp(f.ClientSet, app, deployTimeout)
if err != nil {
return err
}
// check size for the filesystem type PVC
if *pvc.Spec.VolumeMode == v1.PersistentVolumeFilesystem {
err = checkDirSize(app, f, appOpt, size)
if err != nil {
return err
}
}
return deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
}

go.mod
View File

@ -3,7 +3,7 @@ module github.com/ceph/ceph-csi
go 1.16 go 1.16
require ( require (
github.com/aws/aws-sdk-go v1.40.50 github.com/aws/aws-sdk-go v1.41.0
github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000 github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000
github.com/ceph/go-ceph v0.11.0 github.com/ceph/go-ceph v0.11.0
github.com/container-storage-interface/spec v1.5.0 github.com/container-storage-interface/spec v1.5.0
@ -17,7 +17,7 @@ require (
github.com/kubernetes-csi/csi-lib-utils v0.10.0 github.com/kubernetes-csi/csi-lib-utils v0.10.0
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a
github.com/onsi/ginkgo v1.16.4 github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.16.0 github.com/onsi/gomega v1.16.0
github.com/pborman/uuid v1.2.1 github.com/pborman/uuid v1.2.1
github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_golang v1.11.0
@ -33,7 +33,7 @@ require (
k8s.io/kubernetes v1.22.2 k8s.io/kubernetes v1.22.2
k8s.io/mount-utils v0.22.2 k8s.io/mount-utils v0.22.2
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a
sigs.k8s.io/controller-runtime v0.10.1 sigs.k8s.io/controller-runtime v0.10.2
) )
replace ( replace (

go.sum
View File

@ -133,8 +133,8 @@ github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi
github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.40.50 h1:QP4NC9EZWBszbNo2UbG6bbObMtN35kCFb4h0r08q884= github.com/aws/aws-sdk-go v1.41.0 h1:XUzHLFWQVhmFtmKTodnAo5QdooPQfpVfilCxIV3aLoE=
github.com/aws/aws-sdk-go v1.40.50/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go v1.41.0/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@ -786,8 +786,9 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@ -1619,8 +1620,8 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 h1:fmRfl9WJ4ApJn7LxNuED4m0t18qivVQOxP6aAYG9J6c= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 h1:fmRfl9WJ4ApJn7LxNuED4m0t18qivVQOxP6aAYG9J6c=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I= sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I=
sigs.k8s.io/controller-runtime v0.10.1 h1:+eLHgY/VrJWnfg6iXUqhCUqNXgPH1NZeP9drNAAgWlg= sigs.k8s.io/controller-runtime v0.10.2 h1:jW8qiY+yMnnPx6O9hu63tgcwaKzd1yLYui+mpvClOOc=
sigs.k8s.io/controller-runtime v0.10.1/go.mod h1:CQp8eyUQZ/Q7PJvnIrB6/hgfTC1kBkGylwsLgOQi1WY= sigs.k8s.io/controller-runtime v0.10.2/go.mod h1:CQp8eyUQZ/Q7PJvnIrB6/hgfTC1kBkGylwsLgOQi1WY=
sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g= sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g=
sigs.k8s.io/kustomize/cmd/config v0.9.13/go.mod h1:7547FLF8W/lTaDf0BDqFTbZxM9zqwEJqCKN9sSR0xSs= sigs.k8s.io/kustomize/cmd/config v0.9.13/go.mod h1:7547FLF8W/lTaDf0BDqFTbZxM9zqwEJqCKN9sSR0xSs=
sigs.k8s.io/kustomize/kustomize/v4 v4.2.0/go.mod h1:MOkR6fmhwG7hEDRXBYELTi5GSFcLwfqwzTRHW3kv5go= sigs.k8s.io/kustomize/kustomize/v4 v4.2.0/go.mod h1:MOkR6fmhwG7hEDRXBYELTi5GSFcLwfqwzTRHW3kv5go=

View File

@ -54,12 +54,12 @@ func TestIsMigrationVolID(t *testing.T) {
}, },
} }
for _, tt := range tests { for _, tt := range tests {
tt := tt newtt := tt
t.Run(tt.name, func(t *testing.T) { t.Run(newtt.name, func(t *testing.T) {
t.Parallel() t.Parallel()
got := isMigrationVolID(tt.args) got := isMigrationVolID(newtt.args)
if got != tt.migVolID { if got != newtt.migVolID {
t.Errorf("isMigrationVolID() = %v, want %v", got, tt.migVolID) t.Errorf("isMigrationVolID() = %v, want %v", got, newtt.migVolID)
} }
}) })
} }
@ -156,17 +156,17 @@ func TestParseMigrationVolID(t *testing.T) {
}, },
} }
for _, tt := range tests { for _, tt := range tests {
tt := tt newtt := tt
t.Run(tt.name, func(t *testing.T) { t.Run(newtt.name, func(t *testing.T) {
t.Parallel() t.Parallel()
got, err := parseMigrationVolID(tt.args) got, err := parseMigrationVolID(newtt.args)
if (err != nil) != tt.wantErr { if (err != nil) != newtt.wantErr {
t.Errorf("ParseMigrationVolID() error = %v, wantErr %v", err, tt.wantErr) t.Errorf("ParseMigrationVolID() error = %v, wantErr %v", err, newtt.wantErr)
return return
} }
if !reflect.DeepEqual(got, tt.want) { if !reflect.DeepEqual(got, newtt.want) {
t.Errorf("ParseMigrationVolID() got = %v, want %v", got, tt.want) t.Errorf("ParseMigrationVolID() got = %v, want %v", got, newtt.want)
} }
}) })
} }

View File

@ -33,6 +33,7 @@ import (
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
"k8s.io/kubernetes/pkg/volume"
mount "k8s.io/mount-utils" mount "k8s.io/mount-utils"
utilexec "k8s.io/utils/exec" utilexec "k8s.io/utils/exec"
) )
@ -149,24 +150,6 @@ func healerStageTransaction(ctx context.Context, cr *util.Credentials, volOps *r
return nil return nil
} }
// getClusterIDFromMigrationVolume fills the clusterID for the passed in monitors.
func getClusterIDFromMigrationVolume(monitors string) (string, error) {
var err error
var rclusterID string
for _, m := range strings.Split(monitors, ",") {
rclusterID, err = util.GetClusterIDFromMon(m)
if err != nil && !errors.Is(err, util.ErrMissingConfigForMonitor) {
return "", err
}
if rclusterID != "" {
return rclusterID, nil
}
}
return "", err
}
// populateRbdVol update the fields in rbdVolume struct based on the request it received. // populateRbdVol update the fields in rbdVolume struct based on the request it received.
func populateRbdVol( func populateRbdVol(
ctx context.Context, ctx context.Context,
@ -290,16 +273,6 @@ func (ns *NodeServer) NodeStageVolume(
} }
defer ns.VolumeLocks.Release(volID) defer ns.VolumeLocks.Release(volID)
// Check this is a migration request because in that case, unlike other node stage requests
// it will be missing the clusterID, so fill it by fetching it from config file using mon.
if req.GetVolumeContext()[intreeMigrationKey] == intreeMigrationLabel && req.VolumeContext[util.ClusterIDKey] == "" {
cID, cErr := getClusterIDFromMigrationVolume(req.GetVolumeContext()["monitors"])
if cErr != nil {
return nil, status.Error(codes.Internal, cErr.Error())
}
req.VolumeContext[util.ClusterIDKey] = cID
}
stagingParentPath := req.GetStagingTargetPath() stagingParentPath := req.GetStagingTargetPath()
stagingTargetPath := stagingParentPath + "/" + volID stagingTargetPath := stagingParentPath + "/" + volID
@ -385,8 +358,6 @@ func (ns *NodeServer) stageTransaction(
var err error var err error
var readOnly bool var readOnly bool
var feature bool
var depth uint
var cr *util.Credentials var cr *util.Credentials
cr, err = util.NewUserCredentials(req.GetSecrets()) cr, err = util.NewUserCredentials(req.GetSecrets())
@ -402,29 +373,11 @@ func (ns *NodeServer) stageTransaction(
volOptions.readOnly = true volOptions.readOnly = true
} }
if kernelRelease == "" { err = flattenImageBeforeMapping(ctx, volOptions, cr)
// fetch the current running kernel info
kernelRelease, err = util.GetKernelVersion()
if err != nil { if err != nil {
return transaction, err return transaction, err
} }
}
if !util.CheckKernelSupport(kernelRelease, deepFlattenSupport) && !skipForceFlatten {
feature, err = volOptions.checkImageChainHasFeature(ctx, librbd.FeatureDeepFlatten)
if err != nil {
return transaction, err
}
depth, err = volOptions.getCloneDepth(ctx)
if err != nil {
return transaction, err
}
if feature || depth != 0 {
err = volOptions.flattenRbdImage(ctx, cr, true, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil {
return transaction, err
}
}
}
// Mapping RBD image // Mapping RBD image
var devicePath string var devicePath string
devicePath, err = attachRBDImage(ctx, volOptions, devicePath, cr) devicePath, err = attachRBDImage(ctx, volOptions, devicePath, cr)
@ -472,6 +425,26 @@ func (ns *NodeServer) stageTransaction(
} }
transaction.isMounted = true transaction.isMounted = true
// resize if it is a filesystem-type static volume.
if staticVol && !isBlock {
var ok bool
resizer := mount.NewResizeFs(utilexec.New())
ok, err = resizer.NeedResize(devicePath, stagingTargetPath)
if err != nil {
return transaction, status.Errorf(codes.Internal,
"Need resize check failed on devicePath %s and staingPath %s, error: %v",
devicePath,
stagingTargetPath,
err)
}
if ok {
ok, err = resizer.Resize(devicePath, stagingTargetPath)
if !ok {
return transaction, status.Errorf(codes.Internal,
"resize failed on path %s, error: %v", stagingTargetPath, err)
}
}
}
if !readOnly { if !readOnly {
// #nosec - allow anyone to write inside the target path // #nosec - allow anyone to write inside the target path
err = os.Chmod(stagingTargetPath, 0o777) err = os.Chmod(stagingTargetPath, 0o777)
@ -480,6 +453,41 @@ func (ns *NodeServer) stageTransaction(
return transaction, err return transaction, err
} }
func flattenImageBeforeMapping(
ctx context.Context,
volOptions *rbdVolume,
cr *util.Credentials) error {
var err error
var feature bool
var depth uint
if kernelRelease == "" {
// fetch the current running kernel info
kernelRelease, err = util.GetKernelVersion()
if err != nil {
return err
}
}
if !util.CheckKernelSupport(kernelRelease, deepFlattenSupport) && !skipForceFlatten {
feature, err = volOptions.checkImageChainHasFeature(ctx, librbd.FeatureDeepFlatten)
if err != nil {
return err
}
depth, err = volOptions.getCloneDepth(ctx)
if err != nil {
return err
}
if feature || depth != 0 {
err = volOptions.flattenRbdImage(ctx, cr, true, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil {
return err
}
}
}
return nil
}
func (ns *NodeServer) undoStagingTransaction( func (ns *NodeServer) undoStagingTransaction(
ctx context.Context, ctx context.Context,
req *csi.NodeStageVolumeRequest, req *csi.NodeStageVolumeRequest,
@ -1140,18 +1148,10 @@ func (ns *NodeServer) NodeGetVolumeStats(
// //
// TODO: https://github.com/container-storage-interface/spec/issues/371#issuecomment-756834471 // TODO: https://github.com/container-storage-interface/spec/issues/371#issuecomment-756834471
func blockNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.NodeGetVolumeStatsResponse, error) { func blockNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.NodeGetVolumeStatsResponse, error) {
args := []string{"--noheadings", "--bytes", "--output=SIZE", targetPath} mp := volume.NewMetricsBlock(targetPath)
lsblkSize, _, err := util.ExecCommand(ctx, "/bin/lsblk", args...) m, err := mp.GetMetrics()
if err != nil { if err != nil {
err = fmt.Errorf("lsblk %v returned an error: %w", args, err) err = fmt.Errorf("failed to get metrics: %w", err)
log.ErrorLog(ctx, err.Error())
return nil, status.Error(codes.Internal, err.Error())
}
size, err := strconv.ParseInt(strings.TrimSpace(lsblkSize), 10, 64)
if err != nil {
err = fmt.Errorf("failed to convert %q to bytes: %w", lsblkSize, err)
log.ErrorLog(ctx, err.Error()) log.ErrorLog(ctx, err.Error())
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
@ -1160,7 +1160,7 @@ func blockNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.NodeG
return &csi.NodeGetVolumeStatsResponse{ return &csi.NodeGetVolumeStatsResponse{
Usage: []*csi.VolumeUsage{ Usage: []*csi.VolumeUsage{
{ {
Total: size, Total: m.Capacity.Value(),
Unit: csi.VolumeUsage_BYTES, Unit: csi.VolumeUsage_BYTES,
}, },
}, },

View File

@ -481,12 +481,12 @@ func (rv *rbdVolume) isInUse() (bool, error) {
return false, err return false, err
} }
// TODO replace this with logic to get mirroring information once mirrorInfo, err := image.GetMirrorImageInfo()
// https://github.com/ceph/go-ceph/issues/379 is fixed
err = rv.updateVolWithImageInfo()
if err != nil { if err != nil {
return false, err return false, err
} }
rv.Primary = mirrorInfo.Primary
// because we opened the image, there is at least one watcher // because we opened the image, there is at least one watcher
defaultWatchers := 1 defaultWatchers := 1
if rv.Primary { if rv.Primary {
@ -1463,51 +1463,6 @@ func (rv *rbdVolume) getImageInfo() error {
return nil return nil
} }
// imageInfo strongly typed JSON spec for image info.
type imageInfo struct {
Mirroring mirroring `json:"mirroring"`
}
// parentInfo spec for parent volume info.
type mirroring struct {
Primary bool `json:"primary"`
}
// updateVolWithImageInfo updates provided rbdVolume with information from on-disk data
// regarding the same.
func (rv *rbdVolume) updateVolWithImageInfo() error {
// rbd --format=json info [image-spec | snap-spec]
var imgInfo imageInfo
stdout, stderr, err := util.ExecCommand(
context.TODO(),
"rbd",
"-m", rv.Monitors,
"--id", rv.conn.Creds.ID,
"--keyfile="+rv.conn.Creds.KeyFile,
"-c", util.CephConfigPath,
"--format="+"json",
"info", rv.String())
if err != nil {
if strings.Contains(stderr, "rbd: error opening image "+rv.RbdImageName+
": (2) No such file or directory") {
return util.JoinErrors(ErrImageNotFound, err)
}
return err
}
if stdout != "" {
err = json.Unmarshal([]byte(stdout), &imgInfo)
if err != nil {
return fmt.Errorf("unmarshal failed (%w), raw buffer response: %s", err, stdout)
}
rv.Primary = imgInfo.Mirroring.Primary
}
return nil
}
/* /*
checkSnapExists queries rbd about the snapshots of the given image and returns checkSnapExists queries rbd about the snapshots of the given image and returns
ErrImageNotFound if provided image is not found, and ErrSnapNotFound if ErrImageNotFound if provided image is not found, and ErrSnapNotFound if

View File

@ -634,8 +634,7 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
ready = checkRemoteSiteStatus(ctx, mirrorStatus) ready = checkRemoteSiteStatus(ctx, mirrorStatus)
} }
// resync only if the image is in error state if resyncRequired(localStatus) {
if strings.Contains(localStatus.State.String(), string(errorState)) {
err = rbdVol.resyncImage() err = rbdVol.resyncImage()
if err != nil { if err != nil {
log.ErrorLog(ctx, err.Error()) log.ErrorLog(ctx, err.Error())
@ -664,3 +663,20 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
return resp, nil return resp, nil
} }
// resyncRequired returns true if local image is in split-brain state and image
// needs resync.
func resyncRequired(localStatus librbd.SiteMirrorImageStatus) bool {
// resync is required if the image is in an error state or the description
// contains a split-brain message.
// In some corner cases, like `re-player shutdown`, the local image will not
// be in an error state, so the `description` field is also checked to detect
// split-brain.
splitBrain := "split-brain"
if strings.Contains(localStatus.State.String(), string(errorState)) ||
strings.Contains(localStatus.Description, splitBrain) {
return true
}
return false
}

View File

@ -161,47 +161,3 @@ func GetClusterID(options map[string]string) (string, error) {
return clusterID, nil return clusterID, nil
} }
// GetClusterIDFromMon will be called with a mon string to fetch
// clusterID based on the passed in mon string. If passed in 'mon'
// string has been found in the config the clusterID is returned,
// else error.
func GetClusterIDFromMon(mon string) (string, error) {
clusterID, err := readClusterInfoWithMon(CsiConfigFile, mon)
return clusterID, err
}
func readClusterInfoWithMon(pathToConfig, mon string) (string, error) {
var config []ClusterInfo
// #nosec
content, err := ioutil.ReadFile(pathToConfig)
if err != nil {
err = fmt.Errorf("error fetching configuration file %q: %w", pathToConfig, err)
return "", err
}
err = json.Unmarshal(content, &config)
if err != nil {
return "", fmt.Errorf("unmarshal failed (%w), raw buffer response: %s",
err, string(content))
}
for _, cluster := range config {
// as the same mons can fall into different clusterIDs with
// different radosnamespace configurations, we are bailing out
// if radosnamespace configuration is found for this cluster
if cluster.RadosNamespace != "" {
continue
}
for _, m := range cluster.Monitors {
if m == mon {
return cluster.ClusterID, nil
}
}
}
return "", ErrMissingConfigForMonitor
}

View File

@ -34,7 +34,6 @@ func cleanupTestData() {
os.RemoveAll(basePath) os.RemoveAll(basePath)
} }
// nolint:gocyclo,cyclop // TODO: make this function less complex.
func TestCSIConfig(t *testing.T) { func TestCSIConfig(t *testing.T) {
t.Parallel() t.Parallel()
var err error var err error
@ -139,35 +138,4 @@ func TestCSIConfig(t *testing.T) {
if err != nil { if err != nil {
t.Errorf("Test setup error %s", err) t.Errorf("Test setup error %s", err)
} }
// TEST: Should pass as clusterID is present in config
content, err = readClusterInfoWithMon(pathToConfig, "mon1")
if err != nil || content != "test2" {
t.Errorf("Failed: want (%s), got (%s) (%v)", "test2", content, err)
}
// TEST: Should pass as clusterID is present in config
content, err = readClusterInfoWithMon(pathToConfig, "mon5")
if err != nil || content != "test1" {
t.Errorf("Failed: want (%s), got (%s) (%v)", "test1", content, err)
}
// TEST: Should fail as clusterID is not present in config
content, err = readClusterInfoWithMon(pathToConfig, "mon8")
if err == nil {
t.Errorf("Failed: got (%s)", content)
}
data = "[{\"clusterID\":\"" + clusterID2 + "\", \"radosNamespace\": \"ns1\", \"monitors\":[\"mon1\"]}," +
"{\"clusterID\":\"" + clusterID1 + "\",\"monitors\":[\"mon1\"]}]"
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0o600)
if err != nil {
t.Errorf("Test setup error %s", err)
}
// TEST: Should pass as clusterID is present in config
content, err = readClusterInfoWithMon(pathToConfig, "mon1")
if err != nil || content != clusterID1 {
t.Errorf("Failed: want (%s), got (%s) (%v)", "test2", content, err)
}
} }

View File

@ -42,22 +42,28 @@ function deploy_rook() {
then then
kubectl_retry create -f "${ROOK_URL}/crds.yaml" kubectl_retry create -f "${ROOK_URL}/crds.yaml"
fi fi
kubectl_retry create -f "${ROOK_URL}/operator.yaml" TEMP_DIR="$(mktemp -d)"
curl -o "${TEMP_DIR}/operator.yaml" "${ROOK_URL}/operator.yaml"
# disable rook deployed csi drivers
sed -i 's|ROOK_CSI_ENABLE_CEPHFS: "true"|ROOK_CSI_ENABLE_CEPHFS: "false"|g' "${TEMP_DIR}/operator.yaml"
sed -i 's|ROOK_CSI_ENABLE_RBD: "true"|ROOK_CSI_ENABLE_RBD: "false"|g' "${TEMP_DIR}/operator.yaml"
kubectl_retry create -f "${TEMP_DIR}/operator.yaml"
# Override the ceph version which rook installs by default. # Override the ceph version which rook installs by default.
if [ -z "${ROOK_CEPH_CLUSTER_IMAGE}" ] if [ -z "${ROOK_CEPH_CLUSTER_IMAGE}" ]
then then
kubectl_retry create -f "${ROOK_URL}/cluster-test.yaml" kubectl_retry create -f "${ROOK_URL}/cluster-test.yaml"
else else
ROOK_CEPH_CLUSTER_VERSION_IMAGE_PATH="image: ${ROOK_CEPH_CLUSTER_IMAGE}" ROOK_CEPH_CLUSTER_VERSION_IMAGE_PATH="image: ${ROOK_CEPH_CLUSTER_IMAGE}"
TEMP_DIR="$(mktemp -d)"
curl -o "${TEMP_DIR}"/cluster-test.yaml "${ROOK_URL}/cluster-test.yaml" curl -o "${TEMP_DIR}"/cluster-test.yaml "${ROOK_URL}/cluster-test.yaml"
sed -i "s|image.*|${ROOK_CEPH_CLUSTER_VERSION_IMAGE_PATH}|g" "${TEMP_DIR}"/cluster-test.yaml sed -i "s|image.*|${ROOK_CEPH_CLUSTER_VERSION_IMAGE_PATH}|g" "${TEMP_DIR}"/cluster-test.yaml
sed -i "s/config: |/config: |\n \[mon\]\n mon_warn_on_insecure_global_id_reclaim_allowed = false/g" "${TEMP_DIR}"/cluster-test.yaml sed -i "s/config: |/config: |\n \[mon\]\n mon_warn_on_insecure_global_id_reclaim_allowed = false/g" "${TEMP_DIR}"/cluster-test.yaml
sed -i "s/healthCheck:/healthCheck:\n livenessProbe:\n mon:\n disabled: true\n mgr:\n disabled: true\n mds:\n disabled: true/g" "${TEMP_DIR}"/cluster-test.yaml sed -i "s/healthCheck:/healthCheck:\n livenessProbe:\n mon:\n disabled: true\n mgr:\n disabled: true\n mds:\n disabled: true/g" "${TEMP_DIR}"/cluster-test.yaml
cat "${TEMP_DIR}"/cluster-test.yaml cat "${TEMP_DIR}"/cluster-test.yaml
kubectl_retry create -f "${TEMP_DIR}/cluster-test.yaml" kubectl_retry create -f "${TEMP_DIR}/cluster-test.yaml"
rm -rf "${TEMP_DIR}"
fi fi
rm -rf "${TEMP_DIR}"
kubectl_retry create -f "${ROOK_URL}/toolbox.yaml" kubectl_retry create -f "${ROOK_URL}/toolbox.yaml"
kubectl_retry create -f "${ROOK_URL}/filesystem-test.yaml" kubectl_retry create -f "${ROOK_URL}/filesystem-test.yaml"

View File

@ -19,12 +19,13 @@ package main
import ( import (
"fmt" "fmt"
"os" "os"
"reflect"
"github.com/ceph/ceph-csi/api/deploy/kubernetes/rbd"
"github.com/ceph/ceph-csi/api/deploy/ocp" "github.com/ceph/ceph-csi/api/deploy/ocp"
) )
const header = `--- const header = `#
#
# /!\ DO NOT MODIFY THIS FILE # /!\ DO NOT MODIFY THIS FILE
# #
# This file has been automatically generated by Ceph-CSI yamlgen. # This file has been automatically generated by Ceph-CSI yamlgen.
@ -35,18 +36,25 @@ const header = `---
type deploymentArtifact struct { type deploymentArtifact struct {
filename string filename string
// FIXME: This is not dynamic enough for additional YAML generating yamlFunc reflect.Value
// functions. Need to look into typecasting the functions and passing defaults reflect.Value
// interface{} instead of ocp.SecurityContextConstraintsValues.
yamlFunc func(ocp.SecurityContextConstraintsValues) (string, error)
defaults ocp.SecurityContextConstraintsValues
} }
var yamlArtifacts = []deploymentArtifact{ var yamlArtifacts = []deploymentArtifact{
{ {
"../deploy/scc.yaml", "../deploy/scc.yaml",
ocp.NewSecurityContextConstraintsYAML, reflect.ValueOf(ocp.NewSecurityContextConstraintsYAML),
ocp.SecurityContextConstraintsDefaults, reflect.ValueOf(ocp.SecurityContextConstraintsDefaults),
},
{
"../deploy/rbd/kubernetes/csidriver.yaml",
reflect.ValueOf(rbd.NewCSIDriverYAML),
reflect.ValueOf(rbd.CSIDriverDefaults),
},
{
"../deploy/rbd/kubernetes/csi-config-map.yaml",
reflect.ValueOf(rbd.NewCSIConfigMapYAML),
reflect.ValueOf(rbd.CSIConfigMapDefaults),
}, },
} }
@ -69,9 +77,10 @@ func writeArtifact(artifact deploymentArtifact) {
panic(fmt.Sprintf("failed to write header to %q: %v", artifact.filename, err)) panic(fmt.Sprintf("failed to write header to %q: %v", artifact.filename, err))
} }
data, err := artifact.yamlFunc(artifact.defaults) result := artifact.yamlFunc.Call([]reflect.Value{artifact.defaults})
if err != nil { data := result[0].String()
panic(fmt.Sprintf("failed to generate YAML for %q: %v", artifact.filename, err)) if data == "" {
panic(fmt.Sprintf("failed to generate YAML for %q: %v", artifact.filename, result[1].String()))
} }
_, err = f.WriteString(data) _, err = f.WriteString(data)
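
The switch from a typed `yamlFunc` field to `reflect.Value` lets yamlgen keep generator functions with different parameter types in a single slice. A standalone sketch of that reflection pattern, using hypothetical generator functions rather than the real ones:

```go
package main

import (
	"fmt"
	"reflect"
)

// Hypothetical generators with different value types, mirroring the
// NewCSIDriverYAML / NewCSIConfigMapYAML signatures.
type driverValues struct{ Name string }
type configValues struct{ Name string }

func newDriverYAML(v driverValues) (string, error) { return "name: " + v.Name, nil }
func newConfigYAML(v configValues) (string, error) { return "name: " + v.Name, nil }

type artifact struct {
	yamlFunc reflect.Value
	defaults reflect.Value
}

func main() {
	artifacts := []artifact{
		{reflect.ValueOf(newDriverYAML), reflect.ValueOf(driverValues{Name: "rbd.csi.ceph.com"})},
		{reflect.ValueOf(newConfigYAML), reflect.ValueOf(configValues{Name: "ceph-csi-config"})},
	}
	for _, a := range artifacts {
		// Call the generator through reflection; result[0] is the YAML
		// string and result[1] is the error value.
		result := a.yamlFunc.Call([]reflect.Value{a.defaults})
		if !result[1].IsNil() {
			panic(result[1].Interface().(error))
		}
		fmt.Println(result[0].String())
	}
}
```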

View File

@ -237,6 +237,19 @@ var awsPartition = partition{
"us-west-2": endpoint{}, "us-west-2": endpoint{},
}, },
}, },
"account": service{
PartitionEndpoint: "aws-global",
IsRegionalized: boxedFalse,
Endpoints: endpoints{
"aws-global": endpoint{
Hostname: "account.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
},
},
"acm": service{ "acm": service{
Endpoints: endpoints{ Endpoints: endpoints{
@ -959,7 +972,11 @@ var awsPartition = partition{
Protocols: []string{"https"}, Protocols: []string{"https"},
}, },
Endpoints: endpoints{ Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{}, "eu-central-1": endpoint{},
"eu-north-1": endpoint{},
"eu-west-1": endpoint{}, "eu-west-1": endpoint{},
"us-east-1": endpoint{}, "us-east-1": endpoint{},
"us-east-2": endpoint{}, "us-east-2": endpoint{},
@ -1241,6 +1258,62 @@ var awsPartition = partition{
"us-west-2": endpoint{}, "us-west-2": endpoint{},
}, },
}, },
"cloudcontrolapi": service{
Endpoints: endpoints{
"af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
"eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
"fips-ca-central-1": endpoint{
Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-central-1",
},
},
"fips-us-east-1": endpoint{
Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
"fips-us-east-2": endpoint{
Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
},
"fips-us-west-1": endpoint{
Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
},
"fips-us-west-2": endpoint{
Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
},
"me-south-1": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
"clouddirectory": service{ "clouddirectory": service{
Endpoints: endpoints{ Endpoints: endpoints{
@ -2002,6 +2075,30 @@ var awsPartition = partition{
"us-west-2": endpoint{}, "us-west-2": endpoint{},
}, },
}, },
"databrew": service{
Endpoints: endpoints{
"af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
"eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
"dataexchange": service{ "dataexchange": service{
Endpoints: endpoints{ Endpoints: endpoints{
@ -3622,6 +3719,71 @@ var awsPartition = partition{
"us-west-2": endpoint{}, "us-west-2": endpoint{},
}, },
}, },
"grafana": service{
Endpoints: endpoints{
"ap-northeast-1": endpoint{
Hostname: "grafana.ap-northeast-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-northeast-1",
},
},
"ap-northeast-2": endpoint{
Hostname: "grafana.ap-northeast-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-northeast-2",
},
},
"ap-southeast-1": endpoint{
Hostname: "grafana.ap-southeast-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-1",
},
},
"ap-southeast-2": endpoint{
Hostname: "grafana.ap-southeast-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-2",
},
},
"eu-central-1": endpoint{
Hostname: "grafana.eu-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-central-1",
},
},
"eu-west-1": endpoint{
Hostname: "grafana.eu-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-west-1",
},
},
"eu-west-2": endpoint{
Hostname: "grafana.eu-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-west-2",
},
},
"us-east-1": endpoint{
Hostname: "grafana.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
"us-east-2": endpoint{
Hostname: "grafana.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
},
"us-west-2": endpoint{
Hostname: "grafana.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
},
},
},
"greengrass": service{ "greengrass": service{
IsRegionalized: boxedTrue, IsRegionalized: boxedTrue,
Defaults: endpoint{ Defaults: endpoint{
@ -3934,6 +4096,7 @@ var awsPartition = partition{
Endpoints: endpoints{ Endpoints: endpoints{
"ap-northeast-1": endpoint{}, "ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{}, "ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{}, "ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{}, "ap-southeast-2": endpoint{},
"eu-central-1": endpoint{}, "eu-central-1": endpoint{},
@ -3959,6 +4122,12 @@ var awsPartition = partition{
Region: "ap-northeast-2", Region: "ap-northeast-2",
}, },
}, },
"ap-south-1": endpoint{
Hostname: "data.iotevents.ap-south-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-south-1",
},
},
"ap-southeast-1": endpoint{ "ap-southeast-1": endpoint{
Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", Hostname: "data.iotevents.ap-southeast-1.amazonaws.com",
CredentialScope: credentialScope{ CredentialScope: credentialScope{
@ -4165,6 +4334,36 @@ var awsPartition = partition{
"us-west-2": endpoint{}, "us-west-2": endpoint{},
}, },
}, },
"kendra": service{
Endpoints: endpoints{
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-west-1": endpoint{},
"fips-us-east-1": endpoint{
Hostname: "kendra-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
"fips-us-east-2": endpoint{
Hostname: "kendra-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
},
"fips-us-west-2": endpoint{
Hostname: "kendra-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
},
},
"kinesis": service{ "kinesis": service{
Endpoints: endpoints{ Endpoints: endpoints{
@ -5234,6 +5433,12 @@ var awsPartition = partition{
Region: "eu-west-3", Region: "eu-west-3",
}, },
}, },
"sa-east-1": endpoint{
Hostname: "oidc.sa-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "sa-east-1",
},
},
"us-east-1": endpoint{ "us-east-1": endpoint{
Hostname: "oidc.us-east-1.amazonaws.com", Hostname: "oidc.us-east-1.amazonaws.com",
CredentialScope: credentialScope{ CredentialScope: credentialScope{
@ -7456,6 +7661,54 @@ var awsPartition = partition{
"us-west-2": endpoint{}, "us-west-2": endpoint{},
}, },
}, },
"textract": service{
Endpoints: endpoints{
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
"fips-ca-central-1": endpoint{
Hostname: "textract-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-central-1",
},
},
"fips-us-east-1": endpoint{
Hostname: "textract-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
"fips-us-east-2": endpoint{
Hostname: "textract-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
},
"fips-us-west-1": endpoint{
Hostname: "textract-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
},
"fips-us-west-2": endpoint{
Hostname: "textract-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
"transcribe": service{ "transcribe": service{
Defaults: endpoint{ Defaults: endpoint{
Protocols: []string{"https"}, Protocols: []string{"https"},
@ -7640,6 +7893,18 @@ var awsPartition = partition{
}, },
}, },
}, },
"voiceid": service{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-west-2": endpoint{},
},
},
"waf": service{ "waf": service{
PartitionEndpoint: "aws-global", PartitionEndpoint: "aws-global",
IsRegionalized: boxedFalse, IsRegionalized: boxedFalse,
@ -8067,6 +8332,19 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{}, "cn-northwest-1": endpoint{},
}, },
}, },
"account": service{
PartitionEndpoint: "aws-cn-global",
IsRegionalized: boxedFalse,
Endpoints: endpoints{
"aws-cn-global": endpoint{
Hostname: "account.cn-northwest-1.amazonaws.com.cn",
CredentialScope: credentialScope{
Region: "cn-northwest-1",
},
},
},
},
"acm": service{ "acm": service{
Endpoints: endpoints{ Endpoints: endpoints{
@ -8268,6 +8546,13 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{}, "cn-northwest-1": endpoint{},
}, },
}, },
"databrew": service{
Endpoints: endpoints{
"cn-north-1": endpoint{},
"cn-northwest-1": endpoint{},
},
},
"dax": service{ "dax": service{
Endpoints: endpoints{ Endpoints: endpoints{
@ -9278,6 +9563,25 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{}, "us-gov-west-1": endpoint{},
}, },
}, },
"cloudcontrolapi": service{
Endpoints: endpoints{
"fips-us-gov-east-1": endpoint{
Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
},
"fips-us-gov-west-1": endpoint{
Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
},
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
},
"clouddirectory": service{ "clouddirectory": service{
Endpoints: endpoints{ Endpoints: endpoints{
@ -9492,6 +9796,12 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{}, "us-gov-west-1": endpoint{},
}, },
}, },
"databrew": service{
Endpoints: endpoints{
"us-gov-west-1": endpoint{},
},
},
"datasync": service{ "datasync": service{
Endpoints: endpoints{ Endpoints: endpoints{
@ -10931,6 +11241,25 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{}, "us-gov-west-1": endpoint{},
}, },
}, },
"textract": service{
Endpoints: endpoints{
"fips-us-gov-east-1": endpoint{
Hostname: "textract-fips.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
},
"fips-us-gov-west-1": endpoint{
Hostname: "textract-fips.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
},
"us-gov-east-1": endpoint{},
"us-gov-west-1": endpoint{},
},
},
"transcribe": service{ "transcribe": service{
Defaults: endpoint{ Defaults: endpoint{
Protocols: []string{"https"}, Protocols: []string{"https"},

View File

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.40.50"
const SDKVersion = "1.41.0"

View File

@ -82,13 +82,17 @@ func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag)
field, _ := value.Type().FieldByName(payload)
tag = field.Tag
value = elemOf(value.FieldByName(payload))
if !value.IsValid() && tag.Get("type") != "structure" {
if !value.IsValid() {
return nil
}
}
buf.WriteByte('{')
defer buf.WriteString("}")
if !value.IsValid() {
return nil
}
t := value.Type()
first := true
@ -144,8 +148,6 @@ func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag)
}
buf.WriteString("}")
return nil
}

View File

@ -49,9 +49,8 @@ func Build(req *request.Request) {
buf = emptyJSON
}
if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" {
// Always serialize the body, don't suppress it.
req.SetBufferBody(buf)
}
if req.ClientInfo.TargetPrefix != "" {
target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name

View File

@ -28,18 +28,27 @@ func PayloadMember(i interface{}) interface{} {
return nil
}
// PayloadType returns the type of a payload field member of i if there is one, or "".
const nopayloadPayloadType = "nopayload"
// PayloadType returns the type of a payload field member of i if there is one,
// or "".
func PayloadType(i interface{}) string {
v := reflect.Indirect(reflect.ValueOf(i))
if !v.IsValid() {
return ""
}
if field, ok := v.Type().FieldByName("_"); ok {
if noPayload := field.Tag.Get(nopayloadPayloadType); noPayload != "" {
return nopayloadPayloadType
}
if payloadName := field.Tag.Get("payload"); payloadName != "" {
if member, ok := v.Type().FieldByName(payloadName); ok {
return member.Tag.Get("type")
}
}
}
return ""
}
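For illustration, a minimal standalone sketch of the `nopayload` tag detection introduced above (the `logoutInput` struct and `payloadType` helper are hypothetical and only mimic the logic shown; they are not SDK code):

package main

import (
    "fmt"
    "reflect"
)

// logoutInput mirrors the shape of the SDK's generated no-payload input structs.
type logoutInput struct {
    _           struct{} `type:"structure" nopayload:"true"`
    AccessToken *string  `type:"string"`
}

// payloadType re-implements the lookup shown above: the "_" field's tags are
// inspected, and a "nopayload" tag short-circuits the usual payload lookup.
func payloadType(i interface{}) string {
    v := reflect.Indirect(reflect.ValueOf(i))
    if !v.IsValid() {
        return ""
    }
    if field, ok := v.Type().FieldByName("_"); ok {
        if noPayload := field.Tag.Get("nopayload"); noPayload != "" {
            return "nopayload"
        }
        if payloadName := field.Tag.Get("payload"); payloadName != "" {
            if member, ok := v.Type().FieldByName(payloadName); ok {
                return member.Tag.Get("type")
            }
        }
    }
    return ""
}

func main() {
    fmt.Println(payloadType(&logoutInput{})) // prints "nopayload"
}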

File diff suppressed because it is too large

View File

@ -1030,6 +1030,11 @@ func (c *EC2) WaitUntilSnapshotCompletedWithContext(ctx aws.Context, input *Desc
Matcher: request.PathAllWaiterMatch, Argument: "Snapshots[].State",
Expected: "completed",
},
{
State: request.FailureWaiterState,
Matcher: request.PathAnyWaiterMatch, Argument: "Snapshots[].State",
Expected: "error",
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {

View File

@ -7057,7 +7057,8 @@ func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req
// the connection state of a custom key store, use the DescribeCustomKeyStores
// operation.
//
// Use the parameters of UpdateCustomKeyStore to edit your keystore settings.
// The CustomKeyStoreId parameter is required in all commands. Use the other
// parameters of UpdateCustomKeyStore to edit your key store settings.
//
// * Use the NewCustomKeyStoreName parameter to change the friendly name
// of the custom key store to the value that you specify.

View File

@ -550,7 +550,7 @@ func (s *AccountInfo) SetEmailAddress(v string) *AccountInfo {
}
type GetRoleCredentialsInput struct {
_ struct{} `type:"structure"`
_ struct{} `type:"structure" nopayload:"true"`
// The token issued by the CreateToken API call. For more information, see CreateToken
// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
@ -726,7 +726,7 @@ func (s *InvalidRequestException) RequestID() string {
}
type ListAccountRolesInput struct {
_ struct{} `type:"structure"`
_ struct{} `type:"structure" nopayload:"true"`
// The token issued by the CreateToken API call. For more information, see CreateToken
// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
@ -855,7 +855,7 @@ func (s *ListAccountRolesOutput) SetRoleList(v []*RoleInfo) *ListAccountRolesOut
}
type ListAccountsInput struct {
_ struct{} `type:"structure"`
_ struct{} `type:"structure" nopayload:"true"`
// The token issued by the CreateToken API call. For more information, see CreateToken
// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
@ -970,7 +970,7 @@ func (s *ListAccountsOutput) SetNextToken(v string) *ListAccountsOutput {
}
type LogoutInput struct {
_ struct{} `type:"structure"`
_ struct{} `type:"structure" nopayload:"true"`
// The token issued by the CreateToken API call. For more information, see CreateToken
// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
@ -1022,7 +1022,7 @@ func (s *LogoutInput) SetAccessToken(v string) *LogoutInput {
}
type LogoutOutput struct {
_ struct{} `type:"structure"`
_ struct{} `type:"structure" nopayload:"true"`
}
// String returns the string representation.

View File

@ -0,0 +1,74 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"bytes"
_ "embed"
"fmt"
"text/template"
"github.com/ghodss/yaml"
v1 "k8s.io/api/core/v1"
)
//go:embed csi-config-map.yaml
var csiConfigMap string
type CSIConfigMapValues struct {
Name string
}
var CSIConfigMapDefaults = CSIConfigMapValues{
Name: "ceph-csi-config",
}
// NewCSIConfigMap takes a name from the CSIConfigMapValues struct and replaces
// the value in the template. A ConfigMap object is returned which can be
// created in the Kubernetes cluster.
func NewCSIConfigMap(values CSIConfigMapValues) (*v1.ConfigMap, error) {
data, err := NewCSIConfigMapYAML(values)
if err != nil {
return nil, err
}
cm := &v1.ConfigMap{}
err = yaml.Unmarshal([]byte(data), cm)
if err != nil {
return nil, fmt.Errorf("failed convert YAML to %T: %w", cm, err)
}
return cm, nil
}
// NewCSIConfigMapYAML takes a name from the CSIConfigMapValues struct and
// replaces the value in the template. A ConfigMap object in YAML is returned
// which can be created in the Kubernetes cluster.
func NewCSIConfigMapYAML(values CSIConfigMapValues) (string, error) {
var buf bytes.Buffer
tmpl, err := template.New("CSIConfigMap").Parse(csiConfigMap)
if err != nil {
return "", fmt.Errorf("failed to parse template: %w", err)
}
err = tmpl.Execute(&buf, values)
if err != nil {
return "", fmt.Errorf("failed to replace values in template: %w", err)
}
return buf.String(), nil
}
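For illustration, a minimal sketch of how a deployment tool might call these helpers (the custom name in the second call is only an example):

package main

import (
    "fmt"

    "github.com/ceph/ceph-csi/api/deploy/kubernetes/rbd"
)

func main() {
    // Render the ConfigMap as YAML using the default name ("ceph-csi-config").
    yamlData, err := rbd.NewCSIConfigMapYAML(rbd.CSIConfigMapDefaults)
    if err != nil {
        panic(err)
    }
    fmt.Print(yamlData)

    // Or build a typed object for use with client-go or controller-runtime.
    cm, err := rbd.NewCSIConfigMap(rbd.CSIConfigMapValues{Name: "my-ceph-csi-config"})
    if err != nil {
        panic(err)
    }
    fmt.Println("ConfigMap name:", cm.Name)
}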

View File

@ -0,0 +1,8 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: "{{ .Name }}"
data:
config.json: |-
[]

View File

@ -0,0 +1,74 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"bytes"
_ "embed"
"fmt"
"text/template"
"github.com/ghodss/yaml"
storagev1 "k8s.io/api/storage/v1"
)
//go:embed csidriver.yaml
var csiDriver string
type CSIDriverValues struct {
Name string
}
var CSIDriverDefaults = CSIDriverValues{
Name: "rbd.csi.ceph.com",
}
// NewCSIDriver takes a driver name from the CSIDriverValues struct and replaces
// the value in the template. A CSIDriver object is returned which can be
// created in the Kubernetes cluster.
func NewCSIDriver(values CSIDriverValues) (*storagev1.CSIDriver, error) {
data, err := NewCSIDriverYAML(values)
if err != nil {
return nil, err
}
driver := &storagev1.CSIDriver{}
err = yaml.Unmarshal([]byte(data), driver)
if err != nil {
return nil, fmt.Errorf("failed convert YAML to %T: %w", driver, err)
}
return driver, nil
}
// NewCSIDriverYAML takes a driver name from the CSIDriverValues struct and replaces
// the value in the template. A CSIDriver object in YAML is returned which can be
// created in the Kubernetes cluster.
func NewCSIDriverYAML(values CSIDriverValues) (string, error) {
var buf bytes.Buffer
tmpl, err := template.New("CSIDriver").Parse(csiDriver)
if err != nil {
return "", fmt.Errorf("failed to parse template: %w", err)
}
err = tmpl.Execute(&buf, values)
if err != nil {
return "", fmt.Errorf("failed to replace values in template: %w", err)
}
return buf.String(), nil
}
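As with the ConfigMap helper, a short usage sketch (the printed field is just for demonstration):

package main

import (
    "fmt"

    "github.com/ceph/ceph-csi/api/deploy/kubernetes/rbd"
)

func main() {
    // Build the CSIDriver object with the default driver name ("rbd.csi.ceph.com").
    driver, err := rbd.NewCSIDriver(rbd.CSIDriverDefaults)
    if err != nil {
        panic(err)
    }
    fmt.Println("CSIDriver:", driver.Name)
}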

View File

@ -0,0 +1,10 @@
---
# if Kubernetes version is less than 1.18 change
# apiVersion to storage.k8s.io/v1beta1
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: "{{ .Name }}"
spec:
attachRequired: true
podInfoOnMount: false

View File

@ -1,3 +1,10 @@
## 1.16.5
Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC.
1.16.5 deprecates GinkgoParallelNode in favor of GinkgoParallelProcess
You can silence the RC advertisement by setting an `ACK_GINKGO_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`
## 1.16.4
### Fixes

View File

@ -1,23 +1,18 @@
![Ginkgo: A Go BDD Testing Framework](https://onsi.github.io/ginkgo/images/ginkgo.png)
[![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo)
[![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster)
Jump to the [docs](https://onsi.github.io/ginkgo/) | [中文文档](https://ke-chain.github.io/ginkgodoc) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
If you have a question, comment, bug report, feature request, etc. please open a GitHub issue, or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW).
# Ginkgo 2.0 is coming soon!
# Ginkgo 2.0 Release Candidate is available!
An effort is underway to develop and deliver Ginkgo 2.0. The work is happening in the [v2](https://github.com/onsi/ginkgo/tree/v2) branch and a changelog and migration guide is being maintained on that branch [here](https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md). Issue [#711](https://github.com/onsi/ginkgo/issues/711) is the central place for discussion and links to the original [proposal doc](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#).
An effort is underway to develop and deliver Ginkgo 2.0. The work is happening in the [ver2](https://github.com/onsi/ginkgo/tree/ver2) branch and a changelog and migration guide is being maintained on that branch [here](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md). Issue [#711](https://github.com/onsi/ginkgo/issues/711) is the central place for discussion.
As described in the [changelog](https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md) and [proposal](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#), Ginkgo 2.0 will clean up the Ginkgo codebase, deprecate and remove some v1 functionality, and add several new much-requested features. To help users get ready for the migration, Ginkgo v1 has started emitting deprecation warnings for features that will no longer be supported with links to documentation for how to migrate away from these features. If you have concerns or comments please chime in on [#711](https://github.com/onsi/ginkgo/issues/711).
As described in the [changelog](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md) and [proposal](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#), Ginkgo 2.0 will clean up the Ginkgo codebase, deprecate and remove some v1 functionality, and add several new much-requested features. To help users get ready for the migration, Ginkgo v1 has started emitting deprecation warnings for features that will no longer be supported with links to documentation for how to migrate away from these features. If you have concerns or comments please chime in on [#711](https://github.com/onsi/ginkgo/issues/711).
The current timeline for completion of 2.0 looks like:
Please start exploring and using the V2 release! To get started follow the [Using the Release Candidate](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta) directions in the migration guide.
- Early April 2021: first public release of 2.0, deprecation warnings land in v1.
- May 2021: first beta/rc of 2.0 with most new functionality in place.
- June/July 2021: 2.0 ships and fully replaces the 1.x codebase on master.
## TLDR
Ginkgo builds on Go's `testing` package, allowing expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style tests.

View File

@ -20,7 +20,7 @@ import (
"fmt" "fmt"
) )
const VERSION = "1.16.4" const VERSION = "1.16.5"
type GinkgoConfigType struct { type GinkgoConfigType struct {
RandomSeed int64 RandomSeed int64

View File

@ -73,9 +73,15 @@ func GinkgoRandomSeed() int64 {
return config.GinkgoConfig.RandomSeed
}
//GinkgoParallelNode returns the parallel node number for the current ginkgo process
//GinkgoParallelNode is deprecated, use GinkgoParallelProcess instead
//The node number is 1-indexed
func GinkgoParallelNode() int {
deprecationTracker.TrackDeprecation(types.Deprecations.ParallelNode(), codelocation.New(1))
return GinkgoParallelProcess()
}
//GinkgoParallelProcess returns the parallel process number for the current ginkgo process
//The process number is 1-indexed
func GinkgoParallelProcess() int {
return config.GinkgoConfig.ParallelNode
}
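A minimal spec fragment showing the rename in practice (package and spec names are made up):

package sharding_test

import (
    "fmt"

    . "github.com/onsi/ginkgo"
)

var _ = Describe("work sharded across parallel processes", func() {
    It("reads the 1-indexed process number", func() {
        // GinkgoParallelNode() still compiles but now logs a deprecation warning;
        // GinkgoParallelProcess() returns the same value.
        fmt.Printf("running in parallel process %d\n", GinkgoParallelProcess())
    })
})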
@ -109,6 +115,7 @@ func GinkgoT(optionalOffset ...int) GinkgoTInterface {
//in the testing package's T.
type GinkgoTInterface interface {
Cleanup(func())
Setenv(key, value string)
Error(args ...interface{})
Errorf(format string, args ...interface{})
Fail()

View File

@ -1,6 +1,6 @@
module github.com/onsi/ginkgo
go 1.15
go 1.16
require (
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0

View File

@ -34,6 +34,11 @@ func (t *ginkgoTestingTProxy) Cleanup(func()) {
// No-op
}
func (t *ginkgoTestingTProxy) Setenv(kev, value string) {
fmt.Println("Setenv is a noop for Ginkgo at the moment but will be implemented in V2")
// No-op until Cleanup is implemented
}
func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
t.fail(fmt.Sprintln(args...), t.offset)
}

View File

@ -52,6 +52,14 @@ func (d deprecations) Measure() Deprecation {
}
}
func (d deprecations) ParallelNode() Deprecation {
return Deprecation{
Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. Please use GinkgoParallelProcess instead.",
DocLink: "renamed-ginkgoparallelnode",
Version: "1.16.5",
}
}
func (d deprecations) Convert() Deprecation {
return Deprecation{
Message: "The convert command is deprecated in Ginkgo V2",
@ -99,16 +107,18 @@ func (d *DeprecationTracker) DidTrackDeprecations() bool {
}
func (d *DeprecationTracker) DeprecationsReport() string {
out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n")
out := formatter.F("\n{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n")
out += formatter.F("{{light-yellow}}============================================={{/}}\n")
out += formatter.F("Ginkgo 2.0 is under active development and will introduce (a small number of) breaking changes.\n")
out += formatter.F("{{bold}}{{green}}Ginkgo 2.0{{/}} is under active development and will introduce several new features, improvements, and a small handful of breaking changes.\n")
out += formatter.F("To learn more, view the migration guide at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md{{/}}\n")
out += formatter.F("A release candidate for 2.0 is now available and 2.0 should GA in Fall 2021. {{bold}}Please give the RC a try and send us feedback!{{/}}\n")
out += formatter.F("To comment, chime in at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}\n\n")
out += formatter.F(" - To learn more, view the migration guide at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md{{/}}\n")
out += formatter.F(" - For instructions on using the Release Candidate visit {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta{{/}}\n")
out += formatter.F(" - To comment, chime in at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}\n\n")
for deprecation, locations := range d.deprecations {
out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n")
if deprecation.DocLink != "" {
out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md#%s{{/}}\n", deprecation.DocLink)
out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#%s{{/}}\n", deprecation.DocLink)
}
for _, location := range locations {
out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location)

vendor/modules.txt
View File

@ -1,4 +1,4 @@
# github.com/aws/aws-sdk-go v1.40.50
# github.com/aws/aws-sdk-go v1.41.0
## explicit
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/awserr
@ -51,6 +51,7 @@ github.com/blang/semver
github.com/cenkalti/backoff/v3
# github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000 => ./api
## explicit
github.com/ceph/ceph-csi/api/deploy/kubernetes/rbd
github.com/ceph/ceph-csi/api/deploy/ocp
# github.com/ceph/go-ceph v0.11.0
## explicit
@ -225,7 +226,7 @@ github.com/nxadm/tail/ratelimiter
github.com/nxadm/tail/util
github.com/nxadm/tail/watch
github.com/nxadm/tail/winfile
# github.com/onsi/ginkgo v1.16.4
# github.com/onsi/ginkgo v1.16.5
## explicit
github.com/onsi/ginkgo
github.com/onsi/ginkgo/config
@ -1020,7 +1021,7 @@ k8s.io/utils/trace
# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client
# sigs.k8s.io/controller-runtime v0.10.1
# sigs.k8s.io/controller-runtime v0.10.2
## explicit
sigs.k8s.io/controller-runtime/pkg/cache
sigs.k8s.io/controller-runtime/pkg/cache/internal

View File

@ -21,8 +21,10 @@ import (
"errors" "errors"
"fmt" "fmt"
"sync" "sync"
"time"
"k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/workqueue" "k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/event"
@ -119,17 +121,34 @@ func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue w
ctx, ks.startCancel = context.WithCancel(ctx) ctx, ks.startCancel = context.WithCancel(ctx)
ks.started = make(chan error) ks.started = make(chan error)
go func() { go func() {
var (
i cache.Informer
lastErr error
)
// Tries to get an informer until it returns true,
// an error or the specified context is cancelled or expired.
if err := wait.PollImmediateUntilWithContext(ctx, 10*time.Second, func(ctx context.Context) (bool, error) {
// Lookup the Informer from the Cache and add an EventHandler which populates the Queue // Lookup the Informer from the Cache and add an EventHandler which populates the Queue
i, err := ks.cache.GetInformer(ctx, ks.Type) i, lastErr = ks.cache.GetInformer(ctx, ks.Type)
if err != nil { if lastErr != nil {
kindMatchErr := &meta.NoKindMatchError{} kindMatchErr := &meta.NoKindMatchError{}
if errors.As(err, &kindMatchErr) { if errors.As(lastErr, &kindMatchErr) {
log.Error(err, "if kind is a CRD, it should be installed before calling Start", log.Error(lastErr, "if kind is a CRD, it should be installed before calling Start",
"kind", kindMatchErr.GroupKind) "kind", kindMatchErr.GroupKind)
} }
return false, nil // Retry.
}
return true, nil
}); err != nil {
if lastErr != nil {
ks.started <- fmt.Errorf("failed to get informer from cache: %w", lastErr)
return
}
ks.started <- err ks.started <- err
return return
} }
i.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) i.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct})
if !ks.cache.WaitForCacheSync(ctx) { if !ks.cache.WaitForCacheSync(ctx) {
// Would be great to return something more informative here // Would be great to return something more informative here
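For reference, a standalone sketch of the wait.PollImmediateUntilWithContext semantics the retry above relies on (the interval and condition here are illustrative; the change above polls every 10 seconds for the informer):

package main

import (
    "context"
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    attempts := 0
    // The condition runs immediately, then on every tick, until it returns
    // true, returns an error, or the context is cancelled.
    err := wait.PollImmediateUntilWithContext(ctx, 500*time.Millisecond, func(ctx context.Context) (bool, error) {
        attempts++
        return attempts >= 3, nil // pretend the informer becomes available on the third try
    })
    if err != nil {
        fmt.Println("gave up:", err)
        return
    }
    fmt.Println("condition met after", attempts, "attempts")
}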

View File

@ -0,0 +1,85 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package admission
import (
"context"
"encoding/json"
"errors"
"net/http"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
)
// CustomDefaulter defines functions for setting defaults on resources.
type CustomDefaulter interface {
Default(ctx context.Context, obj runtime.Object) error
}
// WithCustomDefaulter creates a new Webhook for a CustomDefaulter interface.
func WithCustomDefaulter(obj runtime.Object, defaulter CustomDefaulter) *Webhook {
return &Webhook{
Handler: &defaulterForType{object: obj, defaulter: defaulter},
}
}
type defaulterForType struct {
defaulter CustomDefaulter
object runtime.Object
decoder *Decoder
}
var _ DecoderInjector = &defaulterForType{}
func (h *defaulterForType) InjectDecoder(d *Decoder) error {
h.decoder = d
return nil
}
// Handle handles admission requests.
func (h *defaulterForType) Handle(ctx context.Context, req Request) Response {
if h.defaulter == nil {
panic("defaulter should never be nil")
}
if h.object == nil {
panic("object should never be nil")
}
// Get the object in the request
obj := h.object.DeepCopyObject()
if err := h.decoder.Decode(req, obj); err != nil {
return Errored(http.StatusBadRequest, err)
}
// Default the object
if err := h.defaulter.Default(ctx, obj); err != nil {
var apiStatus apierrors.APIStatus
if errors.As(err, &apiStatus) {
return validationResponseFromStatus(false, apiStatus.Status())
}
return Denied(err.Error())
}
// Create the patch
marshalled, err := json.Marshal(obj)
if err != nil {
return Errored(http.StatusInternalServerError, err)
}
return PatchResponseFromRaw(req.Object.Raw, marshalled)
}
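For context, a hedged sketch of how a project might wire up a CustomDefaulter (the CronJob type, the default it applies, and the webhook path are hypothetical, not part of this change):

package main

import (
    "context"

    batchv1 "k8s.io/api/batch/v1"
    "k8s.io/apimachinery/pkg/runtime"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// cronJobDefaulter fills in spec.suspend when it is unset; purely illustrative.
type cronJobDefaulter struct{}

func (d *cronJobDefaulter) Default(ctx context.Context, obj runtime.Object) error {
    cj, ok := obj.(*batchv1.CronJob)
    if !ok {
        return nil
    }
    if cj.Spec.Suspend == nil {
        suspend := false
        cj.Spec.Suspend = &suspend
    }
    return nil
}

func main() {
    mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
    if err != nil {
        panic(err)
    }
    // WithCustomDefaulter returns an admission.Webhook that decodes the request,
    // calls Default, and responds with a JSON patch against the original object.
    mgr.GetWebhookServer().Register(
        "/mutate-batch-v1-cronjob",
        admission.WithCustomDefaulter(&batchv1.CronJob{}, &cronJobDefaulter{}),
    )
    if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
        panic(err)
    }
}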

View File

@ -0,0 +1,111 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package admission
import (
"context"
"errors"
"fmt"
"net/http"
v1 "k8s.io/api/admission/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
)
// CustomValidator defines functions for validating an operation.
type CustomValidator interface {
ValidateCreate(ctx context.Context, obj runtime.Object) error
ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error
ValidateDelete(ctx context.Context, obj runtime.Object) error
}
// WithCustomValidator creates a new Webhook for validating the provided type.
func WithCustomValidator(obj runtime.Object, validator CustomValidator) *Webhook {
return &Webhook{
Handler: &validatorForType{object: obj, validator: validator},
}
}
type validatorForType struct {
validator CustomValidator
object runtime.Object
decoder *Decoder
}
var _ DecoderInjector = &validatorForType{}
// InjectDecoder injects the decoder into a validatingHandler.
func (h *validatorForType) InjectDecoder(d *Decoder) error {
h.decoder = d
return nil
}
// Handle handles admission requests.
func (h *validatorForType) Handle(ctx context.Context, req Request) Response {
if h.validator == nil {
panic("validator should never be nil")
}
if h.object == nil {
panic("object should never be nil")
}
// Get the object in the request
obj := h.object.DeepCopyObject()
var err error
switch req.Operation {
case v1.Create:
if err := h.decoder.Decode(req, obj); err != nil {
return Errored(http.StatusBadRequest, err)
}
err = h.validator.ValidateCreate(ctx, obj)
case v1.Update:
oldObj := obj.DeepCopyObject()
if err := h.decoder.DecodeRaw(req.Object, obj); err != nil {
return Errored(http.StatusBadRequest, err)
}
if err := h.decoder.DecodeRaw(req.OldObject, oldObj); err != nil {
return Errored(http.StatusBadRequest, err)
}
err = h.validator.ValidateUpdate(ctx, oldObj, obj)
case v1.Delete:
// In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346
// OldObject contains the object being deleted
if err := h.decoder.DecodeRaw(req.OldObject, obj); err != nil {
return Errored(http.StatusBadRequest, err)
}
err = h.validator.ValidateDelete(ctx, obj)
default:
return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation request %q", req.Operation))
}
// Check the error message first.
if err != nil {
var apiStatus apierrors.APIStatus
if errors.As(err, &apiStatus) {
return validationResponseFromStatus(false, apiStatus.Status())
}
return Denied(err.Error())
}
// Return allowed if everything succeeded.
return Allowed("")
}
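And a matching sketch for the validating side (the delete rule is invented and only demonstrates the three CustomValidator hooks):

package main

import (
    "context"
    "fmt"

    batchv1 "k8s.io/api/batch/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// cronJobValidator implements the CustomValidator interface for CronJobs.
type cronJobValidator struct{}

func (v *cronJobValidator) ValidateCreate(ctx context.Context, obj runtime.Object) error {
    return nil
}

func (v *cronJobValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error {
    return nil
}

func (v *cronJobValidator) ValidateDelete(ctx context.Context, obj runtime.Object) error {
    cj, ok := obj.(*batchv1.CronJob)
    if !ok {
        return nil
    }
    if cj.Spec.Suspend != nil && *cj.Spec.Suspend {
        return fmt.Errorf("cronjob %s is suspended; resume it before deleting", cj.Name)
    }
    return nil
}

func main() {
    // In a real manager this webhook would be registered on the webhook server,
    // e.g. mgr.GetWebhookServer().Register("/validate-batch-v1-cronjob", wh).
    wh := admission.WithCustomValidator(&batchv1.CronJob{}, &cronJobValidator{})
    fmt.Printf("validating webhook constructed: %T\n", wh)
}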

View File

@ -29,6 +29,12 @@ type Defaulter = admission.Defaulter
// Validator defines functions for validating an operation.
type Validator = admission.Validator
// CustomDefaulter defines functions for setting defaults on resources.
type CustomDefaulter = admission.CustomDefaulter
// CustomValidator defines functions for validating an operation.
type CustomValidator = admission.CustomValidator
// AdmissionRequest defines the input for an admission handler.
// It contains information to identify the object in
// question (group, version, kind, resource, subresource,