Mirror of https://github.com/ceph/ceph-csi.git, synced 2024-12-18 02:50:30 +00:00

rebase: update kubernetes to v1.25.0

update kubernetes to latest v1.25.0 release.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>

parent: f47839d73d
commit: e3bf375035
go.mod (86 lines changed)

@@ -25,21 +25,21 @@ require (
 github.com/onsi/gomega v1.20.0
 github.com/prometheus/client_golang v1.12.2
 github.com/stretchr/testify v1.8.0
-golang.org/x/crypto v0.0.0-20220214200702-86341886e292
+golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd
-golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b
-golang.org/x/sys v0.0.0-20220422013727-9388b58f7150
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f
 google.golang.org/grpc v1.48.0
 google.golang.org/protobuf v1.28.0
-k8s.io/api v0.24.4
+k8s.io/api v0.25.0
-k8s.io/apimachinery v0.24.4
+k8s.io/apimachinery v0.25.0
 k8s.io/client-go v12.0.0+incompatible
-k8s.io/cloud-provider v0.24.4
+k8s.io/cloud-provider v0.25.0
 k8s.io/klog/v2 v2.70.1
 //
 // when updating k8s.io/kubernetes, make sure to update the replace section too
 //
-k8s.io/kubernetes v1.24.4
+k8s.io/kubernetes v1.25.0
-k8s.io/mount-utils v0.24.4
+k8s.io/mount-utils v0.25.0
 k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed
 sigs.k8s.io/controller-runtime v0.11.0-beta.0.0.20211208212546-f236f0345ad2
 )
@@ -62,7 +62,7 @@ require (
 github.com/cespare/xxhash/v2 v2.1.2 // indirect
 github.com/davecgh/go-spew v1.1.1 // indirect
 github.com/docker/distribution v2.8.1+incompatible // indirect
-github.com/emicklei/go-restful v2.9.5+incompatible // indirect
+github.com/emicklei/go-restful/v3 v3.8.0 // indirect
 github.com/evanphx/json-patch v4.12.0+incompatible // indirect
 github.com/fatih/color v1.9.0 // indirect
 github.com/felixge/httpsnoop v1.0.1 // indirect
@@ -105,6 +105,7 @@ require (
 github.com/jmespath/go-jmespath v0.4.0 // indirect
 github.com/josharian/intern v1.0.0 // indirect
 github.com/json-iterator/go v1.1.12 // indirect
+github.com/kr/pretty v0.2.1 // indirect
 github.com/mailru/easyjson v0.7.6 // indirect
 github.com/mattn/go-colorable v0.1.12 // indirect
 github.com/mattn/go-isatty v0.0.14 // indirect
@@ -122,6 +123,7 @@ require (
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 github.com/nxadm/tail v1.4.8 // indirect
 github.com/oklog/run v1.0.0 // indirect
+github.com/onsi/ginkgo/v2 v2.1.4 // indirect
 github.com/opencontainers/go-digest v1.0.0 // indirect
 github.com/opencontainers/selinux v1.10.0 // indirect
 github.com/openshift/api v0.0.0-20210927171657-636513e97fda // indirect
@@ -153,23 +155,23 @@ require (
 golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
 gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
 google.golang.org/appengine v1.6.7 // indirect
-google.golang.org/genproto v0.0.0-20220208230804-65c12eb4c068 // indirect
+google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
 gopkg.in/inf.v0 v0.9.1 // indirect
 gopkg.in/square/go-jose.v2 v2.5.1 // indirect
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 gopkg.in/yaml.v2 v2.4.0 // indirect
 gopkg.in/yaml.v3 v3.0.1 // indirect
-k8s.io/apiextensions-apiserver v0.24.4 // indirect
+k8s.io/apiextensions-apiserver v0.25.0 // indirect
-k8s.io/apiserver v0.24.4 // indirect
+k8s.io/apiserver v0.25.0 // indirect
-k8s.io/component-base v0.24.4 // indirect
+k8s.io/component-base v0.25.0 // indirect
-k8s.io/component-helpers v0.24.4 // indirect
+k8s.io/component-helpers v0.25.0 // indirect
-k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
+k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
 k8s.io/kubectl v0.0.0 // indirect
 k8s.io/kubelet v0.0.0 // indirect
 k8s.io/pod-security-admission v0.0.0 // indirect
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 // indirect
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.32 // indirect
-sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
+sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
+sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
 sigs.k8s.io/yaml v1.3.0 // indirect
 )

@@ -182,31 +184,31 @@ replace (
 //
 // k8s.io/kubernetes depends on these k8s.io packages, but unversioned
 //
-k8s.io/api => k8s.io/api v0.24.4
+k8s.io/api => k8s.io/api v0.25.0
-k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.24.4
+k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.25.0
-k8s.io/apimachinery => k8s.io/apimachinery v0.24.4
+k8s.io/apimachinery => k8s.io/apimachinery v0.25.0
-k8s.io/apiserver => k8s.io/apiserver v0.24.4
+k8s.io/apiserver => k8s.io/apiserver v0.25.0
-k8s.io/cli-runtime => k8s.io/cli-runtime v0.24.4
+k8s.io/cli-runtime => k8s.io/cli-runtime v0.25.0
-k8s.io/client-go => k8s.io/client-go v0.24.4
+k8s.io/client-go => k8s.io/client-go v0.25.0
-k8s.io/cloud-provider => k8s.io/cloud-provider v0.24.4
+k8s.io/cloud-provider => k8s.io/cloud-provider v0.25.0
-k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.24.4
+k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.25.0
-k8s.io/code-generator => k8s.io/code-generator v0.24.4
+k8s.io/code-generator => k8s.io/code-generator v0.25.0
-k8s.io/component-base => k8s.io/component-base v0.24.4
+k8s.io/component-base => k8s.io/component-base v0.25.0
-k8s.io/component-helpers => k8s.io/component-helpers v0.24.4
+k8s.io/component-helpers => k8s.io/component-helpers v0.25.0
-k8s.io/controller-manager => k8s.io/controller-manager v0.24.4
+k8s.io/controller-manager => k8s.io/controller-manager v0.25.0
-k8s.io/cri-api => k8s.io/cri-api v0.24.4
+k8s.io/cri-api => k8s.io/cri-api v0.25.0
-k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.24.4
+k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.25.0
-k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.24.4
+k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.25.0
-k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.24.4
+k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.25.0
-k8s.io/kube-proxy => k8s.io/kube-proxy v0.24.4
+k8s.io/kube-proxy => k8s.io/kube-proxy v0.25.0
-k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.4
+k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.25.0
-k8s.io/kubectl => k8s.io/kubectl v0.24.4
+k8s.io/kubectl => k8s.io/kubectl v0.25.0
-k8s.io/kubelet => k8s.io/kubelet v0.24.4
+k8s.io/kubelet => k8s.io/kubelet v0.25.0
-k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.24.4
+k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.25.0
-k8s.io/metrics => k8s.io/metrics v0.24.4
+k8s.io/metrics => k8s.io/metrics v0.25.0
 k8s.io/mount-utils => k8s.io/mount-utils v0.25.0-alpha.3.0.20220801203918-ff562e546084
-k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.24.4
+k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.25.0
-k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.24.4
+k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.25.0
 // layeh.com seems to be misbehaving
 layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917
 )
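Note on the replace section (an illustrative aside, not part of the commit): k8s.io/kubernetes requires its own staging modules (k8s.io/api, k8s.io/apiserver, k8s.io/kubectl, and so on) at the placeholder version v0.0.0, so any module that depends on it has to pin every staging module to the release that matches (v0.25.0 for Kubernetes v1.25.0). That is what the "when updating k8s.io/kubernetes, make sure to update the replace section too" comment refers to. A minimal go.mod sketch of the pattern, trimmed to a few of the modules that appear in this diff:

    require (
        k8s.io/kubernetes v1.25.0
    )

    replace (
        // the staging modules ship as v0.0.0 inside k8s.io/kubernetes;
        // pin each one to the matching release line
        k8s.io/api => k8s.io/api v0.25.0
        k8s.io/client-go => k8s.io/client-go v0.25.0
        k8s.io/kubectl => k8s.io/kubectl v0.25.0
    )

Once go.mod is edited this way, a go mod tidy run (an assumption here; the commit does not say which commands were used) refreshes go.sum, and the checksum-only churn in the next file is that kind of mechanical update.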
go.sum (276 lines changed)

@@ -1,5 +1,5 @@
 bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
-bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM=
+bitbucket.org/bertimus9/systemstat v0.5.0/go.mod h1:EkUWPp8lKFPMXP8vnbpT5JDI0W/sTiLZAvN8ONWErHY=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
@@ -22,6 +22,13 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW
 cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
 cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
 cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -50,11 +57,12 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
 github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
 github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
 github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
 github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
-github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
 github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18=
 github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE=
 github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
@@ -64,6 +72,7 @@ github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxB
 github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
 github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
 github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
 github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
 github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
@@ -77,14 +86,14 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
 github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
-github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3/go.mod h1:8XasY4ymP2V/tn2OOV9ZadmiTE1FIB/h3W+yNlPttKw=
+github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b/go.mod h1:FNj4KYEAAHfYu68kRYolGoxkaJn+6mdEsaM12VTwuI0=
 github.com/IBM/keyprotect-go-client v0.5.1/go.mod h1:5TwDM/4FRJq1ZOlwQL1xFahLWQ3TveR88VmL1u3njyI=
 github.com/IBM/keyprotect-go-client v0.8.0 h1:IgLKSigHRpCCl5cZjBkOYziUZ9zxn6w9iRh+KA8Siww=
 github.com/IBM/keyprotect-go-client v0.8.0/go.mod h1:yr8h2noNgU8vcbs+vhqoXp3Lmv73PI0zAc6VMgFvWwM=
 github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA=
 github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E=
 github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
-github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
+github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
 github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
 github.com/Microsoft/go-winio v0.4.13/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
 github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
@@ -125,7 +134,7 @@ github.com/ansel1/merry/v2 v2.0.1 h1:WeiKZdslHPAPFYxTtgX7clC2Vh75NCoWs5OjCZbIA0A
 github.com/ansel1/merry/v2 v2.0.1/go.mod h1:dD5OhpiPrVkvgseRYd+xgYlx7s6ytU3v9BTTJlDA7FM=
 github.com/ansel1/vespucci/v4 v4.1.1/go.mod h1:zzdrO4IgBfgcGMbGTk/qNGL8JPslmW3nPpcBHKReFYY=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
+github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -178,7 +187,6 @@ github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQ
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
 github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
@@ -202,7 +210,7 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
+github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
 github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
 github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0/go.mod h1:5d8DqS60xkj9k3aXfL3+mXBH0DPYO0FQjcKosxl+b/Q=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -215,7 +223,6 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381/go.mod h1:e5+USP2j8Le2M0Jo3qKPFnNhuo1wueU4nWHCXBOfQ14=
 github.com/cloudfoundry/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:Zv7xtAh/T/tmfZlxpESaWWiWOdiJz2GfbBYxImuI6T4=
-github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -240,7 +247,6 @@ github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4q
 github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
 github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
 github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.12/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
 github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
@@ -248,7 +254,7 @@ github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDX
 github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
 github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
 github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
-github.com/coredns/corefile-migration v1.0.14/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE=
+github.com/coredns/corefile-migration v1.0.17/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -283,16 +289,15 @@ github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWE
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
+github.com/daviddengcn/go-colortext v1.0.0/go.mod h1:zDqEI5NVUop5QPpVJUxE9UO10hRnmkD5G4Pmri9+m4c=
 github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
@@ -316,6 +321,9 @@ github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
 github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful/v3 v3.5.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
+github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -390,11 +398,13 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
 github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
-github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk=
 github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
+github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
+github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
 github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
 github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
 github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
@@ -425,6 +435,7 @@ github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
 github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0=
 github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
@@ -432,6 +443,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
@@ -458,17 +471,20 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
 github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
+github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho=
+github.com/golangplus/bytes v1.0.0/go.mod h1:AdRaCFwmc/00ZzELMWb01soso6W1R/++O1XL80yAn+A=
+github.com/golangplus/fmt v1.0.0/go.mod h1:zpM0OfbMCjPtd2qkTD/jX2MgiFCqklhSUFyDW44gVQE=
+github.com/golangplus/testing v1.0.0/go.mod h1:ZDreixUV3YzhoVraIDyOzHrr76p6NUh6k/pPg/Q3gYA=
 github.com/gomodules/jsonpatch/v2 v2.2.0 h1:QBjDK/nX43P4z/Os3gnk8VeFdLDgBuMns1Wljyo607U=
 github.com/gomodules/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/cadvisor v0.44.1/go.mod h1:GQ9KQfz0iNHQk3D6ftzJWK4TXabfIgM10Oy3FkR+Gzg=
+github.com/google/cadvisor v0.45.0/go.mod h1:vsMT3Uv2XjQ8M7WUtKARV74mU/HN64C4XtM1bJhUKcU=
-github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
+github.com/google/cel-go v0.12.4/go.mod h1:Av7CU6r6X3YmcHR9GXqVDaEJYfEtSxl6wvIjUQTriCw=
-github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
 github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
 github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -495,6 +511,7 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
 github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -508,6 +525,9 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -517,6 +537,8 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
 github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
 github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
 github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
@@ -705,7 +727,6 @@ github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7U
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
@@ -801,10 +822,8 @@ github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z
 github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
 github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
 github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI=
@@ -909,7 +928,6 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
@@ -932,8 +950,7 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
-github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
 github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -962,7 +979,6 @@ github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTK
 github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
 github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pierrec/lz4 v2.2.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
@@ -983,8 +999,8 @@ github.com/portworx/talisman v0.0.0-20191007232806-837747f38224/go.mod h1:OjpMH9
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI=
 github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
 github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
 github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.44.1/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg=
 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.46.0/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg=
@@ -998,6 +1014,7 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
 github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
 github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
 github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
@@ -1032,7 +1049,6 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
|
|||||||
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
|
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
|
||||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||||
github.com/quobyte/api v0.1.8/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI=
|
|
||||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
|
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
|
||||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||||
@ -1051,7 +1067,7 @@ github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIH
|
|||||||
github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
||||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||||
github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
|
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
|
||||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||||
github.com/shirou/gopsutil v2.19.9+incompatible h1:IrPVlK4nfwW10DF7pW+7YJKws9NkgNzWozwwWv9FsgY=
|
github.com/shirou/gopsutil v2.19.9+incompatible h1:IrPVlK4nfwW10DF7pW+7YJKws9NkgNzWozwwWv9FsgY=
|
||||||
@ -1080,16 +1096,12 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd
|
|||||||
github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
|
github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
|
||||||
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
||||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
|
||||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||||
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
|
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
|
||||||
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
|
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
|
||||||
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
|
|
||||||
github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
|
github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
|
||||||
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
|
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
|
||||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
|
||||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
|
||||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
@ -1097,9 +1109,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
|||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||||
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
|
|
||||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||||
github.com/storageos/go-api v2.2.0+incompatible/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY=
|
|
||||||
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
@ -1136,7 +1146,7 @@ github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhe
|
|||||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||||
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
|
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
|
||||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||||
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
|
github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
|
||||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
@ -1145,21 +1155,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
|
|||||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
|
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||||
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
|
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
|
||||||
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
||||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||||
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
|
||||||
go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
||||||
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU=
|
||||||
go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY=
|
||||||
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
|
go.etcd.io/etcd/pkg/v3 v3.5.4/go.mod h1:OI+TtO+Aa3nhQSppMbwE4ld3uF1/fqqwbpfndbbrEe0=
|
||||||
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
|
go.etcd.io/etcd/raft/v3 v3.5.4/go.mod h1:SCuunjYvZFC0fBX0vxMSPjuZmpcSk+XaAcMrD6Do03w=
|
||||||
go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q=
|
go.etcd.io/etcd/server/v3 v3.5.4/go.mod h1:S5/YTU15KxymM5l3T6b09sNOHPXqGYIZStpuuGbb65c=
|
||||||
go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
|
|
||||||
go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
|
|
||||||
go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
|
|
||||||
go.mongodb.org/mongo-driver v1.2.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
go.mongodb.org/mongo-driver v1.2.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||||
go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A=
|
go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A=
|
||||||
go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M=
|
go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M=
|
||||||
@ -1173,9 +1181,11 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
|||||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||||
go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
|
go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
|
||||||
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
|
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.20.0/go.mod h1:oQkZOyq61qZBItEFqhfpobK6X/oDPR7/Qr+MXjVSTks=
|
||||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 h1:Q3C9yzW6I9jqEc8sawxzxZmY48fs9u220KXq6d5s3XU=
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 h1:Q3C9yzW6I9jqEc8sawxzxZmY48fs9u220KXq6d5s3XU=
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
|
||||||
|
go.opentelemetry.io/contrib/propagators v0.20.0/go.mod h1:yLmt93MeSiARUwrK57bOZ4FBruRN4taLiW1lcGfnOes=
|
||||||
go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g=
|
go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g=
|
||||||
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
|
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
|
||||||
go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg=
|
go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg=
|
||||||
@ -1234,12 +1244,13 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U
|
|||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
|
||||||
golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE=
|
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
|
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38=
|
||||||
|
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
@ -1288,6 +1299,7 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||||
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||||
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
@ -1348,8 +1360,9 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx
|
|||||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA=
|
|
||||||
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
|
golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
|
||||||
|
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
@ -1365,10 +1378,11 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ
|
|||||||
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
|
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
|
||||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
@ -1382,6 +1396,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
|||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
@ -1437,7 +1452,6 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
@ -1460,15 +1474,19 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
@ -1476,11 +1494,12 @@ golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||||||
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc=
|
|
||||||
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
|
||||||
|
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
@ -1573,10 +1592,12 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
|
|||||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
|
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
|
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
|
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
|
||||||
golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
|
||||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||||
|
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
@ -1615,8 +1636,15 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q
|
|||||||
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
||||||
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
|
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
|
||||||
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
|
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
|
||||||
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
|
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
|
||||||
google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I=
|
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
|
||||||
|
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
|
||||||
|
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
|
||||||
|
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
|
||||||
|
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||||
|
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||||
|
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
|
||||||
|
google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4=
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
@ -1666,7 +1694,6 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D
|
|||||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
@ -1677,12 +1704,26 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D
|
|||||||
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
||||||
google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
|
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
|
||||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||||
|
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||||
|
google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||||
|
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
|
||||||
|
google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
|
||||||
|
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
|
||||||
|
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
|
||||||
|
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
|
||||||
|
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
|
||||||
|
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||||
|
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||||
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||||
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||||
google.golang.org/genproto v0.0.0-20220208230804-65c12eb4c068 h1:pwzFiZfBTH/GjBWz1BcDwMBaHBo8mZvpLa7eBKJpFAk=
|
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||||
|
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||||
|
google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||||
google.golang.org/genproto v0.0.0-20220208230804-65c12eb4c068/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
google.golang.org/genproto v0.0.0-20220208230804-65c12eb4c068/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||||
|
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 h1:hrbNEivu7Zn1pxvHk6MBrq9iE22woVILTHqexqBxe6I=
|
||||||
|
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||||
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||||
@ -1711,12 +1752,18 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
|
|||||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||||
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||||
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||||
|
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||||
|
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||||
|
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||||
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
|
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
|
||||||
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||||
|
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||||
|
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||||
google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=
|
google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=
|
||||||
google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||||
|
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
@ -1745,7 +1792,6 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
|||||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||||
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||||
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
|
||||||
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
|
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
|
||||||
gopkg.in/ldap.v3 v3.0.3/go.mod h1:oxD7NyBuxchC+SgJDE1Q5Od05eGt29SDQVBmV+HYbzw=
|
gopkg.in/ldap.v3 v3.0.3/go.mod h1:oxD7NyBuxchC+SgJDE1Q5Od05eGt29SDQVBmV+HYbzw=
|
||||||
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
|
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
|
||||||
@ -1789,29 +1835,28 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
|||||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.24.4 h1:I5Y645gJ8zWKawyr78lVfDQkZrAViSbeRXsPZWTxmXk=
+k8s.io/api v0.25.0 h1:H+Q4ma2U/ww0iGB78ijZx6DRByPz6/733jIuFpX70e0=
-k8s.io/api v0.24.4/go.mod h1:42pVfA0NRxrtJhZQOvRSyZcJihzAdU59WBtTjYcB0/M=
+k8s.io/api v0.25.0/go.mod h1:ttceV1GyV1i1rnmvzT3BST08N6nGt+dudGrquzVQWPk=
-k8s.io/apiextensions-apiserver v0.24.4 h1:w53Pm4zu8fCt9WfiRgS2YI6LE6I4NJ5aUi78GElD3K8=
+k8s.io/apiextensions-apiserver v0.25.0 h1:CJ9zlyXAbq0FIW8CD7HHyozCMBpDSiH7EdrSTCZcZFY=
-k8s.io/apiextensions-apiserver v0.24.4/go.mod h1:iDK+Xb4jsPNnRGj5jU/WqqjLvt8363M7cKixKe1C9+U=
+k8s.io/apiextensions-apiserver v0.25.0/go.mod h1:3pAjZiN4zw7R8aZC5gR0y3/vCkGlAjCazcg1me8iB/E=
-k8s.io/apimachinery v0.24.4 h1:S0Ur3J/PbivTcL43EdSdPhqCqKla2NIuneNwZcTDeGQ=
+k8s.io/apimachinery v0.25.0 h1:MlP0r6+3XbkUG2itd6vp3oxbtdQLQI94fD5gCS+gnoU=
-k8s.io/apimachinery v0.24.4/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
+k8s.io/apimachinery v0.25.0/go.mod h1:qMx9eAk0sZQGsXGu86fab8tZdffHbwUfsvzqKn4mfB0=
-k8s.io/apiserver v0.24.4 h1:ei+OunC83pVEiagBeZhTnRZvkclHgpzs/rrm7aSBDYs=
+k8s.io/apiserver v0.25.0 h1:8kl2ifbNffD440MyvHtPaIz1mw4mGKVgWqM0nL+oyu4=
-k8s.io/apiserver v0.24.4/go.mod h1:mAuC3pZVc0IDXLx7lUHoisBOtBa1SobfLW/CI3klXQE=
+k8s.io/apiserver v0.25.0/go.mod h1:BKwsE+PTC+aZK+6OJQDPr0v6uS91/HWxX7evElAH6xo=
-k8s.io/cli-runtime v0.24.4/go.mod h1:RF+cSLYXkPV3WyvPrX2qeRLEUJY38INWx6jLKVLFCxM=
+k8s.io/cli-runtime v0.25.0/go.mod h1:bHOI5ZZInRHhbq12OdUiYZQN8ml8aKZLwQgt9QlLINw=
-k8s.io/client-go v0.24.4 h1:hIAIJZIPyaw46AkxwyR0FRfM/pRxpUNTd3ysYu9vyRg=
+k8s.io/client-go v0.25.0 h1:CVWIaCETLMBNiTUta3d5nzRbXvY5Hy9Dpl+VvREpu5E=
-k8s.io/client-go v0.24.4/go.mod h1:+AxlPWw/H6f+EJhRSjIeALaJT4tbeB/8g9BNvXGPd0Y=
+k8s.io/client-go v0.25.0/go.mod h1:lxykvypVfKilxhTklov0wz1FoaUZ8X4EwbhS6rpRfN8=
-k8s.io/cloud-provider v0.24.4 h1:UTk12SdNuZoFn6pQpK1c/tsOBey4e7TT6zNC10UryqU=
+k8s.io/cloud-provider v0.25.0 h1:ONX5BON6f1Mxa2GWvPyKn+QsZXaLauPUte7MZxfWUro=
-k8s.io/cloud-provider v0.24.4/go.mod h1:HTfeUcH+pPmmtMxA3qnFB1ZrrEff86BEakmIQCU/RUM=
+k8s.io/cloud-provider v0.25.0/go.mod h1:afVfVCIYOUER914WmSp0QpAtJn12gv4qu9NMT4XBxZo=
-k8s.io/cluster-bootstrap v0.24.4/go.mod h1:D9SYcEo302eah8yxFoPciGy8dRnReZr7fG+waTAvj14=
+k8s.io/cluster-bootstrap v0.25.0/go.mod h1:x/TCtY3EiuR/rODkA3SvVQT3uSssQLf9cXcmSjdDTe0=
-k8s.io/code-generator v0.24.4/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
+k8s.io/code-generator v0.25.0/go.mod h1:B6jZgI3DvDFAualltPitbYMQ74NjaCFxum3YeKZZ+3w=
-k8s.io/component-base v0.24.4 h1:WEGRp06GBYVwxp5JdiRaJ1zkdOhrqucxRv/8IrABLG0=
+k8s.io/component-base v0.25.0 h1:haVKlLkPCFZhkcqB6WCvpVxftrg6+FK5x1ZuaIDaQ5Y=
-k8s.io/component-base v0.24.4/go.mod h1:sWxkgcMfbYHadw0OJ0N+vIscd14/nqSIM2veCdg843o=
+k8s.io/component-base v0.25.0/go.mod h1:F2Sumv9CnbBlqrpdf7rKZTmmd2meJq0HizeyY/yAFxk=
-k8s.io/component-helpers v0.24.4 h1:gjginN6YYh/s3xg3PQ0gTFqBRGo27/wdLn0vRmJdHu8=
+k8s.io/component-helpers v0.25.0 h1:vNzYfqnVXj7f+CPksduKVv2Z9kC+IDsOs9yaOyxZrj0=
-k8s.io/component-helpers v0.24.4/go.mod h1:xAHlOKU8rAjLgXWJEsueWLR1LDMThbaPf2YvgKpSyQ8=
+k8s.io/component-helpers v0.25.0/go.mod h1:auaFj2bvb5Zmy0mLk4WJNmwP0w4e7Zk+/Tu9FFBGA20=
-k8s.io/controller-manager v0.24.4/go.mod h1:1Tkmq5m8POXAv0JQr2BDSp95psbaXP2iYLvNftpn1Ds=
+k8s.io/controller-manager v0.25.0/go.mod h1:QElCivPrZ64NP1Y976pkgyViZUqn6UcvjlXHiAAUGd0=
-k8s.io/cri-api v0.24.4/go.mod h1:t3tImFtGeStN+ES69bQUX9sFg67ek38BM9YIJhMmuig=
+k8s.io/cri-api v0.25.0/go.mod h1:J1rAyQkSJ2Q6I+aBMOVgg2/cbbebso6FNa0UagiR0kc=
-k8s.io/csi-translation-lib v0.24.4/go.mod h1:Ov9lVXDEI3AGXYVO5TTK+o7nRa2GdlsLVvpo86Xy6x4=
+k8s.io/csi-translation-lib v0.25.0/go.mod h1:Wb80CDywP4753F6wWkIyOuJIQtQAbhgw985veSgAn/4=
-k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
 k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
 k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
@@ -1822,31 +1867,30 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
 k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
 k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
 k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
 k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ=
 k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kube-aggregator v0.24.4/go.mod h1:5h/GX6F1Tk1YZf6N8l3TElwR+nB+lT8dKRUlxeMaMBs=
+k8s.io/kube-aggregator v0.25.0/go.mod h1:dfdl4aQkleiWK/U++UDLdDC8g2rsonhkB23zzUeBCgM=
-k8s.io/kube-controller-manager v0.24.4/go.mod h1:TWE865ujpJ29d0z1NR3Vfa1UffVRlws07E02/+DsGmk=
+k8s.io/kube-controller-manager v0.25.0/go.mod h1:SjL1hKSG2z9wajnvjRHZv1zOsdDHjmbZd1ykmaYO6J8=
 k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
-k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
 k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
-k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU=
-k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
-k8s.io/kube-proxy v0.24.4/go.mod h1:v5yz4sefYAWDJjYG6zlvhhhE3BmxTETu3xKoQNGxC9E=
-k8s.io/kube-scheduler v0.24.4/go.mod h1:XtTJnX4i3qmsui40LPwW+4r6lq+24SUZResY4OHIC2M=
-k8s.io/kubectl v0.24.4 h1:fPEBkAV3/cu3BQVIUCXNngCCY62AlZ+2rkRVHcmJPn0=
-k8s.io/kubectl v0.24.4/go.mod h1:AVyJzxUwA5UMGTDyKGL6nd6RRW36FbmAdtIDMhrZtW0=
-k8s.io/kubelet v0.24.4 h1:JlnNBjWIpTuFz1aVxcQE7XC6USydRdQctqMV9sBEZBk=
-k8s.io/kubelet v0.24.4/go.mod h1:GA5G/c3DvCMx6d2ra/6w6RlT/x5XiOOULG/pKBwaxuo=
-k8s.io/kubernetes v1.24.4 h1:/UDpUPBqNN5oL46ACw0Vzjt6LXOEQ7BAbsf5+kz+XbA=
-k8s.io/kubernetes v1.24.4/go.mod h1:8e8maMiZzBR2/8Po5Uulx+MXZUYJuN3vtKwD4Ct1Xi0=
-k8s.io/legacy-cloud-providers v0.24.4/go.mod h1:ZmqNPVqdg0Kw/s1DfuwEtgfvYzBdC0ssdHPzSMotR3c=
-k8s.io/metrics v0.24.4/go.mod h1:7D8Xm3DGZoJaiCS8+QA2EzdMuDlq0Y8SiOPUB/1BaGU=
+k8s.io/kube-openapi v0.0.0-20220401212409-b28bf2818661/go.mod h1:daOouuuwd9JXpv1L7Y34iV3yf6nxzipkKMWWlqlvK9M=
+k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA=
+k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
+k8s.io/kube-proxy v0.25.0/go.mod h1:uHv1HwMVDYgl1pU2PTDKLRlxtNOf4z2M5YPYC6NP1CU=
+k8s.io/kube-scheduler v0.25.0/go.mod h1:cwiyJeImgFbhmbnImzvuhbiJayNngRNEe3FJkZDPw9Y=
+k8s.io/kubectl v0.25.0 h1:/Wn1cFqo8ik3iee1EvpxYre3bkWsGLXzLQI6uCCAkQc=
+k8s.io/kubectl v0.25.0/go.mod h1:n16ULWsOl2jmQpzt2o7Dud1t4o0+Y186ICb4O+GwKAU=
+k8s.io/kubelet v0.25.0 h1:eTS5B1u1o63ndExAHKLJytzz/GBy86ROcxYtu0VK3RA=
+k8s.io/kubelet v0.25.0/go.mod h1:J6aQxrZdSsGPrskYrhZdEn6PCnGha+GNvF0g9aWfQnw=
+k8s.io/kubernetes v1.25.0 h1:NwTRyLrdXTORd5V7DLlUltxDbl/KZjYDiRgwI+pBYGE=
+k8s.io/kubernetes v1.25.0/go.mod h1:UdtILd5Zg1vGZvShiO1EYOqmjzM2kZOG1hzwQnM5JxY=
+k8s.io/legacy-cloud-providers v0.25.0/go.mod h1:bnmUgHHeBmK3M9JgQzu+ne6UCUVURDzkpF0Y7VeypVE=
+k8s.io/metrics v0.25.0/go.mod h1:HZZrbhuRX+fsDcRc3u59o2FbrKhqD67IGnoFECNmovc=
 k8s.io/mount-utils v0.25.0-alpha.3.0.20220801203918-ff562e546084 h1:MBVsRiLUuVn8PX7je4jjBfHfRs65QwEtgM//Te1mFpQ=
 k8s.io/mount-utils v0.25.0-alpha.3.0.20220801203918-ff562e546084/go.mod h1:dHX0bJ3b1Mvh/OHsBV9r559Mdrf5Lcjxyoc7FHUtnBg=
-k8s.io/pod-security-admission v0.24.4 h1:ZYYIsBFtP8+PJwVgOldLvnC/otR+6x7SoL4HnbuMXbg=
+k8s.io/pod-security-admission v0.25.0 h1:Sceq45pO7E7RTaYAr3Br94ZMDISJIngvXXcAfcZJufk=
-k8s.io/pod-security-admission v0.24.4/go.mod h1:yIppVzUIT3WCbvI+z1lku+WGo6PgMLRlk5W9DluSewo=
+k8s.io/pod-security-admission v0.25.0/go.mod h1:b/UC586Th2LijoNV+ssyyAryUvmaTrEWms5ZzBEkVsA=
-k8s.io/sample-apiserver v0.24.4/go.mod h1:0oa2NgPVL1lamSpJ4mf9AitAVKwiEc3MEBJHDKqiQjY=
+k8s.io/sample-apiserver v0.25.0/go.mod h1:Wyy/yKmXCrWLcc+082Vsn6fxAuwraRw5FQpekHg3go8=
 k8s.io/system-validators v1.7.0/go.mod h1:gP1Ky+R9wtrSiFbrpEPwWMeYz9yqyy1S/KOh0Vci7WI=
 k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
@@ -1864,23 +1908,23 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 h1:dUk62HQ3ZFhD48Qr8MIXCiKA8wInBQCtuE4QGfFW7yA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.32 h1:2WjukG7txtEsbXsSKWtTibCdsyYAhcu6KFnttyDdZOQ=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.32/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
 sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I=
 sigs.k8s.io/controller-runtime v0.11.0-beta.0.0.20211208212546-f236f0345ad2 h1:+ReKrjTrd57mtAU19BJkxSAaWRIQkFlaWcO6dGFVP1g=
 sigs.k8s.io/controller-runtime v0.11.0-beta.0.0.20211208212546-f236f0345ad2/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
 sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
-sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
+sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
-sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
+sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI=
+sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s=
-sigs.k8s.io/kustomize/cmd/config v0.10.6/go.mod h1:/S4A4nUANUa4bZJ/Edt7ZQTyKOY9WCER0uBS1SW2Rco=
+sigs.k8s.io/kustomize/cmd/config v0.10.9/go.mod h1:T0s850zPV3wKfBALA0dyeP/K74jlJcoP8Pr9ZWwE3MQ=
-sigs.k8s.io/kustomize/kustomize/v4 v4.5.4/go.mod h1:Zo/Xc5FKD6sHl0lilbrieeGeZHVYCA4BzxeAaLI05Bg=
+sigs.k8s.io/kustomize/kustomize/v4 v4.5.7/go.mod h1:VSNKEH9D9d9bLiWEGbS6Xbg/Ih0tgQalmPvntzRxZ/Q=
-sigs.k8s.io/kustomize/kyaml v0.13.6/go.mod h1:yHP031rn1QX1lr/Xd934Ri/xdVNG8BE2ECa78Ht/kEg=
+sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4=
 sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
 sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
 sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
 sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U=
 sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
 sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
vendor/github.com/emicklei/go-restful/.travis.yml (generated, vendored, 6 changes)

@@ -1,6 +0,0 @@
-language: go
-
-go:
-  - 1.x
-
-script: go test -v

vendor/github.com/emicklei/go-restful/Makefile (generated, vendored, 7 changes)

@@ -1,7 +0,0 @@
-all: test
-
-test:
-	go test -v .
-
-ex:
-	cd examples && ls *.go | xargs go build -o /tmp/ignore

@@ -68,3 +68,4 @@ examples/restful-html-template
 s.html
 restful-path-tail
+.idea
vendor/github.com/emicklei/go-restful/v3/.goconvey (generated, vendored, new file, 1 line)

@@ -0,0 +1 @@
+ignore

vendor/github.com/emicklei/go-restful/v3/.travis.yml (generated, vendored, new file, 13 lines)

@@ -0,0 +1,13 @@
+language: go
+
+go:
+  - 1.x
+
+before_install:
+  - go test -v
+
+script:
+  - go test -race -coverprofile=coverage.txt -covermode=atomic
+
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
@@ -1,7 +1,106 @@
-## Change history of go-restful
+# Change history of go-restful
+
+## [v3.8.0] - 20221-06-06
+
+- use exact matching of allowed domain entries, issue #489 (#493)
+  - this changes fixes [security] Authorization Bypass Through User-Controlled Key
+    by changing the behaviour of the AllowedDomains setting in the CORS filter.
+    To support the previous behaviour, the CORS filter type now has a AllowedDomainFunc
+    callback mechanism which is called when a simple domain match fails.
+- add test and fix for POST without body and Content-type, issue #492 (#496)
+- [Minor] Bad practice to have a mix of Receiver types. (#491)
+
+## [v3.7.2] - 2021-11-24
+
+- restored FilterChain (#482 by SVilgelm)
+
+## [v3.7.1] - 2021-10-04
+
+- fix problem with contentEncodingEnabled setting (#479)
+
+## [v3.7.0] - 2021-09-24
+
+- feat(parameter): adds additional openapi mappings (#478)
+
+## [v3.6.0] - 2021-09-18
+
+- add support for vendor extensions (#477 thx erraggy)
+
+## [v3.5.2] - 2021-07-14
+
+- fix removing absent route from webservice (#472)
+
+## [v3.5.1] - 2021-04-12
+
+- fix handling no match access selected path
+- remove obsolete field
+
+## [v3.5.0] - 2021-04-10
+
+- add check for wildcard (#463) in CORS
+- add access to Route from Request, issue #459 (#462)
+
+## [v3.4.0] - 2020-11-10
+
+- Added OPTIONS to WebService
+
+## [v3.3.2] - 2020-01-23
+
+- Fixed duplicate compression in dispatch. #449
+
+## [v3.3.1] - 2020-08-31
+
+- Added check on writer to prevent compression of response twice. #447
+
+## [v3.3.0] - 2020-08-19
+
+- Enable content encoding on Handle and ServeHTTP (#446)
+- List available representations in 406 body (#437)
+- Convert to string using rune() (#443)
+
+## [v3.2.0] - 2020-06-21
+
+- 405 Method Not Allowed must have Allow header (#436) (thx Bracken <abdawson@gmail.com>)
+- add field allowedMethodsWithoutContentType (#424)
+
+## [v3.1.0]
+
+- support describing response headers (#426)
+- fix openapi examples (#425)
+
+v3.0.0
+
+- fix: use request/response resulting from filter chain
+- add Go module
+  Module consumer should use github.com/emicklei/go-restful/v3 as import path
+
+v2.10.0
+
+- support for Custom Verbs (thanks Vinci Xu <277040271@qq.com>)
+- fixed static example (thanks Arthur <yang_yapo@126.com>)
+- simplify code (thanks Christian Muehlhaeuser <muesli@gmail.com>)
+- added JWT HMAC with SHA-512 authentication code example (thanks Amim Knabben <amim.knabben@gmail.com>)
+
+v2.9.6
+
+- small optimization in filter code
+
+v2.11.1
+
+- fix WriteError return value (#415)
+
+v2.11.0
+
+- allow prefix and suffix in path variable expression (#414)
+
+v2.9.6
+
+- support google custome verb (#413)
+
 v2.9.5
 
 - fix panic in Response.WriteError if err == nil
 
 v2.9.4
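The v3.8.0 entry above is the security-relevant part of this vendor bump: `AllowedDomains` entries are now matched exactly (or via the `".*"` wildcard), and the new `AllowedDomainFunc` callback is the escape hatch for anything more flexible. A minimal sketch of wiring that callback up for an application that still wants subdomain-style matching; the host names and port are illustrative, not from this repository:

```go
package main

import (
	"net/http"
	"strings"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	cors := restful.CrossOriginResourceSharing{
		// exact matches only since v3.8.0
		AllowedDomains: []string{"https://ui.example.com"},
		// consulted when the exact match (and the ".*" wildcard) fails
		AllowedDomainFunc: func(origin string) bool {
			return strings.HasSuffix(origin, ".example.com")
		},
		Container: restful.DefaultContainer,
	}
	restful.DefaultContainer.Filter(cors.Filter)

	_ = http.ListenAndServe(":8080", restful.DefaultContainer)
}
```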
vendor/github.com/emicklei/go-restful/v3/Makefile (generated, vendored, new file, 8 lines)

@@ -0,0 +1,8 @@
+all: test
+
+test:
+	go vet .
+	go test -cover -v .
+
+ex:
+	find ./examples -type f -name "*.go" | xargs -I {} go build -o /tmp/ignore {}
@@ -4,9 +4,10 @@ package for building REST-style Web Services using Google Go
 
 [![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful)
 [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
-[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://godoc.org/github.com/emicklei/go-restful)
+[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful)
+[![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful)
 
-- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
+- [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples)
 
 REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
 
@@ -18,6 +19,28 @@ REST asks developers to use HTTP methods explicitly and in a way that's consiste
 - PATCH = Update partial content of a resource
 - OPTIONS = Get information about the communication options for the request URI
 
+### Usage
+
+#### Without Go Modules
+
+All versions up to `v2.*.*` (on the master) are not supporting Go modules.
+
+```
+import (
+	restful "github.com/emicklei/go-restful"
+)
+```
+
+#### Using Go Modules
+
+As of version `v3.0.0` (on the v3 branch), this package supports Go modules.
+
+```
+import (
+	restful "github.com/emicklei/go-restful/v3"
+)
+```
+
 ### Example
 
 ```Go
@@ -39,15 +62,15 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo
 }
 ```
 
-[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go)
+[Full API of a UserResource](https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go)
 
 ### Features
 
-- Routes for request → function mapping with path parameter (e.g. {id}) support
+- Routes for request → function mapping with path parameter (e.g. {id} but also prefix_{var} and {var}_suffix) support
 - Configurable router:
-	- (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}
+	- (default) Fast routing algorithm that allows static elements, [google custom method](https://cloud.google.com/apis/design/custom_methods), regular expressions and dynamic parameters in the URL path (e.g. /resource/name:customVerb, /meetings/{id} or /static/{subpath:*})
 	- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
-- Request API for reading structs from JSON/XML and accesing parameters (path,query,header)
+- Request API for reading structs from JSON/XML and accessing parameters (path,query,header)
 - Response API for writing structs to JSON/XML and setting headers
 - Customizable encoding using EntityReaderWriter registration
 - Filters for intercepting the request → response flow on Service or Route level
@@ -73,10 +96,9 @@ There are several hooks to customize the behavior of the go-restful package.
 - Encoders for other serializers
 - Use [jsoniter](https://github.com/json-iterator/go) by build this package using a tag, e.g. `go build -tags=jsoniter .`
 
-TODO: write examples of these.
-
 ## Resources
 
+- [Example programs](./examples)
 - [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
 - [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
 - [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
@@ -85,4 +107,4 @@ TODO: write examples of these.
 
 Type ```git shortlog -s``` for a full list of contributors.
 
-© 2012 - 2018, http://ernestmicklei.com. MIT License. Contributions are welcome.
+© 2012 - 2022, http://ernestmicklei.com. MIT License. Contributions are welcome.
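Since the vendored README now documents the `/v3` import path, a tiny end-to-end sketch of that path in use may help while reviewing; the route, port and payload are made up for illustration and are not code from this repository:

```go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func hello(req *restful.Request, resp *restful.Response) {
	_ = resp.WriteEntity(map[string]string{"message": "hello"})
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/hello").
		Consumes(restful.MIME_JSON).
		Produces(restful.MIME_JSON)
	ws.Route(ws.GET("").To(hello))

	// registers on restful.DefaultContainer, which serves via http.DefaultServeMux
	restful.Add(ws)
	_ = http.ListenAndServe(":8080", nil)
}
```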
vendor/github.com/emicklei/go-restful/v3/SECURITY.md (generated, vendored, new file, 13 lines)

@@ -0,0 +1,13 @@
+# Security Policy
+
+## Supported Versions
+
+| Version | Supported          |
+| ------- | ------------------ |
+| v3.7.x  | :white_check_mark: |
+| < v3.0.1 | :x:               |
+
+## Reporting a Vulnerability
+
+Create an Issue and put the label `[security]` in the title of the issue.
+Valid reported security issues are expected to be solved within a week.
@@ -83,7 +83,11 @@ func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error
 }
 
 // WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
-func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
+// It also inspects the httpWriter whether its content-encoding is already set (non-empty).
+func wantsCompressedResponse(httpRequest *http.Request, httpWriter http.ResponseWriter) (bool, string) {
+	if contentEncoding := httpWriter.Header().Get(HEADER_ContentEncoding); contentEncoding != "" {
+		return false, ""
+	}
 	header := httpRequest.Header.Get(HEADER_AcceptEncoding)
 	gi := strings.Index(header, ENCODING_GZIP)
 	zi := strings.Index(header, ENCODING_DEFLATE)
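The hunk above makes the encoding negotiation skip responses that already carry a Content-Encoding header, which is how the double-compression fix works. A standalone sketch of the same guard; `shouldCompress` is a hypothetical helper for illustration, not the library function, and only gzip/deflate are considered, as in the library:

```go
package sketch

import (
	"net/http"
	"strings"
)

// shouldCompress mirrors the new guard: never re-encode a response that an
// earlier layer already marked as encoded, otherwise honour Accept-Encoding.
func shouldCompress(r *http.Request, w http.ResponseWriter) (bool, string) {
	if w.Header().Get("Content-Encoding") != "" {
		return false, "" // already encoded once; compressing again would corrupt it
	}
	accept := r.Header.Get("Accept-Encoding")
	switch {
	case strings.Contains(accept, "gzip"):
		return true, "gzip"
	case strings.Contains(accept, "deflate"):
		return true, "deflate"
	default:
		return false, ""
	}
}
```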
@@ -14,7 +14,7 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/emicklei/go-restful/log"
+	"github.com/emicklei/go-restful/v3/log"
 )
 
 // Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
@@ -185,6 +185,11 @@ func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter)
 // when a ServiceError is returned during route selection. Default implementation
 // calls resp.WriteErrorString(err.Code, err.Message)
 func writeServiceError(err ServiceError, req *Request, resp *Response) {
+	for header, values := range err.Header {
+		for _, value := range values {
+			resp.Header().Add(header, value)
+		}
+	}
 	resp.WriteErrorString(err.Code, err.Message)
 }
 
@@ -201,6 +206,7 @@ func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.R
 
 // Dispatch the incoming Http Request to a matching WebService.
 func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
+	// so we can assign a compressing one later
 	writer := httpWriter
 
 	// CompressingResponseWriter should be closed after all operations are done
@@ -231,28 +237,8 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R
 			c.webServices,
 			httpRequest)
 	}()
 
-	// Detect if compression is needed
-	// assume without compression, test for override
-	contentEncodingEnabled := c.contentEncodingEnabled
-	if route != nil && route.contentEncodingEnabled != nil {
-		contentEncodingEnabled = *route.contentEncodingEnabled
-	}
-	if contentEncodingEnabled {
-		doCompress, encoding := wantsCompressedResponse(httpRequest)
-		if doCompress {
-			var err error
-			writer, err = NewCompressingResponseWriter(httpWriter, encoding)
-			if err != nil {
-				log.Print("unable to install compressor: ", err)
-				httpWriter.WriteHeader(http.StatusInternalServerError)
-				return
-			}
-		}
-	}
-
 	if err != nil {
-		// a non-200 response has already been written
+		// a non-200 response (may be compressed) has already been written
 		// run container filters anyway ; they should not touch the response...
 		chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
 			switch err.(type) {
@@ -265,6 +251,29 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R
 		chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
 		return
 	}
 
+	// Unless httpWriter is already an CompressingResponseWriter see if we need to install one
+	if _, isCompressing := httpWriter.(*CompressingResponseWriter); !isCompressing {
+		// Detect if compression is needed
+		// assume without compression, test for override
+		contentEncodingEnabled := c.contentEncodingEnabled
+		if route != nil && route.contentEncodingEnabled != nil {
+			contentEncodingEnabled = *route.contentEncodingEnabled
+		}
+		if contentEncodingEnabled {
+			doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter)
+			if doCompress {
+				var err error
+				writer, err = NewCompressingResponseWriter(httpWriter, encoding)
+				if err != nil {
+					log.Print("unable to install compressor: ", err)
+					httpWriter.WriteHeader(http.StatusInternalServerError)
+					return
+				}
+			}
+		}
+	}
+
 	pathProcessor, routerProcessesPath := c.router.(PathProcessor)
 	if !routerProcessesPath {
 		pathProcessor = defaultPathProcessor{}
@@ -272,16 +281,18 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R
 	pathParams := pathProcessor.ExtractParameters(route, webService, httpRequest.URL.Path)
 	wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest, pathParams)
 	// pass through filters (if any)
-	if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 {
+	if size := len(c.containerFilters) + len(webService.filters) + len(route.Filters); size > 0 {
 		// compose filter chain
-		allFilters := []FilterFunction{}
+		allFilters := make([]FilterFunction, 0, size)
 		allFilters = append(allFilters, c.containerFilters...)
 		allFilters = append(allFilters, webService.filters...)
 		allFilters = append(allFilters, route.Filters...)
-		chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) {
-			// handle request by route after passing all filters
-			route.Function(wrappedRequest, wrappedResponse)
-		}}
+		chain := FilterChain{
+			Filters:       allFilters,
+			Target:        route.Function,
+			ParameterDocs: route.ParameterDocs,
+			Operation:     route.Operation,
+		}
 		chain.ProcessFilter(wrappedRequest, wrappedResponse)
 	} else {
 		// no filters, handle request by route
@@ -299,13 +310,75 @@ func fixedPrefixPath(pathspec string) string {
 }
 
 // ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server
-func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
-	c.ServeMux.ServeHTTP(httpwriter, httpRequest)
+func (c *Container) ServeHTTP(httpWriter http.ResponseWriter, httpRequest *http.Request) {
+	// Skip, if content encoding is disabled
+	if !c.contentEncodingEnabled {
+		c.ServeMux.ServeHTTP(httpWriter, httpRequest)
+		return
+	}
+	// content encoding is enabled
+
+	// Skip, if httpWriter is already an CompressingResponseWriter
+	if _, ok := httpWriter.(*CompressingResponseWriter); ok {
+		c.ServeMux.ServeHTTP(httpWriter, httpRequest)
+		return
+	}
+
+	writer := httpWriter
+	// CompressingResponseWriter should be closed after all operations are done
+	defer func() {
+		if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
+			compressWriter.Close()
+		}
+	}()
+
+	doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter)
+	if doCompress {
+		var err error
+		writer, err = NewCompressingResponseWriter(httpWriter, encoding)
+		if err != nil {
+			log.Print("unable to install compressor: ", err)
+			httpWriter.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+	}
+
+	c.ServeMux.ServeHTTP(writer, httpRequest)
 }
 
 // Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
 func (c *Container) Handle(pattern string, handler http.Handler) {
-	c.ServeMux.Handle(pattern, handler)
+	c.ServeMux.Handle(pattern, http.HandlerFunc(func(httpWriter http.ResponseWriter, httpRequest *http.Request) {
+		// Skip, if httpWriter is already an CompressingResponseWriter
+		if _, ok := httpWriter.(*CompressingResponseWriter); ok {
+			handler.ServeHTTP(httpWriter, httpRequest)
+			return
+		}
+
+		writer := httpWriter
+
+		// CompressingResponseWriter should be closed after all operations are done
+		defer func() {
+			if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
+				compressWriter.Close()
+			}
+		}()
+
+		if c.contentEncodingEnabled {
+			doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter)
+			if doCompress {
+				var err error
+				writer, err = NewCompressingResponseWriter(httpWriter, encoding)
+				if err != nil {
+					log.Print("unable to install compressor: ", err)
+					httpWriter.WriteHeader(http.StatusInternalServerError)
+					return
+				}
+			}
+		}
+
+		handler.ServeHTTP(writer, httpRequest)
+	}))
 }
 
 // HandleWithFilter registers the handler for the given pattern.
@@ -319,7 +392,7 @@ func (c *Container) HandleWithFilter(pattern string, handler http.Handler) {
 	}
 
 	chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
-		handler.ServeHTTP(httpResponse, httpRequest)
+		handler.ServeHTTP(resp, req.Request)
 	}}
 	chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse))
 }
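The container changes above route ServeHTTP and Handle through the same compression path as dispatch, guarded by the CompressingResponseWriter type assertion so nothing is wrapped twice. A small usage sketch, assuming a local ./static directory; the pattern and port are illustrative:

```go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	c := restful.NewContainer()
	// With encoding enabled, Handle and ServeHTTP wrap the writer in a
	// CompressingResponseWriter when the client sends Accept-Encoding,
	// unless it is already wrapped (see the type assertions above).
	c.EnableContentEncoding(true)

	c.Handle("/static/", http.StripPrefix("/static/",
		http.FileServer(http.Dir("./static"))))

	_ = http.ListenAndServe(":8080", c)
}
```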
@@ -19,8 +19,21 @@
 // http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
 type CrossOriginResourceSharing struct {
 	ExposeHeaders []string // list of Header names
-	AllowedHeaders []string // list of Header names
-	AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
+
+	// AllowedHeaders is alist of Header names. Checking is case-insensitive.
+	// The list may contain the special wildcard string ".*" ; all is allowed
+	AllowedHeaders []string
+
+	// AllowedDomains is a list of allowed values for Http Origin.
+	// The list may contain the special wildcard string ".*" ; all is allowed
+	// If empty all are allowed.
+	AllowedDomains []string
+
+	// AllowedDomainFunc is optional and is a function that will do the check
+	// when the origin is not part of the AllowedDomains and it does not contain the wildcard ".*".
+	AllowedDomainFunc func(origin string) bool
+
+	// AllowedMethods is either empty or has a list of http methods names. Checking is case-insensitive.
 	AllowedMethods []string
 	MaxAge int // number of seconds before requiring new Options request
 	CookiesAllowed bool
@@ -119,37 +132,25 @@ func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
 	if len(origin) == 0 {
 		return false
 	}
+	lowerOrigin := strings.ToLower(origin)
 	if len(c.AllowedDomains) == 0 {
+		if c.AllowedDomainFunc != nil {
+			return c.AllowedDomainFunc(lowerOrigin)
+		}
 		return true
 	}
 
-	allowed := false
+	// exact match on each allowed domain
 	for _, domain := range c.AllowedDomains {
-		if domain == origin {
-			allowed = true
-			break
+		if domain == ".*" || strings.ToLower(domain) == lowerOrigin {
+			return true
 		}
 	}
-
-	if !allowed {
-		if len(c.allowedOriginPatterns) == 0 {
-			// compile allowed domains to allowed origin patterns
-			allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
-			if err != nil {
-				return false
-			}
-			c.allowedOriginPatterns = allowedOriginRegexps
-		}
-
-		for _, pattern := range c.allowedOriginPatterns {
-			if allowed = pattern.MatchString(origin); allowed {
-				break
-			}
-		}
-	}
-
-	return allowed
+	if c.AllowedDomainFunc != nil {
+		return c.AllowedDomainFunc(origin)
+	}
+	return false
 }
 
 func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
 	origin := req.Request.Header.Get(HEADER_Origin)
@@ -184,19 +185,9 @@ func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header str
 		if strings.ToLower(each) == strings.ToLower(header) {
 			return true
 		}
+		if each == "*" {
+			return true
+		}
 	}
 	return false
 }
 
-// Take a list of strings and compile them into a list of regular expressions.
-func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
-	regexps := []*regexp.Regexp{}
-	for _, regexpStr := range regexpStrings {
-		r, err := regexp.Compile(regexpStr)
-		if err != nil {
-			return regexps, err
-		}
-		regexps = append(regexps, r)
-	}
-	return regexps, nil
-}
@@ -47,7 +47,7 @@ func (c CurlyRouter) SelectRoute(
 func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
 	candidates := make(sortableCurlyRoutes, 0, 8)
 	for _, each := range ws.routes {
-		matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
+		matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb)
 		if matches {
 			candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
 		}
@@ -57,7 +57,7 @@ func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortab
 }
 
 // matchesRouteByPathTokens computes whether it matches, howmany parameters do match and what the number of static path elements are.
-func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) {
+func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string, routeHasCustomVerb bool) (matches bool, paramCount int, staticCount int) {
 	if len(routeTokens) < len(requestTokens) {
 		// proceed in matching only if last routeToken is wildcard
 		count := len(routeTokens)
@@ -72,6 +72,15 @@ func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []strin
 			return false, 0, 0
 		}
 		requestToken := requestTokens[i]
+		if routeHasCustomVerb && hasCustomVerb(routeToken){
+			if !isMatchCustomVerb(routeToken, requestToken) {
+				return false, 0, 0
+			}
+			staticCount++
+			requestToken = removeCustomVerb(requestToken)
+			routeToken = removeCustomVerb(routeToken)
+		}
+
 		if strings.HasPrefix(routeToken, "{") {
 			paramCount++
 			if colon := strings.Index(routeToken, ":"); colon != -1 {
vendor/github.com/emicklei/go-restful/v3/custom_verb.go (generated, vendored, new file, 29 lines)

@@ -0,0 +1,29 @@
+package restful
+
+import (
+	"fmt"
+	"regexp"
+)
+
+var (
+	customVerbReg = regexp.MustCompile(":([A-Za-z]+)$")
+)
+
+func hasCustomVerb(routeToken string) bool {
+	return customVerbReg.MatchString(routeToken)
+}
+
+func isMatchCustomVerb(routeToken string, pathToken string) bool {
+	rs := customVerbReg.FindStringSubmatch(routeToken)
+	if len(rs) < 2 {
+		return false
+	}
+
+	customVerb := rs[1]
+	specificVerbReg := regexp.MustCompile(fmt.Sprintf(":%s$", customVerb))
+	return specificVerbReg.MatchString(pathToken)
+}
+
+func removeCustomVerb(str string) string {
+	return customVerbReg.ReplaceAllString(str, "")
+}
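custom_verb.go backs the google-style custom verbs that the curly router change above starts honouring: a trailing `:verb` is matched and then stripped before parameter extraction. A hedged sketch of a route that could use one; the resource, verb and port are illustrative, and the exact routing behaviour should be confirmed against the vendored tests:

```go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

// POST /users/42:activate -> the ":activate" suffix is stripped before
// parameter extraction, so PathParameter("id") should return "42".
func activate(req *restful.Request, resp *restful.Response) {
	_ = resp.WriteEntity(map[string]string{"activated": req.PathParameter("id")})
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/users").Produces(restful.MIME_JSON)
	ws.Route(ws.POST("/{id}:activate").To(activate))

	restful.Add(ws)
	_ = http.ListenAndServe(":8080", nil)
}
```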
@@ -28,7 +28,7 @@ This package has the logic to find the best matching Route and if found, call it
 
 The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
 
-See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation.
+See the example https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go with a full implementation.
 
 Regular expression matching Routes
 
@@ -82,7 +82,7 @@ These are processed before calling the function associated with the Route.
 	// install 2 chained route filters (processed before calling findUser)
 	ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
 
-See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations.
+See the example https://github.com/emicklei/go-restful/blob/v3/examples/filters/restful-filters.go with full implementations.
 
 Response Encoding
 
@@ -93,7 +93,7 @@ Two encodings are supported: gzip and deflate. To enable this for all responses:
 If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
 Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
 
-See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go
+See the example https://github.com/emicklei/go-restful/blob/v3/examples/encoding/restful-encoding-filter.go
 
 OPTIONS support
 
vendor/github.com/emicklei/go-restful/v3/extensions.go (generated, vendored, new file, 21 lines)

@@ -0,0 +1,21 @@
+package restful
+
+// Copyright 2021 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// ExtensionProperties provides storage of vendor extensions for entities
+type ExtensionProperties struct {
+	// Extensions vendor extensions used to describe extra functionality
+	// (https://swagger.io/docs/specification/2-0/swagger-extensions/)
+	Extensions map[string]interface{}
+}
+
+// AddExtension adds or updates a key=value pair to the extension map.
+func (ep *ExtensionProperties) AddExtension(key string, value interface{}) {
+	if ep.Extensions == nil {
+		ep.Extensions = map[string]interface{}{key: value}
+	} else {
+		ep.Extensions[key] = value
+	}
+}
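ExtensionProperties is the storage type that documentation structs such as ParameterData (see the parameter.go hunks further down) embed for OpenAPI vendor extensions. A tiny demonstration of its lazily-allocated map; the key and values are illustrative:

```go
package main

import (
	"fmt"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	var ep restful.ExtensionProperties
	ep.AddExtension("x-owner", "team-a")  // first call allocates the map
	ep.AddExtension("x-owner", "team-b")  // later calls update in place
	fmt.Println(ep.Extensions["x-owner"]) // prints: team-b
}
```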
@@ -9,6 +9,8 @@ type FilterChain struct {
 	Filters []FilterFunction // ordered list of FilterFunction
 	Index   int              // index into filters that is currently in progress
 	Target  RouteFunction    // function to call after passing all filters
+	ParameterDocs []*Parameter // the parameter docs for the route
+	Operation     string       // the name of the operation
 }
 
 // ProcessFilter passes the request,response pair through the next of Filters.
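With ParameterDocs and Operation now carried on FilterChain (filled in by the dispatch change above), a filter can see which route operation it is wrapping without re-resolving the route. A hedged sketch of a logging filter; Operation is only meaningful when the matched route defines one, and the fields are empty for chains built outside dispatch:

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

// auditFilter logs the operation name and documented parameter count of the
// route that will ultimately handle the request, then continues the chain.
func auditFilter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
	log.Printf("%s %s operation=%q documented-params=%d",
		req.Request.Method, req.Request.URL.Path, chain.Operation, len(chain.ParameterDocs))
	chain.ProcessFilter(req, resp)
}

func main() {
	restful.DefaultContainer.Filter(auditFilter)
	_ = http.ListenAndServe(":8080", restful.DefaultContainer)
}
```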
@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"net/http"
 	"sort"
+	"strings"
 )
 
 // RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions)
@@ -98,7 +99,18 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
 		if trace {
 			traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(previous), httpRequest.Method)
 		}
-		return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed")
+		allowed := []string{}
+	allowedLoop:
+		for _, candidate := range previous {
+			for _, method := range allowed {
+				if method == candidate.Method {
+					continue allowedLoop
+				}
+			}
+			allowed = append(allowed, candidate.Method)
+		}
+		header := http.Header{"Allow": []string{strings.Join(allowed, ", ")}}
+		return nil, NewErrorWithHeader(http.StatusMethodNotAllowed, "405: Method Not Allowed", header)
 	}
 
 	// content-type
@@ -135,7 +147,24 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
 		if trace {
 			traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(previous), accept)
 		}
-		return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable")
+		available := []string{}
+		for _, candidate := range previous {
+			available = append(available, candidate.Produces...)
+		}
+		// if POST,PUT,PATCH without body
+		method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length")
+		if (method == http.MethodPost ||
+			method == http.MethodPut ||
+			method == http.MethodPatch) && length == "" {
+			return nil, NewError(
+				http.StatusUnsupportedMediaType,
+				fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")),
+			)
+		}
+		return nil, NewError(
+			http.StatusNotAcceptable,
+			fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")),
+		)
 	}
 	// return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
 	return candidates[0], nil
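The two hunks above enrich the error paths: 405 responses now carry an Allow header built from the candidate routes, and the 406/415 bodies list the available representations. A client-side sketch of observing the Allow header; it assumes a locally running go-restful service (such as the earlier /hello sketch) that only defines GET on the probed path, and the printed value is illustrative:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// POST against a GET-only route should now yield 405 plus an Allow header.
	resp, err := http.Post("http://localhost:8080/hello", "application/json", nil)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusMethodNotAllowed {
		fmt.Println("Allow:", resp.Header.Get("Allow")) // e.g. "GET"
	}
}
```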
@@ -4,7 +4,7 @@ package restful
 // Use of this source code is governed by a license
 // that can be found in the LICENSE file.
 import (
-	"github.com/emicklei/go-restful/log"
+	"github.com/emicklei/go-restful/v3/log"
 )
 
 var trace bool = false
@ -1,5 +1,7 @@
|
|||||||
package restful
|
package restful
|
||||||
|
|
||||||
|
import "sort"
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||||
// Use of this source code is governed by a license
|
// Use of this source code is governed by a license
|
||||||
// that can be found in the LICENSE file.
|
// that can be found in the LICENSE file.
|
||||||
@ -52,13 +54,25 @@ type Parameter struct {
|
|||||||
// ParameterData represents the state of a Parameter.
|
// ParameterData represents the state of a Parameter.
|
||||||
// It is made public to make it accessible to e.g. the Swagger package.
|
// It is made public to make it accessible to e.g. the Swagger package.
|
||||||
type ParameterData struct {
|
type ParameterData struct {
|
||||||
|
ExtensionProperties
|
||||||
Name, Description, DataType, DataFormat string
|
Name, Description, DataType, DataFormat string
|
||||||
Kind int
|
Kind int
|
||||||
Required bool
|
Required bool
|
||||||
|
// AllowableValues is deprecated. Use PossibleValues instead
|
||||||
AllowableValues map[string]string
|
AllowableValues map[string]string
|
||||||
|
PossibleValues []string
|
||||||
AllowMultiple bool
|
AllowMultiple bool
|
||||||
|
AllowEmptyValue bool
|
||||||
DefaultValue string
|
DefaultValue string
|
||||||
CollectionFormat string
|
CollectionFormat string
|
||||||
|
Pattern string
|
||||||
|
Minimum *float64
|
||||||
|
Maximum *float64
|
||||||
|
MinLength *int64
|
||||||
|
MaxLength *int64
|
||||||
|
MinItems *int64
|
||||||
|
MaxItems *int64
|
||||||
|
UniqueItems bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Data returns the state of the Parameter
|
// Data returns the state of the Parameter
|
||||||
@@ -106,9 +120,38 @@ func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
 	return p
 }
 
-// AllowableValues sets the allowableValues field and returns the receiver
+// AddExtension adds or updates a key=value pair to the extension map
+func (p *Parameter) AddExtension(key string, value interface{}) *Parameter {
+	p.data.AddExtension(key, value)
+	return p
+}
+
+// AllowEmptyValue sets the AllowEmptyValue field and returns the receiver
+func (p *Parameter) AllowEmptyValue(multiple bool) *Parameter {
+	p.data.AllowEmptyValue = multiple
+	return p
+}
+
+// AllowableValues is deprecated. Use PossibleValues instead. Both will be set.
 func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
 	p.data.AllowableValues = values
+
+	allowableSortedKeys := make([]string, 0, len(values))
+	for k := range values {
+		allowableSortedKeys = append(allowableSortedKeys, k)
+	}
+	sort.Strings(allowableSortedKeys)
+
+	p.data.PossibleValues = make([]string, 0, len(values))
+	for _, k := range allowableSortedKeys {
+		p.data.PossibleValues = append(p.data.PossibleValues, values[k])
+	}
+	return p
+}
+
+// PossibleValues sets the possible values field and returns the receiver
+func (p *Parameter) PossibleValues(values []string) *Parameter {
+	p.data.PossibleValues = values
 	return p
 }
 
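To make the new setters above concrete, here is a minimal, hypothetical sketch of a query parameter built with PossibleValues and AllowEmptyValue; the parameter name, values, route path, and listItems handler are illustrative only and not part of this commit:

```go
package example

import restful "github.com/emicklei/go-restful/v3"

// buildItemsRoute is a hypothetical helper showing the new Parameter setters.
func buildItemsRoute(ws *restful.WebService, listItems restful.RouteFunction) {
	status := ws.QueryParameter("status", "filter by status").
		PossibleValues([]string{"active", "archived"}). // successor to the deprecated AllowableValues map
		AllowEmptyValue(true)
	ws.Route(ws.GET("/items").Param(status).To(listItems))
}
```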
@@ -141,3 +184,51 @@ func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter {
 	p.data.CollectionFormat = format.String()
 	return p
 }
+
+// Pattern sets the pattern field and returns the receiver
+func (p *Parameter) Pattern(pattern string) *Parameter {
+	p.data.Pattern = pattern
+	return p
+}
+
+// Minimum sets the minimum field and returns the receiver
+func (p *Parameter) Minimum(minimum float64) *Parameter {
+	p.data.Minimum = &minimum
+	return p
+}
+
+// Maximum sets the maximum field and returns the receiver
+func (p *Parameter) Maximum(maximum float64) *Parameter {
+	p.data.Maximum = &maximum
+	return p
+}
+
+// MinLength sets the minLength field and returns the receiver
+func (p *Parameter) MinLength(minLength int64) *Parameter {
+	p.data.MinLength = &minLength
+	return p
+}
+
+// MaxLength sets the maxLength field and returns the receiver
+func (p *Parameter) MaxLength(maxLength int64) *Parameter {
+	p.data.MaxLength = &maxLength
+	return p
+}
+
+// MinItems sets the minItems field and returns the receiver
+func (p *Parameter) MinItems(minItems int64) *Parameter {
+	p.data.MinItems = &minItems
+	return p
+}
+
+// MaxItems sets the maxItems field and returns the receiver
+func (p *Parameter) MaxItems(maxItems int64) *Parameter {
+	p.data.MaxItems = &maxItems
+	return p
+}
+
+// UniqueItems sets the uniqueItems field and returns the receiver
+func (p *Parameter) UniqueItems(uniqueItems bool) *Parameter {
+	p.data.UniqueItems = uniqueItems
+	return p
+}
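The new validation setters (Minimum, Maximum, MinLength, Pattern, ...) chain in the same fluent style as the existing builders; a small sketch under the assumption of a hypothetical pageSize parameter:

```go
package example

import restful "github.com/emicklei/go-restful/v3"

// pageSizeParam illustrates the new validation setters flowing into ParameterData.
func pageSizeParam(ws *restful.WebService) *restful.Parameter {
	return ws.QueryParameter("pageSize", "number of items per page").
		DataType("integer").
		DefaultValue("20").
		Minimum(1).
		Maximum(100)
}
```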
@@ -29,7 +29,12 @@ func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath
 		} else {
 			value = urlParts[i]
 		}
-		if strings.HasPrefix(key, "{") { // path-parameter
+		if r.hasCustomVerb && hasCustomVerb(key) {
+			key = removeCustomVerb(key)
+			value = removeCustomVerb(value)
+		}
+
+		if strings.Index(key, "{") > -1 { // path-parameter
 			if colon := strings.Index(key, ":"); colon != -1 {
 				// extract by regex
 				regPart := key[colon+1 : len(key)-1]
@@ -42,7 +47,13 @@ func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath
 			}
 		} else {
 			// without enclosing {}
-			pathParameters[key[1:len(key)-1]] = value
+			startIndex := strings.Index(key, "{")
+			endKeyIndex := strings.Index(key, "}")
+
+			suffixLength := len(key) - endKeyIndex - 1
+			endValueIndex := len(value) - suffixLength
+
+			pathParameters[key[startIndex+1:endKeyIndex]] = value[startIndex:endValueIndex]
 		}
 	}
 }
@@ -16,7 +16,7 @@ type Request struct {
 	Request        *http.Request
 	pathParameters map[string]string
 	attributes     map[string]interface{} // for storing request-scoped values
-	selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees
+	selectedRoute *Route // is nil when no route was matched
 }
 
 func NewRequest(httpRequest *http.Request) *Request {
@@ -113,6 +113,20 @@ func (r Request) Attribute(name string) interface{} {
 }
 
 // SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees
+// If no route was matched then return an empty string.
 func (r Request) SelectedRoutePath() string {
-	return r.selectedRoutePath
+	if r.selectedRoute == nil {
+		return ""
+	}
+	// skip creating an accessor
+	return r.selectedRoute.Path
+}
+
+// SelectedRoute returns a reader to access the selected Route by the container
+// Returns nil if no route was matched.
+func (r Request) SelectedRoute() RouteReader {
+	if r.selectedRoute == nil {
+		return nil
+	}
+	return routeAccessor{route: r.selectedRoute}
 }
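A short, hypothetical filter showing how the new SelectedRoute accessor might be consumed; the filter name and log format are assumptions, only the SelectedRoute/RouteReader API comes from the change above:

```go
package example

import (
	"log"

	restful "github.com/emicklei/go-restful/v3"
)

// logRouteFilter logs which route the container selected for the request.
// SelectedRoute() returns a read-only RouteReader and is nil when no route matched,
// while SelectedRoutePath() keeps its old string-returning behaviour.
func logRouteFilter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
	if route := req.SelectedRoute(); route != nil {
		log.Printf("%s %s (operation %q)", route.Method(), route.Path(), route.Operation())
	}
	chain.ProcessFilter(req, resp)
}
```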
@@ -174,15 +174,16 @@ func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType
 	return writeJSON(r, status, contentType, value)
 }
 
-// WriteError write the http status and the error string on the response. err can be nil.
-func (r *Response) WriteError(httpStatus int, err error) error {
+// WriteError writes the http status and the error string on the response. err can be nil.
+// Return an error if writing was not successful.
+func (r *Response) WriteError(httpStatus int, err error) (writeErr error) {
 	r.err = err
 	if err == nil {
-		r.WriteErrorString(httpStatus, "")
+		writeErr = r.WriteErrorString(httpStatus, "")
 	} else {
-		r.WriteErrorString(httpStatus, err.Error())
+		writeErr = r.WriteErrorString(httpStatus, err.Error())
 	}
-	return err
+	return writeErr
 }
 
 // WriteServiceError is a convenience method for a responding with a status and a ServiceError
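Since WriteError now propagates the write result instead of echoing the input error, a caller can detect a failed error response; a hedged sketch with an invented handler:

```go
package example

import (
	"errors"
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

// failHandler shows that WriteError now reports whether writing the
// error response itself succeeded.
func failHandler(req *restful.Request, resp *restful.Response) {
	if writeErr := resp.WriteError(http.StatusConflict, errors.New("resource busy")); writeErr != nil {
		log.Printf("could not write error response: %v", writeErr)
	}
}
```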
@@ -19,6 +19,7 @@ type RouteSelectionConditionFunction func(httpRequest *http.Request) bool
 
 // Route binds a HTTP Method,Path,Consumes combination to a RouteFunction.
 type Route struct {
+	ExtensionProperties
 	Method   string
 	Produces []string
 	Consumes []string
@@ -49,35 +50,33 @@ type Route struct {
 
 	//Overrides the container.contentEncodingEnabled
 	contentEncodingEnabled *bool
+
+	// indicate route path has custom verb
+	hasCustomVerb bool
+
+	// if a request does not include a content-type header then
+	// depending on the method, it may return a 415 Unsupported Media
+	// Must have uppercase HTTP Method names such as GET,HEAD,OPTIONS,...
+	allowedMethodsWithoutContentType []string
 }
 
 // Initialize for Route
 func (r *Route) postBuild() {
 	r.pathParts = tokenizePath(r.Path)
+	r.hasCustomVerb = hasCustomVerb(r.Path)
 }
 
 // Create Request and Response from their http versions
 func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request, pathParams map[string]string) (*Request, *Response) {
 	wrappedRequest := NewRequest(httpRequest)
 	wrappedRequest.pathParameters = pathParams
-	wrappedRequest.selectedRoutePath = r.Path
+	wrappedRequest.selectedRoute = r
 	wrappedResponse := NewResponse(httpWriter)
 	wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept)
 	wrappedResponse.routeProduces = r.Produces
 	return wrappedRequest, wrappedResponse
 }
 
-// dispatchWithFilters call the function after passing through its own filters
-func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) {
-	if len(r.Filters) > 0 {
-		chain := FilterChain{Filters: r.Filters, Target: r.Function}
-		chain.ProcessFilter(wrappedRequest, wrappedResponse)
-	} else {
-		// unfiltered
-		r.Function(wrappedRequest, wrappedResponse)
-	}
-}
-
 func stringTrimSpaceCutset(r rune) bool {
 	return r == ' '
 }
@@ -121,9 +120,18 @@ func (r Route) matchesContentType(mimeTypes string) bool {
 	if len(mimeTypes) == 0 {
 		// idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type
 		m := r.Method
+		// if route specifies less or non-idempotent methods then use that
+		if len(r.allowedMethodsWithoutContentType) > 0 {
+			for _, each := range r.allowedMethodsWithoutContentType {
+				if m == each {
+					return true
+				}
+			}
+		} else {
 		if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
 			return true
 		}
+		}
 		// proceed with default
 		mimeTypes = MIME_OCTET
 	}
@@ -160,11 +168,11 @@ func tokenizePath(path string) []string {
 }
 
 // for debugging
-func (r Route) String() string {
+func (r *Route) String() string {
 	return r.Method + " " + r.Path
 }
 
 // EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value.
-func (r Route) EnableContentEncoding(enabled bool) {
+func (r *Route) EnableContentEncoding(enabled bool) {
 	r.contentEncodingEnabled = &enabled
 }
@@ -12,7 +12,7 @@ import (
 	"strings"
 	"sync/atomic"
 
-	"github.com/emicklei/go-restful/log"
+	"github.com/emicklei/go-restful/v3/log"
 )
 
 // RouteBuilder is a helper to construct Routes.
@@ -25,6 +25,7 @@ type RouteBuilder struct {
 	function   RouteFunction // required
 	filters    []FilterFunction
 	conditions []RouteSelectionConditionFunction
+	allowedMethodsWithoutContentType []string // see Route
 
 	typeNameHandleFunc TypeNameHandleFunction // required
 
@@ -37,6 +38,7 @@ type RouteBuilder struct {
 	errorMap        map[int]ResponseError
 	defaultResponse *ResponseError
 	metadata        map[string]interface{}
+	extensions      map[string]interface{}
 	deprecated      bool
 	contentEncodingEnabled *bool
 }
@@ -176,6 +178,15 @@ func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
 	return b
 }
 
+// ReturnsWithHeaders is similar to Returns, but can specify response headers
+func (b *RouteBuilder) ReturnsWithHeaders(code int, message string, model interface{}, headers map[string]Header) *RouteBuilder {
+	b.Returns(code, message, model)
+	err := b.errorMap[code]
+	err.Headers = headers
+	b.errorMap[code] = err
+	return b
+}
+
 // DefaultReturns is a special Returns call that sets the default of the response.
 func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder {
 	b.defaultResponse = &ResponseError{
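A possible use of ReturnsWithHeaders when documenting a route; the X-Request-Id header and createThing handler are made up for illustration, only the ReturnsWithHeaders/Header/Items shapes come from this change:

```go
package example

import (
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

// documentCreate attaches a documented response header to a 201 response.
func documentCreate(ws *restful.WebService, createThing restful.RouteFunction) {
	ws.Route(ws.POST("/things").To(createThing).
		ReturnsWithHeaders(http.StatusCreated, "created", nil, map[string]restful.Header{
			"X-Request-Id": {Items: &restful.Items{Type: "string"}, Description: "correlation id"},
		}))
}
```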
@@ -194,20 +205,57 @@ func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder {
 	return b
 }
 
+// AddExtension adds or updates a key=value pair to the extensions map.
+func (b *RouteBuilder) AddExtension(key string, value interface{}) *RouteBuilder {
+	if b.extensions == nil {
+		b.extensions = map[string]interface{}{}
+	}
+	b.extensions[key] = value
+	return b
+}
+
 // Deprecate sets the value of deprecated to true. Deprecated routes have a special UI treatment to warn against use
 func (b *RouteBuilder) Deprecate() *RouteBuilder {
 	b.deprecated = true
 	return b
 }
+
+// AllowedMethodsWithoutContentType overrides the default list GET,HEAD,OPTIONS,DELETE,TRACE
+// If a request does not include a content-type header then
+// depending on the method, it may return a 415 Unsupported Media.
+// Must have uppercase HTTP Method names such as GET,HEAD,OPTIONS,...
+func (b *RouteBuilder) AllowedMethodsWithoutContentType(methods []string) *RouteBuilder {
+	b.allowedMethodsWithoutContentType = methods
+	return b
+}
 
 // ResponseError represents a response; not necessarily an error.
 type ResponseError struct {
+	ExtensionProperties
 	Code      int
 	Message   string
 	Model     interface{}
+	Headers   map[string]Header
 	IsDefault bool
 }
+
+// Header describes a header for a response of the API
+//
+// For more information: http://goo.gl/8us55a#headerObject
+type Header struct {
+	*Items
+	Description string
+}
+
+// Items describe swagger simple schemas for headers
+type Items struct {
+	Type             string
+	Format           string
+	Items            *Items
+	CollectionFormat string
+	Default          interface{}
+}
 
 func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
 	b.rootPath = path
 	return b
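A sketch combining the two new RouteBuilder knobs introduced in this hunk, AllowedMethodsWithoutContentType and AddExtension; the webhook route and extension key are hypothetical:

```go
package example

import restful "github.com/emicklei/go-restful/v3"

// webhookRoute shows the new builder knobs on a single route definition.
func webhookRoute(ws *restful.WebService, handle restful.RouteFunction) {
	ws.Route(ws.POST("/webhook").To(handle).
		// accept POSTs that carry no Content-Type header instead of answering 415
		AllowedMethodsWithoutContentType([]string{"POST"}).
		// vendor extension carried on the Route via the new ExtensionProperties embedding
		AddExtension("x-internal-only", true))
}
```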
@@ -296,7 +344,9 @@ func (b *RouteBuilder) Build() Route {
 		Metadata:               b.metadata,
 		Deprecated:             b.deprecated,
 		contentEncodingEnabled: b.contentEncodingEnabled,
+		allowedMethodsWithoutContentType: b.allowedMethodsWithoutContentType,
 	}
+	route.Extensions = b.extensions
 	route.postBuild()
 	return route
 }
66 vendor/github.com/emicklei/go-restful/v3/route_reader.go generated vendored Normal file
@@ -0,0 +1,66 @@
package restful

// Copyright 2021 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

type RouteReader interface {
	Method() string
	Consumes() []string
	Path() string
	Doc() string
	Notes() string
	Operation() string
	ParameterDocs() []*Parameter
	// Returns a copy
	Metadata() map[string]interface{}
	Deprecated() bool
}

type routeAccessor struct {
	route *Route
}

func (r routeAccessor) Method() string {
	return r.route.Method
}
func (r routeAccessor) Consumes() []string {
	return r.route.Consumes[:]
}
func (r routeAccessor) Path() string {
	return r.route.Path
}
func (r routeAccessor) Doc() string {
	return r.route.Doc
}
func (r routeAccessor) Notes() string {
	return r.route.Notes
}
func (r routeAccessor) Operation() string {
	return r.route.Operation
}
func (r routeAccessor) ParameterDocs() []*Parameter {
	return r.route.ParameterDocs[:]
}

// Returns a copy
func (r routeAccessor) Metadata() map[string]interface{} {
	return copyMap(r.route.Metadata)
}
func (r routeAccessor) Deprecated() bool {
	return r.route.Deprecated
}

// https://stackoverflow.com/questions/23057785/how-to-copy-a-map
func copyMap(m map[string]interface{}) map[string]interface{} {
	cp := make(map[string]interface{})
	for k, v := range m {
		vm, ok := v.(map[string]interface{})
		if ok {
			cp[k] = copyMap(vm)
		} else {
			cp[k] = v
		}
	}
	return cp
}
@@ -4,12 +4,16 @@ package restful
 // Use of this source code is governed by a license
 // that can be found in the LICENSE file.
 
-import "fmt"
+import (
+	"fmt"
+	"net/http"
+)
 
 // ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request.
 type ServiceError struct {
 	Code    int
 	Message string
+	Header  http.Header
 }
 
 // NewError returns a ServiceError using the code and reason
@@ -17,6 +21,11 @@ func NewError(code int, message string) ServiceError {
 	return ServiceError{Code: code, Message: message}
 }
 
+// NewErrorWithHeader returns a ServiceError using the code, reason and header
+func NewErrorWithHeader(code int, message string, header http.Header) ServiceError {
+	return ServiceError{Code: code, Message: message, Header: header}
+}
+
 // Error returns a text representation of the service error
 func (s ServiceError) Error() string {
 	return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message)
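One way the new NewErrorWithHeader constructor might be paired with the existing WriteServiceError helper; the handler, header value, and status choice are illustrative assumptions:

```go
package example

import (
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

// deniedHandler builds a ServiceError that carries response headers
// alongside code and message, as enabled by the new Header field.
func deniedHandler(req *restful.Request, resp *restful.Response) {
	hdr := http.Header{}
	hdr.Set("WWW-Authenticate", `Bearer realm="example"`)
	resp.WriteServiceError(http.StatusUnauthorized,
		restful.NewErrorWithHeader(http.StatusUnauthorized, "token expired", hdr))
}
```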
@@ -6,7 +6,7 @@ import (
 	"reflect"
 	"sync"
 
-	"github.com/emicklei/go-restful/log"
+	"github.com/emicklei/go-restful/v3/log"
 )
 
 // Copyright 2013 Ernest Micklei. All rights reserved.
@@ -181,14 +181,12 @@ func (w *WebService) RemoveRoute(path, method string) error {
 	}
 	w.routesLock.Lock()
 	defer w.routesLock.Unlock()
-	newRoutes := make([]Route, (len(w.routes) - 1))
-	current := 0
-	for ix := range w.routes {
-		if w.routes[ix].Method == method && w.routes[ix].Path == path {
+	newRoutes := []Route{}
+	for _, route := range w.routes {
+		if route.Method == method && route.Path == path {
 			continue
 		}
-		newRoutes[current] = w.routes[ix]
-		current = current + 1
+		newRoutes = append(newRoutes, route)
 	}
 	w.routes = newRoutes
 	return nil
@@ -288,3 +286,8 @@ func (w *WebService) PATCH(subPath string) *RouteBuilder {
 func (w *WebService) DELETE(subPath string) *RouteBuilder {
 	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath)
 }
+
+// OPTIONS is a shortcut for .Method("OPTIONS").Path(subPath)
+func (w *WebService) OPTIONS(subPath string) *RouteBuilder {
+	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("OPTIONS").Path(subPath)
+}
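The new OPTIONS shortcut mirrors the existing verb helpers; a minimal, hypothetical registration:

```go
package example

import restful "github.com/emicklei/go-restful/v3"

// registerOptions wires an OPTIONS route using the new WebService.OPTIONS helper.
func registerOptions(ws *restful.WebService, preflight restful.RouteFunction) {
	ws.Route(ws.OPTIONS("/items").To(preflight))
}
```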
7 vendor/github.com/onsi/ginkgo/v2/.gitignore generated vendored Normal file
@@ -0,0 +1,7 @@
.DS_Store
TODO.md
tmp/**/*
*.coverprofile
.vscode
.idea/
*.log
438 vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md generated vendored Normal file
@@ -0,0 +1,438 @@
## 2.1.4

### Fixes
- Numerous documentation typos
- Prepend `when` when using `When` (this behavior was in 1.x but unintentionally lost during the 2.0 rewrite) [efce903]
- improve error message when a parallel process fails to report back [a7bd1fe]
- guard against concurrent map writes in DeprecationTracker [0976569]
- Invoke reporting nodes during dry-run (fixes #956 and #935) [aae4480]
- Fix ginkgo import circle [f779385]

## 2.1.3

See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.

### Fixes
- Calling By in a container node now emits a useful error. [ff12cee]

## 2.1.2

### Fixes

- Track location of focused specs correctly in `ginkgo unfocus` [a612ff1]
- Profiling suites with focused specs no longer generates an erroneous failure message [8fbfa02]
- Several documentation typos fixed. Big thanks to everyone who helped catch them and report/fix them!

## 2.1.1

See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.

### Fixes
- Suites that only import the new dsl packages are now correctly identified as Ginkgo suites [ec17e17]

## 2.1.0

See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.

2.1.0 is a minor release with a few tweaks:

- Introduce new DSL packages to enable users to pick-and-choose which portions of the DSL to dot-import. [90868e2] More details [here](https://onsi.github.io/ginkgo/#alternatives-to-dot-importing-ginkgo).
- Add error check for invalid/nil parameters to DescribeTable [6f8577e]
- Myriad docs typos fixed (thanks everyone!) [718542a, ecb7098, 146654c, a8f9913, 6bdffde, 03dcd7e]

## 2.0.0

See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2)

## 1.16.5

Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC.
1.16.5 deprecates GinkgoParallelNode in favor of GinkgoParallelProcess

You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`

## 1.16.4

### Fixes
1.16.4 retracts 1.16.3. There are no code changes. The 1.16.3 tag was associated with the wrong commit and an attempt to change it after-the-fact has proven problematic. 1.16.4 retracts 1.16.3 in Ginkgo's go.mod and creates a new, correctly tagged, release.

## 1.16.3

### Features
- Measure is now deprecated and emits a deprecation warning.

## 1.16.2

### Fixes
- Deprecations can be suppressed by setting an `ACK_GINKGO_DEPRECATIONS=<semver>` environment variable.

## 1.16.1

### Fixes
- Suppress --stream deprecation warning on windows (#793)

## 1.16.0

### Features
- Advertise Ginkgo 2.0. Introduce deprecations. [9ef1913]
  - Update README.md to advertise that Ginkgo 2.0 is coming.
  - Backport the 2.0 DeprecationTracker and start alerting users
    about upcoming deprecations.

- Add slim-sprig template functions to bootstrap/generate (#775) [9162b86]

- Fix accidental reference to 1488 (#784) [9fb7fe4]

## 1.15.2

### Fixes
- ignore blank `-focus` and `-skip` flags (#780) [e90a4a0]

## 1.15.1

### Fixes
- reporters/junit: Use `system-out` element instead of `passed` (#769) [9eda305]

## 1.15.0

### Features
- Adds 'outline' command to print the outline of specs/containers in a file (#754) [071c369] [6803cc3] [935b538] [06744e8] [0c40583]
- Add support for using template to generate tests (#752) [efb9e69]
- Add a Chinese Doc #755 (#756) [5207632]
- cli: allow multiple -focus and -skip flags (#736) [9a782fb]

### Fixes
- Add _internal to filename of tests created with internal flag (#751) [43c12da]

## 1.14.2

### Fixes
- correct handling windows backslash in import path (#721) [97f3d51]
- Add additional methods to GinkgoT() to improve compatibility with the testing.TB interface [b5fe44d]

## 1.14.1

### Fixes
- Discard exported method declaration when running ginkgo bootstrap (#558) [f4b0240]

## 1.14.0

### Features
- Defer running top-level container nodes until RunSpecs is called [d44dedf]
- [Document Ginkgo lifecycle](http://onsi.github.io/ginkgo/#understanding-ginkgos-lifecycle)
- Add `extensions/globals` package (#692) [3295c8f] - this can be helpful in contexts where you are test-driving your test-generation code (see [#692](https://github.com/onsi/ginkgo/pull/692))
- Print Skip reason in JUnit reporter if one was provided [820dfab]

## 1.13.0

### Features
- Add a version of table.Entry that allows dumping the entry parameters. (#689) [21eaef2]

### Fixes
- Ensure integration tests pass in an environment sans GOPATH [606fba2]
- Add books package (#568) [fc0e44e]
- doc(readme): installation via "tools package" (#677) [83bb20e]
- Solve the undefined: unix.Dup2 compile error on mips64le (#680) [0624f75]
- Import package without dot (#687) [6321024]
- Fix integration tests to stop require GOPATH (#686) [a912ec5]

## 1.12.3

### Fixes
- Print correct code location of failing table test (#666) [c6d7afb]

## 1.12.2

### Fixes
- Update dependencies [ea4a036]

## 1.12.1

### Fixes
- Make unfocus ("blur") much faster (#674) [8b18061]
- Fix typo (#673) [7fdcbe8]
- Test against 1.14 and remove 1.12 [d5c2ad6]
- Test if a coverprofile content is empty before checking its latest character (#670) [14d9fa2]
- replace tail package with maintained one. this fixes go get errors (#667) [4ba33d4]
- improve ginkgo performance - makes progress on #644 [a14f98e]
- fix convert integration tests [1f8ba69]
- fix typo succesful -> successful (#663) [1ea49cf]
- Fix invalid link (#658) [b886136]
- convert utility : Include comments from source (#657) [1077c6d]
- Explain what BDD means [d79e7fb]
- skip race detector test on unsupported platform (#642) [f8ab89d]
- Use Dup2 from golang.org/x/sys/unix instead of syscallDup (#638) [5d53c55]
- Fix missing newline in combined coverage file (#641) [6a07ea2]
- check if a spec is run before returning SpecSummary (#645) [8850000]

## 1.12.0

### Features
- Add module definition (#630) [78916ab]

## 1.11.0

### Features
- Add syscall for riscv64 architecture [f66e896]
- teamcity reporter: output location of test failure as well as test definition (#626) [9869142]
- teamcity reporter: output newline after every service message (#625) [3cfa02d]
- Add support for go module when running `generate` command (#578) [9c89e3f]

## 1.10.3

### Fixes
- Set go_import_path in travis.yml to allow internal packages in forks (#607) [3b721db]
- Add integration test [d90e0dc]
- Fix coverage files combining [e5dde8c]
- A new CLI option: -ginkgo.reportFile <file path> (#601) [034fd25]

## 1.10.2

### Fixes
- speed up table entry generateIt() (#609) [5049dc5]
- Fix. Write errors to stderr instead of stdout (#610) [7bb3091]

## 1.10.1

### Fixes
- stack backtrace: fix skipping (#600) [2a4c0bd]

## 1.10.0

### Fixes
- stack backtrace: fix alignment and skipping [66915d6]
- fix typo in documentation [8f97b93]

## 1.9.0

### Features
- Option to print output into report, when tests have passed [0545415]

### Fixes
- Fixed typos in comments [0ecbc58]
- gofmt code [a7f8bfb]
- Simplify code [7454d00]
- Simplify concatenation, incrementation and function assignment [4825557]
- Avoid unnecessary conversions [9d9403c]
- JUnit: include more detailed information about panic [19cca4b]
- Print help to stdout when the user asks for help [4cb7441]


## 1.8.0

### New Features
- allow config of the vet flag for `go test` (#562) [3cd45fa]
- Support projects using go modules [d56ee76]

### Fixes and Minor Improvements
- chore(godoc): fixes typos in Measurement funcs [dbaca8e]
- Optimize focus to avoid allocations [f493786]
- Ensure generated test file names are underscored [505cc35]

## 1.7.0

### New Features
- Add JustAfterEach (#484) [0d4f080]

### Fixes
- Correctly round suite time in junit reporter [2445fc1]
- Avoid using -i argument to go test for Golang 1.10+ [46bbc26]

## 1.6.0

### New Features
- add --debug flag to emit node output to files (#499) [39febac]

### Fixes
- fix: for `go vet` to pass [69338ec]
- docs: fix for contributing instructions [7004cb1]
- consolidate and streamline contribution docs (#494) [d848015]
- Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6]
- all: gofmt [000d317]
- Increase eventually timeout to 30s [c73579c]
- Clarify asynchronous test behaviour [294d8f4]
- Travis badge should only show master [26d2143]

## 1.5.0 5/10/2018

### New Features
- Supports go v1.10 (#443, #446, #451) [e873237, 468e89e, e37dbfe, a37f4c0, c0b857d, bca5260, 4177ca8]
- Add a When() synonym for Context() (#386) [747514b, 7484dad, 7354a07, dd826c8]
- Re-add noisySkippings flag [652e15c]
- Allow coverage to be displayed for focused specs (#367) [11459a8]
- Handle -outputdir flag (#364) [228e3a8]
- Handle -coverprofile flag (#355) [43392d5]

### Fixes
- When using custom reporters register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their customer reporters. (#365) [8382b23]
- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
- Increase the threshold when checking time measuments (#455) [2f714bf, 68f622c]
- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
- Add an extra new line after reporting spec run completion for test2json [874520d]
- added name name field to junit reported testsuite [ae61c63]
- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
- Synchronise the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
- Use fmt.Errorf instead of errors.New(fmt.Sprintf (#401) [a299f56, 44e2eaa]
- Imports in generated code should follow conventions (#398) [0bec0b0, e8536d8]
- Prevent data race error when Recording a benchmark value from multiple go routines (#390) [c0c4881, 7a241e9]
- Replace GOPATH in Environment [4b883f0]


## 1.4.0 7/16/2017

- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345)
- thanks to improvements in `go test -c` `ginkgo` no longer needs to fix Go's compilation output to ensure compilation errors are expressed relative to the CWD. [#357]
- `ginkgo watch -watchRegExp=...` allows you to specify a custom regular expression to watch. Only files matching the regular expression are watched for changes (the default is `\.go$`) [#356]
- `ginkgo` now always emits compilation output. Previously, only failed compilation output was printed out. [#277]
- `ginkgo -requireSuite` now fails the test run if there are `*_test.go` files but `go test` fails to detect any tests. Typically this means you forgot to run `ginkgo bootstrap` to generate a suite file. [#344]
- `ginkgo -timeout=DURATION` allows you to adjust the timeout for the entire test suite (default is 24 hours) [#248]

## 1.3.0 3/28/2017

Improvements:

- Significantly improved parallel test distribution. Now instead of pre-sharding test cases across workers (which can result in idle workers and poor test performance) Ginkgo uses a shared queue to keep all workers busy until all tests are complete. This improves test-time performance and consistency.
- `Skip(message)` can be used to skip the current test.
- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
- Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed`
- Support for retrying flaky tests with `--flakeAttempts`
- `ginkgo ./...` now recurses as you'd expect
- Added `Specify` a synonym for `It`
- Support colorise on Windows
- Broader support for various go compilation flags in the `ginkgo` CLI

Bug Fixes:

- Ginkgo tests now fail when you `panic(nil)` (#167)

## 1.2.0 5/31/2015

Improvements

- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)

## 1.2.0-beta

Ginkgo now requires Go 1.4+

Improvements:

- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
- Improved focus behavior. Now, this:

    ```golang
    FDescribe("Some describe", func() {
        It("A", func() {})

        FIt("B", func() {})
    })
    ```

  will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
- Improved output when an error occurs in a setup or teardown block.
- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`.
- Add support for precompiled tests:
  - `ginkgo build <path-to-package>` will now compile the package, producing a file named `package.test`
  - The compiled `package.test` file can be run directly. This runs the tests in series.
  - To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
- `ginkgo -notify` now works on Linux

Bug Fixes:

- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
- Fix tempfile leak when running in parallel
- Fix incorrect failure message when a panic occurs during a parallel test run
- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
- Be more consistent about handling SIGTERM as well as SIGINT
- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)

## 1.1.0 (8/2/2014)

No changes, just dropping the beta.

## 1.1.0-beta (7/22/2014)
New Features:

- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
- `ginkgo --failFast` aborts the test suite after the first failure.
- `ginkgo generate file_1 file_2` can take multiple file arguments.
- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.

Improvements:

- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
- `ginkgo --untilItFails` no longer recompiles between attempts.
- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.

Bug Fixes:

- `ginkgo boostrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic

## 1.0.0 (5/24/2014)
New Features:

- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`

Improvements:

- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm open the file in your text editor.
- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.

Bug Fixes:

- `-cover` now generates a correctly combined coverprofile when running with in parallel with multiple `-node`s.
- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
- Fix all remaining race conditions in Ginkgo's test suite.

## 1.0.0-beta (4/14/2014)
Breaking changes:

- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
- Modified the Reporter interface
- `watch` is now a subcommand, not a flag.

DSL changes:

- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.

CLI changes:

- `watch` is now a subcommand, not a flag
- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
- Additional arguments can be passed to specs. Pass them after the `--` separator
- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.

Misc:

- Start using semantic versioning
- Start maintaining changelog

Major refactor:

- Pull out Ginkgo's internal to `internal`
- Rename `example` everywhere to `spec`
- Much more!
13 vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md generated vendored Normal file
@@ -0,0 +1,13 @@
# Contributing to Ginkgo

Your contributions to Ginkgo are essential for its long-term maintenance and improvement.

- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
- Ensure adequate test coverage:
  - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
  - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
- Make sure all the tests succeed via `ginkgo -r -p`
- Vet your changes via `go vet ./...`
- Update the documentation. Ginko uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes.

Thanks for supporting Ginkgo!
20 vendor/github.com/onsi/ginkgo/v2/LICENSE generated vendored Normal file
@@ -0,0 +1,20 @@
Copyright (c) 2013-2014 Onsi Fakhouri

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
119
vendor/github.com/onsi/ginkgo/v2/README.md
generated
vendored
Normal file
119
vendor/github.com/onsi/ginkgo/v2/README.md
generated
vendored
Normal file
@ -0,0 +1,119 @@
|
|||||||
|
![Ginkgo](https://onsi.github.io/ginkgo/images/ginkgo.png)
|
||||||
|
|
||||||
|
[![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# Ginkgo 2.0 is now Generally Available!
|
||||||
|
|
||||||
|
You can learn more about 2.0 in the [Migration Guide](https://onsi.github.io/ginkgo/MIGRATING_TO_V2)!
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
Ginkgo is a mature testing framework for Go designed to help you write expressive specs. Ginkgo builds on top of Go's `testing` foundation and is complemented by the [Gomega](https://github.com/onsi/gomega) matcher library. Together, Ginkgo and Gomega let you express the intent behind your specs clearly:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
. "github.com/onsi/ginkgo/v2"
|
||||||
|
. "github.com/onsi/gomega"
|
||||||
|
...
|
||||||
|
)
|
||||||
|
|
||||||
|
Describe("Checking books out of the library", Label("library"), func() {
|
||||||
|
var library *libraries.Library
|
||||||
|
var book *books.Book
|
||||||
|
var valjean *users.User
|
||||||
|
BeforeEach(func() {
|
||||||
|
library = libraries.NewClient()
|
||||||
|
book = &books.Book{
|
||||||
|
Title: "Les Miserables",
|
||||||
|
Author: "Victor Hugo",
|
||||||
|
}
|
||||||
|
valjean = users.NewUser("Jean Valjean")
|
||||||
|
})
|
||||||
|
|
||||||
|
When("the library has the book in question", func() {
|
||||||
|
BeforeEach(func() {
|
||||||
|
Expect(library.Store(book)).To(Succeed())
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("and the book is available", func() {
|
||||||
|
It("lends it to the reader", func() {
|
||||||
|
Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed())
|
||||||
|
Expect(valjean.Books()).To(ContainElement(book))
|
||||||
|
Expect(library.UserWithBook(book)).To(Equal(valjean))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("but the book has already been checked out", func() {
|
||||||
|
var javert *users.User
|
||||||
|
BeforeEach(func() {
|
||||||
|
javert = users.NewUser("Javert")
|
||||||
|
Expect(javert.Checkout(library, "Les Miserables")).To(Succeed())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("tells the user", func() {
|
||||||
|
err := valjean.Checkout(library, "Les Miserables")
|
||||||
|
Expect(error).To(MatchError("Les Miserables is currently checked out"))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("lets the user place a hold and get notified later", func() {
|
||||||
|
Expect(valjean.Hold(library, "Les Miserables")).To(Succeed())
|
||||||
|
Expect(valjean.Holds()).To(ContainElement(book))
|
||||||
|
|
||||||
|
By("when Javert returns the book")
|
||||||
|
Expect(javert.Return(library, book)).To(Succeed())
|
||||||
|
|
||||||
|
By("it eventually informs Valjean")
|
||||||
|
notification := "Les Miserables is ready for pick up"
|
||||||
|
Eventually(valjean.Notifications).Should(ContainElement(notification))
|
||||||
|
|
||||||
|
Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed())
|
||||||
|
Expect(valjean.Books()).To(ContainElement(book))
|
||||||
|
Expect(valjean.Holds()).To(BeEmpty())
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
When("the library does not have the book in question", func() {
|
||||||
|
It("tells the reader the book is unavailable", func() {
|
||||||
|
err := valjean.Checkout(library, "Les Miserables")
|
||||||
|
Expect(error).To(MatchError("Les Miserables is not in the library catalog"))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
Jump to the [docs](https://onsi.github.io/ginkgo/) to learn more. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first specs](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite).
|
||||||
|
|
||||||
|
If you have a question, comment, bug report, feature request, etc. please open a [GitHub issue](https://github.com/onsi/ginkgo/issues/new), or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW).
|
||||||
|
|
||||||
|
## Capabilities
|
||||||
|
|
||||||
|
Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://olivinelabs.com/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing.
|
||||||
|
|
||||||
|
With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs)
|
||||||
|
|
||||||
|

At runtime, Ginkgo can run your specs in reproducibly [random order](https://onsi.github.io/ginkgo/#spec-randomization) and has sophisticated support for [spec parallelization](https://onsi.github.io/ginkgo/#spec-parallelization). In fact, running specs in parallel is as easy as

```bash
ginkgo -p
```

By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly.

As your suites grow, Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programmatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically).
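
For example, a spec can be tagged with the `Label` decorator and then selected from the command line. This is only a sketch; the "Syncer" container and its spec are hypothetical:

```go
package syncer_test

import (
	. "github.com/onsi/ginkgo/v2"
)

// Labels attach to container and subject nodes; filter queries can then select them at run time.
var _ = Describe("Syncer", Label("integration"), func() {
	It("pushes pending records to the backing store", Label("slow"), func() {
		// ... assertions against a (hypothetical) backing store would go here ...
	})
})
```

A subset could then be run with something like `ginkgo --label-filter="integration && !slow"`; label-filter queries support `&&`, `||`, and `!`.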

Ginkgo ships with `ginkgo`, a [command line tool](https://onsi.github.io/ginkgo/#ginkgo-cli-overview) with support for generating, running, filtering, and profiling Ginkgo suites. You can even have Ginkgo automatically run your specs when it detects a change with `ginkgo watch`, enabling rapid feedback loops during test-driven development.

And that's just Ginkgo! [Gomega](https://onsi.github.io/gomega/) brings a rich, mature family of [assertions and matchers](https://onsi.github.io/gomega/#provided-matchers) to your suites. With Gomega you can easily mix [synchronous and asynchronous assertions](https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing) in your specs. You can even build your own set of expressive domain-specific matchers quickly and easily by composing Gomega's [existing building blocks](https://onsi.github.io/ginkgo/#building-custom-matchers).
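
As a small illustration of mixing the two assertion styles, here is a sketch in which `fetchStatus` is a hypothetical helper standing in for whatever your suite would poll:

```go
package server_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// fetchStatus is a hypothetical stand-in for a real readiness probe.
func fetchStatus() string { return "ready" }

var _ = Describe("Server startup", func() {
	It("eventually reports ready", func() {
		// Synchronous assertion: evaluated once, immediately.
		Expect(fetchStatus()).NotTo(BeEmpty())

		// Asynchronous assertion: Eventually polls fetchStatus until the
		// matcher succeeds or the 5s timeout expires.
		Eventually(fetchStatus, 5*time.Second).Should(Equal("ready"))
	})
})
```

As with the earlier sketch, this would be wired into `go test` through a `RunSpecs` bootstrap function.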

Happy Testing!

## License

Ginkgo is MIT-Licensed

## Contributing

See [CONTRIBUTING.md](CONTRIBUTING.md)

17
vendor/github.com/onsi/ginkgo/v2/RELEASING.md
generated
vendored
Normal file
@ -0,0 +1,17 @@
A Ginkgo release is a tagged git sha and a GitHub release. To cut a release:

1. Ensure CHANGELOG.md is up to date.
   - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release
   - Categorize the changes into
     - Breaking Changes (requires a major version)
     - New Features (minor version)
     - Fixes (fix version)
     - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
1. Update `VERSION` in `types/version.go`
1. Commit, push, and release:
   ```
   git commit -m "vM.m.p"
   git push
   gh release create "vM.m.p"
   git fetch --tags origin master
   ```

69
vendor/github.com/onsi/ginkgo/v2/config/deprecated.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
package config

// GinkgoConfigType has been deprecated and its equivalent now lives in
// the types package. You can no longer access Ginkgo configuration from the config
// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the
// current configuration
//
// GinkgoConfigType is still here so custom V1 reporters do not result in a compilation error
// It will be removed in a future minor release of Ginkgo
type GinkgoConfigType = DeprecatedGinkgoConfigType
type DeprecatedGinkgoConfigType struct {
	RandomSeed         int64
	RandomizeAllSpecs  bool
	RegexScansFilePath bool
	FocusStrings       []string
	SkipStrings        []string
	SkipMeasurements   bool
	FailOnPending      bool
	FailFast           bool
	FlakeAttempts      int
	EmitSpecProgress   bool
	DryRun             bool
	DebugParallel      bool

	ParallelNode  int
	ParallelTotal int
	SyncHost      string
	StreamHost    string
}

// DefaultReporterConfigType has been deprecated and its equivalent now lives in
// the types package. You can no longer access Ginkgo configuration from the config
// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the
// current configuration
//
// DefaultReporterConfigType is still here so custom V1 reporters do not result in a compilation error
// It will be removed in a future minor release of Ginkgo
type DefaultReporterConfigType = DeprecatedDefaultReporterConfigType
type DeprecatedDefaultReporterConfigType struct {
	NoColor           bool
	SlowSpecThreshold float64
	NoisyPendings     bool
	NoisySkippings    bool
	Succinct          bool
	Verbose           bool
	FullTrace         bool
	ReportPassed      bool
	ReportFile        string
}

// Sadly there is no way to gracefully deprecate access to these global config variables.
// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
type GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{}

// Sadly there is no way to gracefully deprecate access to these global config variables.
// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
var GinkgoConfig = GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}

// Sadly there is no way to gracefully deprecate access to these global config variables.
// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
type DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{}

// Sadly there is no way to gracefully deprecate access to these global config variables.
// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
var DefaultReporterConfig = DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}

687
vendor/github.com/onsi/ginkgo/v2/core_dsl.go
generated
vendored
Normal file
@ -0,0 +1,687 @@
/*
|
||||||
|
Ginkgo is a testing framework for Go designed to help you write expressive tests.
|
||||||
|
https://github.com/onsi/ginkgo
|
||||||
|
MIT-Licensed
|
||||||
|
|
||||||
|
The godoc documentation outlines Ginkgo's API. Since Ginkgo is a Domain-Specific Language it is important to
|
||||||
|
build a mental model for Ginkgo - the narrative documentation at https://onsi.github.io/ginkgo/ is designed to help you do that.
|
||||||
|
You should start there - even a brief skim will be helpful. At minimum you should skim through the https://onsi.github.io/ginkgo/#getting-started chapter.
|
||||||
|
|
||||||
|
Ginkgo is best paired with the Gomega matcher library: https://github.com/onsi/gomega
|
||||||
|
|
||||||
|
You can run Ginkgo specs with go test - however we recommend using the ginkgo cli. It enables functionality
|
||||||
|
that go test does not (especially running suites in parallel). You can learn more at https://onsi.github.io/ginkgo/#ginkgo-cli-overview
|
||||||
|
or by running 'ginkgo help'.
|
||||||
|
*/
|
||||||
|
package ginkgo
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/formatter"
|
||||||
|
"github.com/onsi/ginkgo/v2/internal"
|
||||||
|
"github.com/onsi/ginkgo/v2/internal/global"
|
||||||
|
"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
|
||||||
|
"github.com/onsi/ginkgo/v2/internal/parallel_support"
|
||||||
|
"github.com/onsi/ginkgo/v2/reporters"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
const GINKGO_VERSION = types.VERSION
|
||||||
|
|
||||||
|
var flagSet types.GinkgoFlagSet
|
||||||
|
var deprecationTracker = types.NewDeprecationTracker()
|
||||||
|
var suiteConfig = types.NewDefaultSuiteConfig()
|
||||||
|
var reporterConfig = types.NewDefaultReporterConfig()
|
||||||
|
var suiteDidRun = false
|
||||||
|
var outputInterceptor internal.OutputInterceptor
|
||||||
|
var client parallel_support.Client
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
var err error
|
||||||
|
flagSet, err = types.BuildTestSuiteFlagSet(&suiteConfig, &reporterConfig)
|
||||||
|
exitIfErr(err)
|
||||||
|
GinkgoWriter = internal.NewWriter(os.Stdout)
|
||||||
|
}
|
||||||
|
|
||||||
|
func exitIfErr(err error) {
|
||||||
|
if err != nil {
|
||||||
|
if outputInterceptor != nil {
|
||||||
|
outputInterceptor.Shutdown()
|
||||||
|
}
|
||||||
|
if client != nil {
|
||||||
|
client.Close()
|
||||||
|
}
|
||||||
|
fmt.Fprintln(formatter.ColorableStdErr, err.Error())
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func exitIfErrors(errors []error) {
|
||||||
|
if len(errors) > 0 {
|
||||||
|
if outputInterceptor != nil {
|
||||||
|
outputInterceptor.Shutdown()
|
||||||
|
}
|
||||||
|
if client != nil {
|
||||||
|
client.Close()
|
||||||
|
}
|
||||||
|
for _, err := range errors {
|
||||||
|
fmt.Fprintln(formatter.ColorableStdErr, err.Error())
|
||||||
|
}
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//The interface implemented by GinkgoWriter
|
||||||
|
type GinkgoWriterInterface interface {
|
||||||
|
io.Writer
|
||||||
|
|
||||||
|
Print(a ...interface{})
|
||||||
|
Printf(format string, a ...interface{})
|
||||||
|
Println(a ...interface{})
|
||||||
|
|
||||||
|
TeeTo(writer io.Writer)
|
||||||
|
ClearTeeWriters()
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
GinkgoWriter implements a GinkgoWriterInterface and io.Writer
|
||||||
|
|
||||||
|
When running in verbose mode (ginkgo -v) any writes to GinkgoWriter will be immediately printed
|
||||||
|
to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
|
||||||
|
only if the current test fails.
|
||||||
|
|
||||||
|
GinkgoWriter also provides convenience Print, Printf and Println methods and allows you to tee to a custom writer via GinkgoWriter.TeeTo(writer).
|
||||||
|
Writes to GinkgoWriter are immediately sent to any registered TeeTo() writers. You can unregister all TeeTo() Writers with GinkgoWriter.ClearTeeWriters()
|
||||||
|
|
||||||
|
You can learn more at https://onsi.github.io/ginkgo/#logging-output
|
||||||
|
*/
|
||||||
|
var GinkgoWriter GinkgoWriterInterface
|
||||||
|
|
||||||
|
//The interface by which Ginkgo receives *testing.T
|
||||||
|
type GinkgoTestingT interface {
|
||||||
|
Fail()
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
GinkgoConfiguration returns the configuration of the current suite.
|
||||||
|
|
||||||
|
The first return value is the SuiteConfig which controls aspects of how the suite runs,
|
||||||
|
the second return value is the ReporterConfig which controls aspects of how Ginkgo's default
|
||||||
|
reporter emits output.
|
||||||
|
|
||||||
|
Mutating the returned configurations has no effect. To reconfigure Ginkgo programmatically you need
|
||||||
|
to pass in your mutated copies into RunSpecs().
|
||||||
|
|
||||||
|
You can learn more at https://onsi.github.io/ginkgo/#overriding-ginkgos-command-line-configuration-in-the-suite
|
||||||
|
*/
|
||||||
|
func GinkgoConfiguration() (types.SuiteConfig, types.ReporterConfig) {
|
||||||
|
return suiteConfig, reporterConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
GinkgoRandomSeed returns the seed used to randomize spec execution order. It is
|
||||||
|
useful for seeding your own pseudorandom number generators to ensure
|
||||||
|
consistent executions from run to run, where your tests contain variability (for
|
||||||
|
example, when selecting random spec data).
|
||||||
|
|
||||||
|
You can learn more at https://onsi.github.io/ginkgo/#spec-randomization
|
||||||
|
*/
|
||||||
|
func GinkgoRandomSeed() int64 {
|
||||||
|
return suiteConfig.RandomSeed
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
GinkgoParallelProcess returns the parallel process number for the current ginkgo process
|
||||||
|
The process number is 1-indexed. You can use GinkgoParallelProcess() to shard access to shared
|
||||||
|
resources across your suites. You can learn more about patterns for sharding at https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs
|
||||||
|
|
||||||
|
For more on how specs are parallelized in Ginkgo, see http://onsi.github.io/ginkgo/#spec-parallelization
|
||||||
|
*/
|
||||||
|
func GinkgoParallelProcess() int {
|
||||||
|
return suiteConfig.ParallelProcess
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant
|
||||||
|
when running in parallel and output to stdout/stderr is being intercepted. You generally
|
||||||
|
don't need to call this function - however there are cases when Ginkgo's output interception
|
||||||
|
mechanisms can interfere with external processes launched by the test process.
|
||||||
|
|
||||||
|
In particular, if an external process is launched that has cmd.Stdout/cmd.Stderr set to os.Stdout/os.Stderr
|
||||||
|
then Ginkgo's output interceptor will hang. To circumvent this, set cmd.Stdout/cmd.Stderr to GinkgoWriter.
|
||||||
|
If, for some reason, you aren't able to do that, you can PauseOutputInterception() before starting the process
|
||||||
|
then ResumeOutputInterception() after starting it.
|
||||||
|
|
||||||
|
Note that PauseOutputInterception() does not cause stdout writes to print to the console -
|
||||||
|
this simply stops intercepting and storing stdout writes to an internal buffer.
|
||||||
|
*/
|
||||||
|
func PauseOutputInterception() {
|
||||||
|
if outputInterceptor == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
outputInterceptor.PauseIntercepting()
|
||||||
|
}
|
||||||
|
|
||||||
|
//ResumeOutputInterception() - see docs for PauseOutputInterception()
|
||||||
|
func ResumeOutputInterception() {
|
||||||
|
if outputInterceptor == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
outputInterceptor.ResumeIntercepting()
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
RunSpecs is the entry point for the Ginkgo spec runner.
|
||||||
|
|
||||||
|
You must call this within a Golang testing TestX(t *testing.T) function.
|
||||||
|
If you bootstrapped your suite with "ginkgo bootstrap" this is already
|
||||||
|
done for you.
|
||||||
|
|
||||||
|
Ginkgo is typically configured via command-line flags. This configuration
|
||||||
|
can be overridden, however, and passed into RunSpecs as optional arguments:
|
||||||
|
|
||||||
|
func TestMySuite(t *testing.T) {
|
||||||
|
RegisterFailHandler(gomega.Fail)
|
||||||
|
// fetch the current config
|
||||||
|
suiteConfig, reporterConfig := GinkgoConfiguration()
|
||||||
|
// adjust it
|
||||||
|
suiteConfig.SkipStrings = []string{"NEVER-RUN"}
|
||||||
|
reporterConfig.FullTrace = true
|
||||||
|
// pass it in to RunSpecs
|
||||||
|
RunSpecs(t, "My Suite", suiteConfig, reporterConfig)
|
||||||
|
}
|
||||||
|
|
||||||
|
Note that some configuration changes can lead to undefined behavior. For example,
|
||||||
|
you should not change ParallelProcess or ParallelTotal as the Ginkgo CLI is responsible
|
||||||
|
for setting these and orchestrating parallel specs across the parallel processes. See http://onsi.github.io/ginkgo/#spec-parallelization
|
||||||
|
for more on how specs are parallelized in Ginkgo.
|
||||||
|
|
||||||
|
You can also pass suite-level Label() decorators to RunSpecs. The passed-in labels will apply to all specs in the suite.
|
||||||
|
*/
|
||||||
|
func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
|
||||||
|
if suiteDidRun {
|
||||||
|
exitIfErr(types.GinkgoErrors.RerunningSuite())
|
||||||
|
}
|
||||||
|
suiteDidRun = true
|
||||||
|
|
||||||
|
suiteLabels := Labels{}
|
||||||
|
configErrors := []error{}
|
||||||
|
for _, arg := range args {
|
||||||
|
switch arg := arg.(type) {
|
||||||
|
case types.SuiteConfig:
|
||||||
|
suiteConfig = arg
|
||||||
|
case types.ReporterConfig:
|
||||||
|
reporterConfig = arg
|
||||||
|
case Labels:
|
||||||
|
suiteLabels = append(suiteLabels, arg...)
|
||||||
|
default:
|
||||||
|
configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
exitIfErrors(configErrors)
|
||||||
|
|
||||||
|
configErrors = types.VetConfig(flagSet, suiteConfig, reporterConfig)
|
||||||
|
if len(configErrors) > 0 {
|
||||||
|
fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{red}}Ginkgo detected configuration issues:{{/}}\n"))
|
||||||
|
for _, err := range configErrors {
|
||||||
|
fmt.Fprintf(formatter.ColorableStdErr, err.Error())
|
||||||
|
}
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
var reporter reporters.Reporter
|
||||||
|
if suiteConfig.ParallelTotal == 1 {
|
||||||
|
reporter = reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut)
|
||||||
|
outputInterceptor = internal.NoopOutputInterceptor{}
|
||||||
|
client = nil
|
||||||
|
} else {
|
||||||
|
reporter = reporters.NoopReporter{}
|
||||||
|
switch strings.ToLower(suiteConfig.OutputInterceptorMode) {
|
||||||
|
case "swap":
|
||||||
|
outputInterceptor = internal.NewOSGlobalReassigningOutputInterceptor()
|
||||||
|
case "none":
|
||||||
|
outputInterceptor = internal.NoopOutputInterceptor{}
|
||||||
|
default:
|
||||||
|
outputInterceptor = internal.NewOutputInterceptor()
|
||||||
|
}
|
||||||
|
client = parallel_support.NewClient(suiteConfig.ParallelHost)
|
||||||
|
if !client.Connect() {
|
||||||
|
client = nil
|
||||||
|
exitIfErr(types.GinkgoErrors.UnreachableParallelHost(suiteConfig.ParallelHost))
|
||||||
|
}
|
||||||
|
defer client.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
writer := GinkgoWriter.(*internal.Writer)
|
||||||
|
if reporterConfig.Verbose && suiteConfig.ParallelTotal == 1 {
|
||||||
|
writer.SetMode(internal.WriterModeStreamAndBuffer)
|
||||||
|
} else {
|
||||||
|
writer.SetMode(internal.WriterModeBufferOnly)
|
||||||
|
}
|
||||||
|
|
||||||
|
if reporterConfig.WillGenerateReport() {
|
||||||
|
registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig)
|
||||||
|
}
|
||||||
|
|
||||||
|
err := global.Suite.BuildTree()
|
||||||
|
exitIfErr(err)
|
||||||
|
|
||||||
|
suitePath, err := os.Getwd()
|
||||||
|
exitIfErr(err)
|
||||||
|
suitePath, err = filepath.Abs(suitePath)
|
||||||
|
exitIfErr(err)
|
||||||
|
|
||||||
|
passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(suiteConfig.Timeout, client), client, suiteConfig)
|
||||||
|
outputInterceptor.Shutdown()
|
||||||
|
|
||||||
|
flagSet.ValidateDeprecations(deprecationTracker)
|
||||||
|
if deprecationTracker.DidTrackDeprecations() {
|
||||||
|
fmt.Fprintln(formatter.ColorableStdErr, deprecationTracker.DeprecationsReport())
|
||||||
|
}
|
||||||
|
|
||||||
|
if !passed {
|
||||||
|
t.Fail()
|
||||||
|
}
|
||||||
|
|
||||||
|
if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
|
||||||
|
fmt.Println("PASS | FOCUSED")
|
||||||
|
os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
|
||||||
|
}
|
||||||
|
return passed
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Skip instructs Ginkgo to skip the current spec
|
||||||
|
|
||||||
|
You can call Skip in any Setup or Subject node closure.
|
||||||
|
|
||||||
|
For more on how to filter specs in Ginkgo see https://onsi.github.io/ginkgo/#filtering-specs
|
||||||
|
*/
|
||||||
|
func Skip(message string, callerSkip ...int) {
|
||||||
|
skip := 0
|
||||||
|
if len(callerSkip) > 0 {
|
||||||
|
skip = callerSkip[0]
|
||||||
|
}
|
||||||
|
cl := types.NewCodeLocationWithStackTrace(skip + 1)
|
||||||
|
global.Failer.Skip(message, cl)
|
||||||
|
panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
|
||||||
|
|
||||||
|
Under the hood, Fail panics to end execution of the current spec. Ginkgo will catch this panic and proceed with
|
||||||
|
the subsequent spec. If you call Fail, or make an assertion, within a goroutine launched by your spec you must
|
||||||
|
add defer GinkgoRecover() to the goroutine to catch the panic emitted by Fail.
|
||||||
|
|
||||||
|
You can call Fail in any Setup or Subject node closure.
|
||||||
|
|
||||||
|
You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure
|
||||||
|
*/
|
||||||
|
func Fail(message string, callerSkip ...int) {
|
||||||
|
skip := 0
|
||||||
|
if len(callerSkip) > 0 {
|
||||||
|
skip = callerSkip[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
cl := types.NewCodeLocationWithStackTrace(skip + 1)
|
||||||
|
global.Failer.Fail(message, cl)
|
||||||
|
panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
AbortSuite instructs Ginkgo to fail the current spec and skip all subsequent specs, thereby aborting the suite.
|
||||||
|
|
||||||
|
You can call AbortSuite in any Setup or Subject node closure.
|
||||||
|
|
||||||
|
You can learn more about how Ginkgo handles suite interruptions here: https://onsi.github.io/ginkgo/#interrupting-aborting-and-timing-out-suites
|
||||||
|
*/
|
||||||
|
func AbortSuite(message string, callerSkip ...int) {
|
||||||
|
skip := 0
|
||||||
|
if len(callerSkip) > 0 {
|
||||||
|
skip = callerSkip[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
cl := types.NewCodeLocationWithStackTrace(skip + 1)
|
||||||
|
global.Failer.AbortSuite(message, cl)
|
||||||
|
panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
|
||||||
|
Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
|
||||||
|
calls out to Gomega
|
||||||
|
|
||||||
|
Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
|
||||||
|
further assertions from running. This panic must be recovered. Normally, Ginkgo recovers the panic for you,
|
||||||
|
however if a panic originates on a goroutine *launched* from one of your specs there's no
|
||||||
|
way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
|
||||||
|
|
||||||
|
You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure
|
||||||
|
*/
|
||||||
|
func GinkgoRecover() {
|
||||||
|
e := recover()
|
||||||
|
if e != nil {
|
||||||
|
global.Failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// pushNode is used by the various test construction DSL methods to push nodes onto the suite
|
||||||
|
// it handles returned errors, emits a detailed error message to help the user learn what they may have done wrong, then exits
|
||||||
|
func pushNode(node internal.Node, errors []error) bool {
|
||||||
|
exitIfErrors(errors)
|
||||||
|
exitIfErr(global.Suite.PushNode(node))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Describe nodes are Container nodes that allow you to organize your specs. A Describe node's closure can contain any number of
|
||||||
|
Setup nodes (e.g. BeforeEach, AfterEach, JustBeforeEach), and Subject nodes (i.e. It).
|
||||||
|
|
||||||
|
Context and When nodes are aliases for Describe - use whichever gives your suite a better narrative flow. It is idiomatic
|
||||||
|
to Describe the behavior of an object or function and, within that Describe, outline a number of Contexts and Whens.
|
||||||
|
|
||||||
|
You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes
|
||||||
|
In addition, container nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
|
||||||
|
*/
|
||||||
|
func Describe(text string, args ...interface{}) bool {
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
FDescribe focuses specs within the Describe block.
|
||||||
|
*/
|
||||||
|
func FDescribe(text string, args ...interface{}) bool {
|
||||||
|
args = append(args, internal.Focus)
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
PDescribe marks specs within the Describe block as pending.
|
||||||
|
*/
|
||||||
|
func PDescribe(text string, args ...interface{}) bool {
|
||||||
|
args = append(args, internal.Pending)
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
XDescribe marks specs within the Describe block as pending.
|
||||||
|
|
||||||
|
XDescribe is an alias for PDescribe
|
||||||
|
*/
|
||||||
|
var XDescribe = PDescribe
|
||||||
|
|
||||||
|
/* Context is an alias for Describe - it generates the exact same kind of Container node */
|
||||||
|
var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe
|
||||||
|
|
||||||
|
/* When is an alias for Describe - it generates the exact same kind of Container node */
|
||||||
|
func When(text string, args ...interface{}) bool {
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
/* FWhen focuses specs within the When block - it generates the exact same kind of Container node */
|
||||||
|
func FWhen(text string, args ...interface{}) bool {
|
||||||
|
args = append(args, internal.Focus)
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
/* PWhen marks specs within the When block as pending - it generates the exact same kind of Container node */
|
||||||
|
func PWhen(text string, args ...interface{}) bool {
|
||||||
|
args = append(args, internal.Pending)
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
var XWhen = PWhen
|
||||||
|
|
||||||
|
/*
|
||||||
|
It nodes are Subject nodes that contain your spec code and assertions.
|
||||||
|
|
||||||
|
Each It node corresponds to an individual Ginkgo spec. You cannot nest any other Ginkgo nodes within an It node's closure.
|
||||||
|
|
||||||
|
You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it
|
||||||
|
In addition, subject nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
|
||||||
|
*/
|
||||||
|
func It(text string, args ...interface{}) bool {
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
FIt allows you to focus an individual It.
|
||||||
|
*/
|
||||||
|
func FIt(text string, args ...interface{}) bool {
|
||||||
|
args = append(args, internal.Focus)
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
PIt allows you to mark an individual It as pending.
|
||||||
|
*/
|
||||||
|
func PIt(text string, args ...interface{}) bool {
|
||||||
|
args = append(args, internal.Pending)
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
XIt allows you to mark an individual It as pending.
|
||||||
|
|
||||||
|
XIt is an alias for PIt
|
||||||
|
*/
|
||||||
|
var XIt = PIt
|
||||||
|
|
||||||
|
/*
|
||||||
|
Specify is an alias for It - it can allow for more natural wording in some contexts.
|
||||||
|
*/
|
||||||
|
var Specify, FSpecify, PSpecify, XSpecify = It, FIt, PIt, XIt
|
||||||
|
|
||||||
|
/*
|
||||||
|
By allows you to better document complex Specs.
|
||||||
|
|
||||||
|
Generally you should try to keep your Its short and to the point. This is not always possible, however,
|
||||||
|
especially in the context of integration tests that capture complex or lengthy workflows.
|
||||||
|
|
||||||
|
By allows you to document such flows. By may be called within a Setup or Subject node (It, BeforeEach, etc...)
|
||||||
|
and will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function.
|
||||||
|
|
||||||
|
By will also generate and attach a ReportEntry to the spec. This will ensure that By annotations appear in Ginkgo's machine-readable reports.
|
||||||
|
|
||||||
|
Note that By does not generate a new Ginkgo node - rather it is simply syntactic sugar around GinkgoWriter and AddReportEntry
|
||||||
|
You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by
|
||||||
|
*/
|
||||||
|
func By(text string, callback ...func()) {
|
||||||
|
if !global.Suite.InRunPhase() {
|
||||||
|
exitIfErr(types.GinkgoErrors.ByNotDuringRunPhase(types.NewCodeLocation(1)))
|
||||||
|
}
|
||||||
|
value := struct {
|
||||||
|
Text string
|
||||||
|
Duration time.Duration
|
||||||
|
}{
|
||||||
|
Text: text,
|
||||||
|
}
|
||||||
|
t := time.Now()
|
||||||
|
AddReportEntry("By Step", ReportEntryVisibilityNever, Offset(1), &value, t)
|
||||||
|
formatter := formatter.NewWithNoColorBool(reporterConfig.NoColor)
|
||||||
|
GinkgoWriter.Println(formatter.F("{{bold}}STEP:{{/}} %s {{gray}}%s{{/}}", text, t.Format(types.GINKGO_TIME_FORMAT)))
|
||||||
|
if len(callback) == 1 {
|
||||||
|
callback[0]()
|
||||||
|
value.Duration = time.Since(t)
|
||||||
|
}
|
||||||
|
if len(callback) > 1 {
|
||||||
|
panic("just one callback per By, please")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
BeforeSuite nodes are suite-level Setup nodes that run just once before any specs are run.
|
||||||
|
When running in parallel, each parallel process will call BeforeSuite.
|
||||||
|
|
||||||
|
You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
|
||||||
|
|
||||||
|
You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure.
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
|
||||||
|
*/
|
||||||
|
func BeforeSuite(body func()) bool {
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", body))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
AfterSuite nodes are suite-level Setup nodes run after all specs have finished - regardless of whether specs have passed or failed.
|
||||||
|
AfterSuite node closures always run, even if Ginkgo receives an interrupt signal (^C), in order to ensure cleanup occurs.
|
||||||
|
|
||||||
|
When running in parallel, each parallel process will call AfterSuite.
|
||||||
|
|
||||||
|
You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
|
||||||
|
|
||||||
|
You cannot nest any other Ginkgo nodes within an AfterSuite node's closure.
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
|
||||||
|
*/
|
||||||
|
func AfterSuite(body func()) bool {
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", body))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
SynchronizedBeforeSuite nodes allow you to perform some of the suite setup just once - on parallel process #1 - and then pass information
|
||||||
|
from that setup to the rest of the suite setup on all processes. This is useful for performing expensive or singleton setup once, then passing
|
||||||
|
information from that setup to all parallel processes.
|
||||||
|
|
||||||
|
SynchronizedBeforeSuite accomplishes this by taking *two* function arguments and passing data between them.
|
||||||
|
The first function is only run on parallel process #1. The second is run on all processes, but *only* after the first function completes successfully. The functions have the following signatures:
|
||||||
|
|
||||||
|
The first function (which only runs on process #1) has the signature:
|
||||||
|
|
||||||
|
func() []byte
|
||||||
|
|
||||||
|
The byte array returned by the first function is then passed to the second function, which has the signature:
|
||||||
|
|
||||||
|
func(data []byte)
|
||||||
|
|
||||||
|
You cannot nest any other Ginkgo nodes within a SynchronizedBeforeSuite node's closure.
|
||||||
|
You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
|
||||||
|
*/
|
||||||
|
func SynchronizedBeforeSuite(process1Body func() []byte, allProcessBody func([]byte)) bool {
|
||||||
|
return pushNode(internal.NewSynchronizedBeforeSuiteNode(process1Body, allProcessBody, types.NewCodeLocation(1)))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
SynchronizedAfterSuite nodes complement the SynchronizedBeforeSuite nodes in solving the problem of splitting clean up into a piece that runs on all processes
|
||||||
|
and a piece that must only run once - on process #1.
|
||||||
|
|
||||||
|
SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all processes. The second runs only on parallel process #1
|
||||||
|
and *only* after all other processes have finished and exited. This ensures that process #1, and any resources it is managing, remain alive until
|
||||||
|
all other processes are finished.
|
||||||
|
|
||||||
|
Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accomplish similar results.
|
||||||
|
|
||||||
|
You cannot nest any other Ginkgo nodes within a SynchronizedAfterSuite node's closure.
|
||||||
|
You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
|
||||||
|
*/
|
||||||
|
func SynchronizedAfterSuite(allProcessBody func(), process1Body func()) bool {
|
||||||
|
return pushNode(internal.NewSynchronizedAfterSuiteNode(allProcessBody, process1Body, types.NewCodeLocation(1)))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
BeforeEach nodes are Setup nodes whose closures run before It node closures. When multiple BeforeEach nodes
|
||||||
|
are defined in nested Container nodes the outermost BeforeEach node closures are run first.
|
||||||
|
|
||||||
|
You cannot nest any other Ginkgo nodes within a BeforeEach node's closure.
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach
|
||||||
|
*/
|
||||||
|
func BeforeEach(args ...interface{}) bool {
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
JustBeforeEach nodes are similar to BeforeEach nodes, however they are guaranteed to run *after* all BeforeEach node closures - just before the It node closure.
|
||||||
|
This can allow you to separate configuration from creation of resources for a spec.
|
||||||
|
|
||||||
|
You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure.
|
||||||
|
You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach
|
||||||
|
*/
|
||||||
|
func JustBeforeEach(args ...interface{}) bool {
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
AfterEach nodes are Setup nodes whose closures run after It node closures. When multiple AfterEach nodes
|
||||||
|
are defined in nested Container nodes the innermost AfterEach node closures are run first.
|
||||||
|
|
||||||
|
Note that you can also use DeferCleanup() in other Setup or Subject nodes to accomplish similar results.
|
||||||
|
|
||||||
|
You cannot nest any other Ginkgo nodes within an AfterEach node's closure.
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup
|
||||||
|
*/
|
||||||
|
func AfterEach(args ...interface{}) bool {
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
JustAfterEach nodes are similar to AfterEach nodes, however they are guaranteed to run *before* all AfterEach node closures - just after the It node closure. This can allow you to separate diagnostics collection from teardown for a spec.
|
||||||
|
|
||||||
|
You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure.
|
||||||
|
You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach
|
||||||
|
*/
|
||||||
|
func JustAfterEach(args ...interface{}) bool {
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
BeforeAll nodes are Setup nodes that can occur inside Ordered containers. They run just once before any specs in the Ordered container run.
|
||||||
|
|
||||||
|
Multiple BeforeAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container.
|
||||||
|
|
||||||
|
You cannot nest any other Ginkgo nodes within a BeforeAll node's closure.
|
||||||
|
You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
|
||||||
|
And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
|
||||||
|
*/
|
||||||
|
func BeforeAll(args ...interface{}) bool {
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
AfterAll nodes are Setup nodes that can occur inside Ordered containers. They run just once after all specs in the Ordered container have run.
|
||||||
|
|
||||||
|
Multiple AfterAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container.
|
||||||
|
|
||||||
|
Note that you can also use DeferCleanup() in a BeforeAll node to accomplish similar behavior.
|
||||||
|
|
||||||
|
You cannot nest any other Ginkgo nodes within an AfterAll node's closure.
|
||||||
|
You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
|
||||||
|
And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
|
||||||
|
*/
|
||||||
|
func AfterAll(args ...interface{}) bool {
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
DeferCleanup can be called within any Setup or Subject node to register a cleanup callback that Ginkgo will call at the appropriate time to clean up after the spec.
|
||||||
|
|
||||||
|
DeferCleanup can be passed:
|
||||||
|
1. A function that takes no arguments and returns no values.
|
||||||
|
2. A function that returns an error (in which case it will assert that the returned error was nil, or it will fail the spec).
|
||||||
|
3. A function that takes arguments (and optionally returns an error) followed by a list of arguments to pass to the function. For example:
|
||||||
|
|
||||||
|
BeforeEach(func() {
|
||||||
|
DeferCleanup(os.Setenv, "FOO", os.Getenv("FOO"))
|
||||||
|
os.SetEnv("FOO", "BAR")
|
||||||
|
})
|
||||||
|
|
||||||
|
will register a cleanup handler that restores the environment variable "FOO" to its current value (captured via os.Getenv("FOO")) after the spec runs, and then sets the environment variable "FOO" to "BAR" for the current spec.
|
||||||
|
|
||||||
|
When DeferCleanup is called in BeforeEach, JustBeforeEach, It, AfterEach, or JustAfterEach the registered callback will be invoked when the spec completes (i.e. it will behave like an AfterEach node)
|
||||||
|
When DeferCleanup is called in BeforeAll or AfterAll the registered callback will be invoked when the ordered container completes (i.e. it will behave like an AfterAll node)
|
||||||
|
When DeferCleanup is called in BeforeSuite, SynchronizedBeforeSuite, AfterSuite, or SynchronizedAfterSuite the registered callback will be invoked when the suite completes (i.e. it will behave like an AfterSuite node)
|
||||||
|
|
||||||
|
Note that DeferCleanup does not represent a node but rather dynamically generates the appropriate type of cleanup node based on the context in which it is called. As such you must call DeferCleanup within a Setup or Subject node, and not within a Container node.
|
||||||
|
You can learn more about DeferCleanup here: https://onsi.github.io/ginkgo/#cleaning-up-our-cleanup-code-defercleanup
|
||||||
|
*/
|
||||||
|
func DeferCleanup(args ...interface{}) {
|
||||||
|
fail := func(message string, cl types.CodeLocation) {
|
||||||
|
global.Failer.Fail(message, cl)
|
||||||
|
}
|
||||||
|
pushNode(internal.NewCleanupNode(fail, args...))
|
||||||
|
}

82
vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
package ginkgo
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/onsi/ginkgo/v2/internal"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
Offset(uint) is a decorator that allows you to change the stack-frame offset used when computing the line number of the node in question.
|
||||||
|
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#the-offset-decorator
|
||||||
|
You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
|
||||||
|
*/
|
||||||
|
type Offset = internal.Offset
|
||||||
|
|
||||||
|
/*
|
||||||
|
FlakeAttempts(uint N) is a decorator that allows you to mark individual specs or spec containers as flaky. Ginkgo will run them up to `N` times until they pass.
|
||||||
|
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#repeating-spec-runs-and-managing-flaky-specs
|
||||||
|
You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
|
||||||
|
*/
|
||||||
|
type FlakeAttempts = internal.FlakeAttempts
|
||||||
|
|
||||||
|
/*
|
||||||
|
Focus is a decorator that allows you to mark a spec or container as focused. Identical to FIt and FDescribe.
|
||||||
|
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#filtering-specs
|
||||||
|
You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
|
||||||
|
*/
|
||||||
|
const Focus = internal.Focus
|
||||||
|
|
||||||
|
/*
|
||||||
|
Pending is a decorator that allows you to mark a spec or container as pending. Identical to PIt and PDescribe.
|
||||||
|
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#filtering-specs
|
||||||
|
You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
|
||||||
|
*/
|
||||||
|
const Pending = internal.Pending
|
||||||
|
|
||||||
|
/*
|
||||||
|
Serial is a decorator that allows you to mark a spec or container as serial. These specs will never run in parallel with other specs.
|
||||||
|
Tests in ordered containers cannot be marked as serial - mark the ordered container instead.
|
||||||
|
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#serial-specs
|
||||||
|
You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
|
||||||
|
*/
|
||||||
|
const Serial = internal.Serial
|
||||||
|
|
||||||
|
/*
|
||||||
|
Ordered is a decorator that allows you to mark a container as ordered. Tests in the container will always run in the order they appear.
|
||||||
|
They will never be randomized and they will never run in parallel with one another, though they may run in parallel with other specs.
|
||||||
|
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
|
||||||
|
You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
|
||||||
|
*/
|
||||||
|
const Ordered = internal.Ordered
|
||||||
|
|
||||||
|
/*
|
||||||
|
OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEach, JustBeforeEach, and JustAfterEach setup nodes to run once
|
||||||
|
per ordered context. Normally these setup nodes run around each individual spec, with OncePerOrdered they will run once around the set of specs in an ordered container.
|
||||||
|
The behavior for non-Ordered containers/specs is unchanged.
|
||||||
|
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#setup-around-ordered-containers-the-onceperordered-decorator
|
||||||
|
You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
|
||||||
|
*/
|
||||||
|
const OncePerOrdered = internal.OncePerOrdered
|
||||||
|
|
||||||
|
/*
|
||||||
|
Label decorates specs with Labels. Multiple labels can be passed to Label and these can be arbitrary strings but must not include the following characters: "&|!,()/".
|
||||||
|
Labels can be applied to container and subject nodes, but not setup nodes. You can provide multiple Labels to a given node and a spec's labels are the union of all labels in its node hierarchy.
|
||||||
|
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#spec-labels
|
||||||
|
You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
|
||||||
|
*/
|
||||||
|
func Label(labels ...string) Labels {
|
||||||
|
return Labels(labels)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Labels are the type for spec Label decorators. Use Label(...) to construct Labels.
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#spec-labels
|
||||||
|
*/
|
||||||
|
type Labels = internal.Labels

135
vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
package ginkgo
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/internal"
|
||||||
|
"github.com/onsi/ginkgo/v2/internal/global"
|
||||||
|
"github.com/onsi/ginkgo/v2/reporters"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
Deprecated: Done Channel for asynchronous testing
|
||||||
|
|
||||||
|
The Done channel pattern is no longer supported in Ginkgo 2.0.
|
||||||
|
See here for better patterns for asynchronous testing: https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing
|
||||||
|
|
||||||
|
For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-async-testing
|
||||||
|
*/
|
||||||
|
type Done = internal.Done
|
||||||
|
|
||||||
|
/*
|
||||||
|
Deprecated: Custom Ginkgo test reporters are deprecated in Ginkgo 2.0.
|
||||||
|
|
||||||
|
Use Ginkgo's reporting nodes and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure
|
||||||
|
For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
|
||||||
|
*/
|
||||||
|
type Reporter = reporters.DeprecatedReporter
|
||||||
|
|
||||||
|
/*
|
||||||
|
Deprecated: Custom Reporters have been removed in Ginkgo 2.0. RunSpecsWithDefaultAndCustomReporters will simply call RunSpecs()
|
||||||
|
|
||||||
|
Use Ginkgo's reporting nodes and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure
|
||||||
|
For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
|
||||||
|
*/
|
||||||
|
func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, _ []Reporter) bool {
|
||||||
|
deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter())
|
||||||
|
return RunSpecs(t, description)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Deprecated: Custom Reporters have been removed in Ginkgo 2.0. RunSpecsWithCustomReporters will simply call RunSpecs()
|
||||||
|
|
||||||
|
Use Ginkgo's reporting nodes and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure
|
||||||
|
For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
|
||||||
|
*/
|
||||||
|
func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, _ []Reporter) bool {
|
||||||
|
deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter())
|
||||||
|
return RunSpecs(t, description)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Deprecated: GinkgoTestDescription has been replaced with SpecReport.
|
||||||
|
|
||||||
|
Use CurrentSpecReport() instead.
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
|
||||||
|
The SpecReport type is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport
|
||||||
|
*/
|
||||||
|
type DeprecatedGinkgoTestDescription struct {
|
||||||
|
FullTestText string
|
||||||
|
ComponentTexts []string
|
||||||
|
TestText string
|
||||||
|
|
||||||
|
FileName string
|
||||||
|
LineNumber int
|
||||||
|
|
||||||
|
Failed bool
|
||||||
|
Duration time.Duration
|
||||||
|
}
|
||||||
|
type GinkgoTestDescription = DeprecatedGinkgoTestDescription
|
||||||
|
|
||||||
|
/*
|
||||||
|
Deprecated: CurrentGinkgoTestDescription has been replaced with CurrentSpecReport.
|
||||||
|
|
||||||
|
Use CurrentSpecReport() instead.
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
|
||||||
|
The SpecReport type is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport
|
||||||
|
*/
|
||||||
|
func CurrentGinkgoTestDescription() DeprecatedGinkgoTestDescription {
|
||||||
|
deprecationTracker.TrackDeprecation(
|
||||||
|
types.Deprecations.CurrentGinkgoTestDescription(),
|
||||||
|
types.NewCodeLocation(1),
|
||||||
|
)
|
||||||
|
report := global.Suite.CurrentSpecReport()
|
||||||
|
if report.State == types.SpecStateInvalid {
|
||||||
|
return GinkgoTestDescription{}
|
||||||
|
}
|
||||||
|
componentTexts := []string{}
|
||||||
|
componentTexts = append(componentTexts, report.ContainerHierarchyTexts...)
|
||||||
|
componentTexts = append(componentTexts, report.LeafNodeText)
|
||||||
|
|
||||||
|
return DeprecatedGinkgoTestDescription{
|
||||||
|
ComponentTexts: componentTexts,
|
||||||
|
FullTestText: report.FullText(),
|
||||||
|
TestText: report.LeafNodeText,
|
||||||
|
FileName: report.LeafNodeLocation.FileName,
|
||||||
|
LineNumber: report.LeafNodeLocation.LineNumber,
|
||||||
|
Failed: report.State.Is(types.SpecStateFailureStates),
|
||||||
|
Duration: report.RunTime,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Deprecated: GinkgoParallelNode() has been renamed to GinkgoParallelProcess()
|
||||||
|
*/
|
||||||
|
func GinkgoParallelNode() int {
|
||||||
|
deprecationTracker.TrackDeprecation(
|
||||||
|
types.Deprecations.ParallelNode(),
|
||||||
|
types.NewCodeLocation(1),
|
||||||
|
)
|
||||||
|
return GinkgoParallelProcess()
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Deprecated: Benchmarker has been removed from Ginkgo 2.0
|
||||||
|
|
||||||
|
Use Gomega's gmeasure package instead.
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code
|
||||||
|
*/
|
||||||
|
type Benchmarker interface {
|
||||||
|
Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
|
||||||
|
RecordValue(name string, value float64, info ...interface{})
|
||||||
|
RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{})
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Deprecated: Measure() has been removed from Ginkgo 2.0
|
||||||
|
|
||||||
|
Use Gomega's gmeasure package instead.
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code
|
||||||
|
*/
|
||||||
|
func Measure(_ ...interface{}) bool {
|
||||||
|
deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), types.NewCodeLocation(1))
|
||||||
|
return true
|
||||||
|
}

41
vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
// +build !windows
|
||||||
|
|
||||||
|
/*
|
||||||
|
These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com
|
||||||
|
|
||||||
|
* go-colorable: <https://github.com/mattn/go-colorable>
|
||||||
|
* go-isatty: <https://github.com/mattn/go-isatty>
|
||||||
|
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2016 Yasuhiro Matsumoto
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package formatter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
func newColorable(file *os.File) io.Writer {
|
||||||
|
return file
|
||||||
|
}

809
vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go
generated
vendored
Normal file
@ -0,0 +1,809 @@
/*
|
||||||
|
These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com
|
||||||
|
|
||||||
|
* go-colorable: <https://github.com/mattn/go-colorable>
|
||||||
|
* go-isatty: <https://github.com/mattn/go-isatty>
|
||||||
|
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2016 Yasuhiro Matsumoto
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package formatter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||||
|
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
|
||||||
|
procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
|
||||||
|
procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
|
||||||
|
procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
|
||||||
|
procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
|
||||||
|
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
|
||||||
|
)
|
||||||
|
|
||||||
|
func isTerminal(fd uintptr) bool {
|
||||||
|
var st uint32
|
||||||
|
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
|
||||||
|
return r != 0 && e == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
foregroundBlue = 0x1
|
||||||
|
foregroundGreen = 0x2
|
||||||
|
foregroundRed = 0x4
|
||||||
|
foregroundIntensity = 0x8
|
||||||
|
foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
|
||||||
|
backgroundBlue = 0x10
|
||||||
|
backgroundGreen = 0x20
|
||||||
|
backgroundRed = 0x40
|
||||||
|
backgroundIntensity = 0x80
|
||||||
|
backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
|
||||||
|
)
|
||||||
|
|
||||||
|
type wchar uint16
|
||||||
|
type short int16
|
||||||
|
type dword uint32
|
||||||
|
type word uint16
|
||||||
|
|
||||||
|
type coord struct {
|
||||||
|
x short
|
||||||
|
y short
|
||||||
|
}
|
||||||
|
|
||||||
|
type smallRect struct {
|
||||||
|
left short
|
||||||
|
top short
|
||||||
|
right short
|
||||||
|
bottom short
|
||||||
|
}
|
||||||
|
|
||||||
|
type consoleScreenBufferInfo struct {
|
||||||
|
size coord
|
||||||
|
cursorPosition coord
|
||||||
|
attributes word
|
||||||
|
window smallRect
|
||||||
|
maximumWindowSize coord
|
||||||
|
}
|
||||||
|
|
||||||
|
type writer struct {
|
||||||
|
out io.Writer
|
||||||
|
handle syscall.Handle
|
||||||
|
lastbuf bytes.Buffer
|
||||||
|
oldattr word
|
||||||
|
}
|
||||||
|
|
||||||
|
func newColorable(file *os.File) io.Writer {
|
||||||
|
if file == nil {
|
||||||
|
panic("nil passed instead of *os.File to NewColorable()")
|
||||||
|
}
|
||||||
|
|
||||||
|
if isTerminal(file.Fd()) {
|
||||||
|
var csbi consoleScreenBufferInfo
|
||||||
|
handle := syscall.Handle(file.Fd())
|
||||||
|
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
|
||||||
|
return &writer{out: file, handle: handle, oldattr: csbi.attributes}
|
||||||
|
} else {
|
||||||
|
return file
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var color256 = map[int]int{
|
||||||
|
0: 0x000000,
|
||||||
|
1: 0x800000,
|
||||||
|
2: 0x008000,
|
||||||
|
3: 0x808000,
|
||||||
|
4: 0x000080,
|
||||||
|
5: 0x800080,
|
||||||
|
6: 0x008080,
|
||||||
|
7: 0xc0c0c0,
|
||||||
|
8: 0x808080,
|
||||||
|
9: 0xff0000,
|
||||||
|
10: 0x00ff00,
|
||||||
|
11: 0xffff00,
|
||||||
|
12: 0x0000ff,
|
||||||
|
13: 0xff00ff,
|
||||||
|
14: 0x00ffff,
|
||||||
|
15: 0xffffff,
|
||||||
|
16: 0x000000,
|
||||||
|
17: 0x00005f,
|
||||||
|
18: 0x000087,
|
||||||
|
19: 0x0000af,
|
||||||
|
20: 0x0000d7,
|
||||||
|
21: 0x0000ff,
|
||||||
|
22: 0x005f00,
|
||||||
|
23: 0x005f5f,
|
||||||
|
24: 0x005f87,
|
||||||
|
25: 0x005faf,
|
||||||
|
26: 0x005fd7,
|
||||||
|
27: 0x005fff,
|
||||||
|
28: 0x008700,
|
||||||
|
29: 0x00875f,
|
||||||
|
30: 0x008787,
|
||||||
|
31: 0x0087af,
|
||||||
|
32: 0x0087d7,
|
||||||
|
33: 0x0087ff,
|
||||||
|
34: 0x00af00,
|
||||||
|
35: 0x00af5f,
|
||||||
|
36: 0x00af87,
|
||||||
|
37: 0x00afaf,
|
||||||
|
38: 0x00afd7,
|
||||||
|
39: 0x00afff,
|
||||||
|
40: 0x00d700,
|
||||||
|
41: 0x00d75f,
|
||||||
|
42: 0x00d787,
|
||||||
|
43: 0x00d7af,
|
||||||
|
44: 0x00d7d7,
|
||||||
|
45: 0x00d7ff,
|
||||||
|
46: 0x00ff00,
|
||||||
|
47: 0x00ff5f,
|
||||||
|
48: 0x00ff87,
|
||||||
|
49: 0x00ffaf,
|
||||||
|
50: 0x00ffd7,
|
||||||
|
51: 0x00ffff,
|
||||||
|
52: 0x5f0000,
|
||||||
|
53: 0x5f005f,
|
||||||
|
54: 0x5f0087,
|
||||||
|
55: 0x5f00af,
|
||||||
|
56: 0x5f00d7,
|
||||||
|
57: 0x5f00ff,
|
||||||
|
58: 0x5f5f00,
|
||||||
|
59: 0x5f5f5f,
|
||||||
|
60: 0x5f5f87,
|
||||||
|
61: 0x5f5faf,
|
||||||
|
62: 0x5f5fd7,
|
||||||
|
63: 0x5f5fff,
|
||||||
|
64: 0x5f8700,
|
||||||
|
65: 0x5f875f,
|
||||||
|
66: 0x5f8787,
|
||||||
|
67: 0x5f87af,
|
||||||
|
68: 0x5f87d7,
|
||||||
|
69: 0x5f87ff,
|
||||||
|
70: 0x5faf00,
|
||||||
|
71: 0x5faf5f,
|
||||||
|
72: 0x5faf87,
|
||||||
|
73: 0x5fafaf,
|
||||||
|
74: 0x5fafd7,
|
||||||
|
75: 0x5fafff,
|
||||||
|
76: 0x5fd700,
|
||||||
|
77: 0x5fd75f,
|
||||||
|
78: 0x5fd787,
|
||||||
|
79: 0x5fd7af,
|
||||||
|
80: 0x5fd7d7,
|
||||||
|
81: 0x5fd7ff,
|
||||||
|
82: 0x5fff00,
|
||||||
|
83: 0x5fff5f,
|
||||||
|
84: 0x5fff87,
|
||||||
|
85: 0x5fffaf,
|
||||||
|
86: 0x5fffd7,
|
||||||
|
87: 0x5fffff,
|
||||||
|
88: 0x870000,
|
||||||
|
89: 0x87005f,
|
||||||
|
90: 0x870087,
|
||||||
|
91: 0x8700af,
|
||||||
|
92: 0x8700d7,
|
||||||
|
93: 0x8700ff,
|
||||||
|
94: 0x875f00,
|
||||||
|
95: 0x875f5f,
|
||||||
|
96: 0x875f87,
|
||||||
|
97: 0x875faf,
|
||||||
|
98: 0x875fd7,
|
||||||
|
99: 0x875fff,
|
||||||
|
100: 0x878700,
|
||||||
|
101: 0x87875f,
|
||||||
|
102: 0x878787,
|
||||||
|
103: 0x8787af,
|
||||||
|
104: 0x8787d7,
|
||||||
|
105: 0x8787ff,
|
||||||
|
106: 0x87af00,
|
||||||
|
107: 0x87af5f,
|
||||||
|
108: 0x87af87,
|
||||||
|
109: 0x87afaf,
|
||||||
|
110: 0x87afd7,
|
||||||
|
111: 0x87afff,
|
||||||
|
112: 0x87d700,
|
||||||
|
113: 0x87d75f,
|
||||||
|
114: 0x87d787,
|
||||||
|
115: 0x87d7af,
|
||||||
|
116: 0x87d7d7,
|
||||||
|
117: 0x87d7ff,
|
||||||
|
118: 0x87ff00,
|
||||||
|
119: 0x87ff5f,
|
||||||
|
120: 0x87ff87,
|
||||||
|
121: 0x87ffaf,
|
||||||
|
122: 0x87ffd7,
|
||||||
|
123: 0x87ffff,
|
||||||
|
124: 0xaf0000,
|
||||||
|
125: 0xaf005f,
|
||||||
|
126: 0xaf0087,
|
||||||
|
127: 0xaf00af,
|
||||||
|
128: 0xaf00d7,
|
||||||
|
129: 0xaf00ff,
|
||||||
|
130: 0xaf5f00,
|
||||||
|
131: 0xaf5f5f,
|
||||||
|
132: 0xaf5f87,
|
||||||
|
133: 0xaf5faf,
|
||||||
|
134: 0xaf5fd7,
|
||||||
|
135: 0xaf5fff,
|
||||||
|
136: 0xaf8700,
|
||||||
|
137: 0xaf875f,
|
||||||
|
138: 0xaf8787,
|
||||||
|
139: 0xaf87af,
|
||||||
|
140: 0xaf87d7,
|
||||||
|
141: 0xaf87ff,
|
||||||
|
142: 0xafaf00,
|
||||||
|
143: 0xafaf5f,
|
||||||
|
144: 0xafaf87,
|
||||||
|
145: 0xafafaf,
|
||||||
|
146: 0xafafd7,
|
||||||
|
147: 0xafafff,
|
||||||
|
148: 0xafd700,
|
||||||
|
149: 0xafd75f,
|
||||||
|
150: 0xafd787,
|
||||||
|
151: 0xafd7af,
|
||||||
|
152: 0xafd7d7,
|
||||||
|
153: 0xafd7ff,
|
||||||
|
154: 0xafff00,
|
||||||
|
155: 0xafff5f,
|
||||||
|
156: 0xafff87,
|
||||||
|
157: 0xafffaf,
|
||||||
|
158: 0xafffd7,
|
||||||
|
159: 0xafffff,
|
||||||
|
160: 0xd70000,
|
||||||
|
161: 0xd7005f,
|
||||||
|
162: 0xd70087,
|
||||||
|
163: 0xd700af,
|
||||||
|
164: 0xd700d7,
|
||||||
|
165: 0xd700ff,
|
||||||
|
166: 0xd75f00,
|
||||||
|
167: 0xd75f5f,
|
||||||
|
168: 0xd75f87,
|
||||||
|
169: 0xd75faf,
|
||||||
|
170: 0xd75fd7,
|
||||||
|
171: 0xd75fff,
|
||||||
|
172: 0xd78700,
|
||||||
|
173: 0xd7875f,
|
||||||
|
174: 0xd78787,
|
||||||
|
175: 0xd787af,
|
||||||
|
176: 0xd787d7,
|
||||||
|
177: 0xd787ff,
|
||||||
|
178: 0xd7af00,
|
||||||
|
179: 0xd7af5f,
|
||||||
|
180: 0xd7af87,
|
||||||
|
181: 0xd7afaf,
|
||||||
|
182: 0xd7afd7,
|
||||||
|
183: 0xd7afff,
|
||||||
|
184: 0xd7d700,
|
||||||
|
185: 0xd7d75f,
|
||||||
|
186: 0xd7d787,
|
||||||
|
187: 0xd7d7af,
|
||||||
|
188: 0xd7d7d7,
|
||||||
|
189: 0xd7d7ff,
|
||||||
|
190: 0xd7ff00,
|
||||||
|
191: 0xd7ff5f,
|
||||||
|
192: 0xd7ff87,
|
||||||
|
193: 0xd7ffaf,
|
||||||
|
194: 0xd7ffd7,
|
||||||
|
195: 0xd7ffff,
|
||||||
|
196: 0xff0000,
|
||||||
|
197: 0xff005f,
|
||||||
|
198: 0xff0087,
|
||||||
|
199: 0xff00af,
|
||||||
|
200: 0xff00d7,
|
||||||
|
201: 0xff00ff,
|
||||||
|
202: 0xff5f00,
|
||||||
|
203: 0xff5f5f,
|
||||||
|
204: 0xff5f87,
|
||||||
|
205: 0xff5faf,
|
||||||
|
206: 0xff5fd7,
|
||||||
|
207: 0xff5fff,
|
||||||
|
208: 0xff8700,
|
||||||
|
209: 0xff875f,
|
||||||
|
210: 0xff8787,
|
||||||
|
211: 0xff87af,
|
||||||
|
212: 0xff87d7,
|
||||||
|
213: 0xff87ff,
|
||||||
|
214: 0xffaf00,
|
||||||
|
215: 0xffaf5f,
|
||||||
|
216: 0xffaf87,
|
||||||
|
217: 0xffafaf,
|
||||||
|
218: 0xffafd7,
|
||||||
|
219: 0xffafff,
|
||||||
|
220: 0xffd700,
|
||||||
|
221: 0xffd75f,
|
||||||
|
222: 0xffd787,
|
||||||
|
223: 0xffd7af,
|
||||||
|
224: 0xffd7d7,
|
||||||
|
225: 0xffd7ff,
|
||||||
|
226: 0xffff00,
|
||||||
|
227: 0xffff5f,
|
||||||
|
228: 0xffff87,
|
||||||
|
229: 0xffffaf,
|
||||||
|
230: 0xffffd7,
|
||||||
|
231: 0xffffff,
|
||||||
|
232: 0x080808,
|
||||||
|
233: 0x121212,
|
||||||
|
234: 0x1c1c1c,
|
||||||
|
235: 0x262626,
|
||||||
|
236: 0x303030,
|
||||||
|
237: 0x3a3a3a,
|
||||||
|
238: 0x444444,
|
||||||
|
239: 0x4e4e4e,
|
||||||
|
240: 0x585858,
|
||||||
|
241: 0x626262,
|
||||||
|
242: 0x6c6c6c,
|
||||||
|
243: 0x767676,
|
||||||
|
244: 0x808080,
|
||||||
|
245: 0x8a8a8a,
|
||||||
|
246: 0x949494,
|
||||||
|
247: 0x9e9e9e,
|
||||||
|
248: 0xa8a8a8,
|
||||||
|
249: 0xb2b2b2,
|
||||||
|
250: 0xbcbcbc,
|
||||||
|
251: 0xc6c6c6,
|
||||||
|
252: 0xd0d0d0,
|
||||||
|
253: 0xdadada,
|
||||||
|
254: 0xe4e4e4,
|
||||||
|
255: 0xeeeeee,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *writer) Write(data []byte) (n int, err error) {
|
||||||
|
var csbi consoleScreenBufferInfo
|
||||||
|
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||||
|
|
||||||
|
er := bytes.NewBuffer(data)
|
||||||
|
loop:
|
||||||
|
for {
|
||||||
|
r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||||
|
if r1 == 0 {
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
|
||||||
|
c1, _, err := er.ReadRune()
|
||||||
|
if err != nil {
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
if c1 != 0x1b {
|
||||||
|
fmt.Fprint(w.out, string(c1))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
c2, _, err := er.ReadRune()
|
||||||
|
if err != nil {
|
||||||
|
w.lastbuf.WriteRune(c1)
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
if c2 != 0x5b {
|
||||||
|
w.lastbuf.WriteRune(c1)
|
||||||
|
w.lastbuf.WriteRune(c2)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
var m rune
|
||||||
|
for {
|
||||||
|
c, _, err := er.ReadRune()
|
||||||
|
if err != nil {
|
||||||
|
w.lastbuf.WriteRune(c1)
|
||||||
|
w.lastbuf.WriteRune(c2)
|
||||||
|
w.lastbuf.Write(buf.Bytes())
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
|
||||||
|
m = c
|
||||||
|
break
|
||||||
|
}
|
||||||
|
buf.Write([]byte(string(c)))
|
||||||
|
}
|
||||||
|
|
||||||
|
var csbi consoleScreenBufferInfo
|
||||||
|
switch m {
|
||||||
|
case 'A':
|
||||||
|
n, err = strconv.Atoi(buf.String())
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||||
|
csbi.cursorPosition.y -= short(n)
|
||||||
|
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||||
|
case 'B':
|
||||||
|
n, err = strconv.Atoi(buf.String())
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||||
|
csbi.cursorPosition.y += short(n)
|
||||||
|
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||||
|
case 'C':
|
||||||
|
n, err = strconv.Atoi(buf.String())
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||||
|
csbi.cursorPosition.x -= short(n)
|
||||||
|
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||||
|
case 'D':
|
||||||
|
n, err = strconv.Atoi(buf.String())
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if n, err = strconv.Atoi(buf.String()); err == nil {
|
||||||
|
var csbi consoleScreenBufferInfo
|
||||||
|
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||||
|
csbi.cursorPosition.x += short(n)
|
||||||
|
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||||
|
}
|
||||||
|
case 'E':
|
||||||
|
n, err = strconv.Atoi(buf.String())
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||||
|
csbi.cursorPosition.x = 0
|
||||||
|
csbi.cursorPosition.y += short(n)
|
||||||
|
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||||
|
case 'F':
|
||||||
|
n, err = strconv.Atoi(buf.String())
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||||
|
csbi.cursorPosition.x = 0
|
||||||
|
csbi.cursorPosition.y -= short(n)
|
||||||
|
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||||
|
case 'G':
|
||||||
|
n, err = strconv.Atoi(buf.String())
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||||
|
csbi.cursorPosition.x = short(n)
|
||||||
|
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||||
|
case 'H':
|
||||||
|
token := strings.Split(buf.String(), ";")
|
||||||
|
if len(token) != 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
n1, err := strconv.Atoi(token[0])
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
n2, err := strconv.Atoi(token[1])
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
csbi.cursorPosition.x = short(n2)
|
||||||
|
csbi.cursorPosition.y = short(n1)
|
||||||
|
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||||
|
case 'J':
|
||||||
|
n, err := strconv.Atoi(buf.String())
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
var cursor coord
|
||||||
|
switch n {
|
||||||
|
case 0:
|
||||||
|
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
|
||||||
|
case 1:
|
||||||
|
cursor = coord{x: csbi.window.left, y: csbi.window.top}
|
||||||
|
case 2:
|
||||||
|
cursor = coord{x: csbi.window.left, y: csbi.window.top}
|
||||||
|
}
|
||||||
|
var count, written dword
|
||||||
|
count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
|
||||||
|
procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||||
|
procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||||
|
case 'K':
|
||||||
|
n, err := strconv.Atoi(buf.String())
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
var cursor coord
|
||||||
|
switch n {
|
||||||
|
case 0:
|
||||||
|
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
|
||||||
|
case 1:
|
||||||
|
cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
|
||||||
|
case 2:
|
||||||
|
cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
|
||||||
|
}
|
||||||
|
var count, written dword
|
||||||
|
count = dword(csbi.size.x - csbi.cursorPosition.x)
|
||||||
|
procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||||
|
procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||||
|
case 'm':
|
||||||
|
attr := csbi.attributes
|
||||||
|
cs := buf.String()
|
||||||
|
if cs == "" {
|
||||||
|
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
token := strings.Split(cs, ";")
|
||||||
|
for i := 0; i < len(token); i += 1 {
|
||||||
|
ns := token[i]
|
||||||
|
if n, err = strconv.Atoi(ns); err == nil {
|
||||||
|
switch {
|
||||||
|
case n == 0 || n == 100:
|
||||||
|
attr = w.oldattr
|
||||||
|
case 1 <= n && n <= 5:
|
||||||
|
attr |= foregroundIntensity
|
||||||
|
case n == 7:
|
||||||
|
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
|
||||||
|
case n == 22 || n == 25:
|
||||||
|
attr |= foregroundIntensity
|
||||||
|
case n == 27:
|
||||||
|
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
|
||||||
|
case 30 <= n && n <= 37:
|
||||||
|
attr = (attr & backgroundMask)
|
||||||
|
if (n-30)&1 != 0 {
|
||||||
|
attr |= foregroundRed
|
||||||
|
}
|
||||||
|
if (n-30)&2 != 0 {
|
||||||
|
attr |= foregroundGreen
|
||||||
|
}
|
||||||
|
if (n-30)&4 != 0 {
|
||||||
|
attr |= foregroundBlue
|
||||||
|
}
|
||||||
|
case n == 38: // set foreground color.
|
||||||
|
if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
|
||||||
|
if n256, err := strconv.Atoi(token[i+2]); err == nil {
|
||||||
|
if n256foreAttr == nil {
|
||||||
|
n256setup()
|
||||||
|
}
|
||||||
|
attr &= backgroundMask
|
||||||
|
attr |= n256foreAttr[n256]
|
||||||
|
i += 2
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
attr = attr & (w.oldattr & backgroundMask)
|
||||||
|
}
|
||||||
|
case n == 39: // reset foreground color.
|
||||||
|
attr &= backgroundMask
|
||||||
|
attr |= w.oldattr & foregroundMask
|
||||||
|
case 40 <= n && n <= 47:
|
||||||
|
attr = (attr & foregroundMask)
|
||||||
|
if (n-40)&1 != 0 {
|
||||||
|
attr |= backgroundRed
|
||||||
|
}
|
||||||
|
if (n-40)&2 != 0 {
|
||||||
|
attr |= backgroundGreen
|
||||||
|
}
|
||||||
|
if (n-40)&4 != 0 {
|
||||||
|
attr |= backgroundBlue
|
||||||
|
}
|
||||||
|
case n == 48: // set background color.
|
||||||
|
if i < len(token)-2 && token[i+1] == "5" {
|
||||||
|
if n256, err := strconv.Atoi(token[i+2]); err == nil {
|
||||||
|
if n256backAttr == nil {
|
||||||
|
n256setup()
|
||||||
|
}
|
||||||
|
attr &= foregroundMask
|
||||||
|
attr |= n256backAttr[n256]
|
||||||
|
i += 2
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
attr = attr & (w.oldattr & foregroundMask)
|
||||||
|
}
|
||||||
|
case n == 49: // reset foreground color.
|
||||||
|
attr &= foregroundMask
|
||||||
|
attr |= w.oldattr & backgroundMask
|
||||||
|
case 90 <= n && n <= 97:
|
||||||
|
attr = (attr & backgroundMask)
|
||||||
|
attr |= foregroundIntensity
|
||||||
|
if (n-90)&1 != 0 {
|
||||||
|
attr |= foregroundRed
|
||||||
|
}
|
||||||
|
if (n-90)&2 != 0 {
|
||||||
|
attr |= foregroundGreen
|
||||||
|
}
|
||||||
|
if (n-90)&4 != 0 {
|
||||||
|
attr |= foregroundBlue
|
||||||
|
}
|
||||||
|
case 100 <= n && n <= 107:
|
||||||
|
attr = (attr & foregroundMask)
|
||||||
|
attr |= backgroundIntensity
|
||||||
|
if (n-100)&1 != 0 {
|
||||||
|
attr |= backgroundRed
|
||||||
|
}
|
||||||
|
if (n-100)&2 != 0 {
|
||||||
|
attr |= backgroundGreen
|
||||||
|
}
|
||||||
|
if (n-100)&4 != 0 {
|
||||||
|
attr |= backgroundBlue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return len(data) - w.lastbuf.Len(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type consoleColor struct {
|
||||||
|
rgb int
|
||||||
|
red bool
|
||||||
|
green bool
|
||||||
|
blue bool
|
||||||
|
intensity bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c consoleColor) foregroundAttr() (attr word) {
|
||||||
|
if c.red {
|
||||||
|
attr |= foregroundRed
|
||||||
|
}
|
||||||
|
if c.green {
|
||||||
|
attr |= foregroundGreen
|
||||||
|
}
|
||||||
|
if c.blue {
|
||||||
|
attr |= foregroundBlue
|
||||||
|
}
|
||||||
|
if c.intensity {
|
||||||
|
attr |= foregroundIntensity
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c consoleColor) backgroundAttr() (attr word) {
|
||||||
|
if c.red {
|
||||||
|
attr |= backgroundRed
|
||||||
|
}
|
||||||
|
if c.green {
|
||||||
|
attr |= backgroundGreen
|
||||||
|
}
|
||||||
|
if c.blue {
|
||||||
|
attr |= backgroundBlue
|
||||||
|
}
|
||||||
|
if c.intensity {
|
||||||
|
attr |= backgroundIntensity
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var color16 = []consoleColor{
|
||||||
|
consoleColor{0x000000, false, false, false, false},
|
||||||
|
consoleColor{0x000080, false, false, true, false},
|
||||||
|
consoleColor{0x008000, false, true, false, false},
|
||||||
|
consoleColor{0x008080, false, true, true, false},
|
||||||
|
consoleColor{0x800000, true, false, false, false},
|
||||||
|
consoleColor{0x800080, true, false, true, false},
|
||||||
|
consoleColor{0x808000, true, true, false, false},
|
||||||
|
consoleColor{0xc0c0c0, true, true, true, false},
|
||||||
|
consoleColor{0x808080, false, false, false, true},
|
||||||
|
consoleColor{0x0000ff, false, false, true, true},
|
||||||
|
consoleColor{0x00ff00, false, true, false, true},
|
||||||
|
consoleColor{0x00ffff, false, true, true, true},
|
||||||
|
consoleColor{0xff0000, true, false, false, true},
|
||||||
|
consoleColor{0xff00ff, true, false, true, true},
|
||||||
|
consoleColor{0xffff00, true, true, false, true},
|
||||||
|
consoleColor{0xffffff, true, true, true, true},
|
||||||
|
}
|
||||||
|
|
||||||
|
type hsv struct {
|
||||||
|
h, s, v float32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a hsv) dist(b hsv) float32 {
|
||||||
|
dh := a.h - b.h
|
||||||
|
switch {
|
||||||
|
case dh > 0.5:
|
||||||
|
dh = 1 - dh
|
||||||
|
case dh < -0.5:
|
||||||
|
dh = -1 - dh
|
||||||
|
}
|
||||||
|
ds := a.s - b.s
|
||||||
|
dv := a.v - b.v
|
||||||
|
return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func toHSV(rgb int) hsv {
|
||||||
|
r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
|
||||||
|
float32((rgb&0x00FF00)>>8)/256.0,
|
||||||
|
float32(rgb&0x0000FF)/256.0
|
||||||
|
min, max := minmax3f(r, g, b)
|
||||||
|
h := max - min
|
||||||
|
if h > 0 {
|
||||||
|
if max == r {
|
||||||
|
h = (g - b) / h
|
||||||
|
if h < 0 {
|
||||||
|
h += 6
|
||||||
|
}
|
||||||
|
} else if max == g {
|
||||||
|
h = 2 + (b-r)/h
|
||||||
|
} else {
|
||||||
|
h = 4 + (r-g)/h
|
||||||
|
}
|
||||||
|
}
|
||||||
|
h /= 6.0
|
||||||
|
s := max - min
|
||||||
|
if max != 0 {
|
||||||
|
s /= max
|
||||||
|
}
|
||||||
|
v := max
|
||||||
|
return hsv{h: h, s: s, v: v}
|
||||||
|
}
|
||||||
|
|
||||||
|
type hsvTable []hsv
|
||||||
|
|
||||||
|
func toHSVTable(rgbTable []consoleColor) hsvTable {
|
||||||
|
t := make(hsvTable, len(rgbTable))
|
||||||
|
for i, c := range rgbTable {
|
||||||
|
t[i] = toHSV(c.rgb)
|
||||||
|
}
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t hsvTable) find(rgb int) consoleColor {
|
||||||
|
hsv := toHSV(rgb)
|
||||||
|
n := 7
|
||||||
|
l := float32(5.0)
|
||||||
|
for i, p := range t {
|
||||||
|
d := hsv.dist(p)
|
||||||
|
if d < l {
|
||||||
|
l, n = d, i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return color16[n]
|
||||||
|
}
|
||||||
|
|
||||||
|
func minmax3f(a, b, c float32) (min, max float32) {
|
||||||
|
if a < b {
|
||||||
|
if b < c {
|
||||||
|
return a, c
|
||||||
|
} else if a < c {
|
||||||
|
return a, b
|
||||||
|
} else {
|
||||||
|
return c, b
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if a < c {
|
||||||
|
return b, c
|
||||||
|
} else if b < c {
|
||||||
|
return b, a
|
||||||
|
} else {
|
||||||
|
return c, a
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var n256foreAttr []word
|
||||||
|
var n256backAttr []word
|
||||||
|
|
||||||
|
func n256setup() {
|
||||||
|
n256foreAttr = make([]word, 256)
|
||||||
|
n256backAttr = make([]word, 256)
|
||||||
|
t := toHSVTable(color16)
|
||||||
|
for i, rgb := range color256 {
|
||||||
|
c := t.find(rgb)
|
||||||
|
n256foreAttr[i] = c.foregroundAttr()
|
||||||
|
n256backAttr[i] = c.backgroundAttr()
|
||||||
|
}
|
||||||
|
}
|
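The two colorable files above share the package-private newColorable constructor: the !windows build selects the pass-through writer, while the Windows build wraps the console handle and translates ANSI escape sequences into kernel32 console calls. For illustration only, a minimal sketch of how calling code reaches this machinery through the exported ColorableStdOut writer declared in formatter.go (the next file); the escape codes and message are illustrative, not taken from this commit:

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/formatter"
)

func main() {
	// On non-Windows platforms ColorableStdOut is just os.Stdout; on Windows it
	// is the translating writer defined in colorable_windows.go above.
	fmt.Fprintf(formatter.ColorableStdOut, "\x1b[32mgreen text\x1b[0m\n")
}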
195
vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
generated
vendored
Normal file
@@ -0,0 +1,195 @@
package formatter

import (
	"fmt"
	"os"
	"regexp"
	"strings"
)

// ColorableStdOut and ColorableStdErr enable color output support on Windows
var ColorableStdOut = newColorable(os.Stdout)
var ColorableStdErr = newColorable(os.Stderr)

const COLS = 80

type ColorMode uint8

const (
	ColorModeNone ColorMode = iota
	ColorModeTerminal
	ColorModePassthrough
)

var SingletonFormatter = New(ColorModeTerminal)

func F(format string, args ...interface{}) string {
	return SingletonFormatter.F(format, args...)
}

func Fi(indentation uint, format string, args ...interface{}) string {
	return SingletonFormatter.Fi(indentation, format, args...)
}

func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
	return SingletonFormatter.Fiw(indentation, maxWidth, format, args...)
}

type Formatter struct {
	ColorMode ColorMode
	colors    map[string]string
	styleRe   *regexp.Regexp
	preserveColorStylingTags bool
}

func NewWithNoColorBool(noColor bool) Formatter {
	if noColor {
		return New(ColorModeNone)
	}
	return New(ColorModeTerminal)
}

func New(colorMode ColorMode) Formatter {
	f := Formatter{
		ColorMode: colorMode,
		colors: map[string]string{
			"/":         "\x1b[0m",
			"bold":      "\x1b[1m",
			"underline": "\x1b[4m",

			"red":          "\x1b[38;5;9m",
			"orange":       "\x1b[38;5;214m",
			"coral":        "\x1b[38;5;204m",
			"magenta":      "\x1b[38;5;13m",
			"green":        "\x1b[38;5;10m",
			"dark-green":   "\x1b[38;5;28m",
			"yellow":       "\x1b[38;5;11m",
			"light-yellow": "\x1b[38;5;228m",
			"cyan":         "\x1b[38;5;14m",
			"gray":         "\x1b[38;5;243m",
			"light-gray":   "\x1b[38;5;246m",
			"blue":         "\x1b[38;5;12m",
		},
	}
	colors := []string{}
	for color := range f.colors {
		colors = append(colors, color)
	}
	f.styleRe = regexp.MustCompile("{{(" + strings.Join(colors, "|") + ")}}")
	return f
}

func (f Formatter) F(format string, args ...interface{}) string {
	return f.Fi(0, format, args...)
}

func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string {
	return f.Fiw(indentation, 0, format, args...)
}

func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
	out := fmt.Sprintf(f.style(format), args...)

	if indentation == 0 && maxWidth == 0 {
		return out
	}

	lines := strings.Split(out, "\n")

	if maxWidth != 0 {
		outLines := []string{}

		maxWidth = maxWidth - indentation*2
		for _, line := range lines {
			if f.length(line) <= maxWidth {
				outLines = append(outLines, line)
				continue
			}
			words := strings.Split(line, " ")
			outWords := []string{words[0]}
			length := uint(f.length(words[0]))
			for _, word := range words[1:] {
				wordLength := f.length(word)
				if length+wordLength+1 <= maxWidth {
					length += wordLength + 1
					outWords = append(outWords, word)
					continue
				}
				outLines = append(outLines, strings.Join(outWords, " "))
				outWords = []string{word}
				length = wordLength
			}
			if len(outWords) > 0 {
				outLines = append(outLines, strings.Join(outWords, " "))
			}
		}

		lines = outLines
	}

	if indentation == 0 {
		return strings.Join(lines, "\n")
	}

	padding := strings.Repeat("  ", int(indentation))
	for i := range lines {
		if lines[i] != "" {
			lines[i] = padding + lines[i]
		}
	}

	return strings.Join(lines, "\n")
}

func (f Formatter) length(styled string) uint {
	n := uint(0)
	inStyle := false
	for _, b := range styled {
		if inStyle {
			if b == 'm' {
				inStyle = false
			}
			continue
		}
		if b == '\x1b' {
			inStyle = true
			continue
		}
		n += 1
	}
	return n
}

func (f Formatter) CycleJoin(elements []string, joiner string, cycle []string) string {
	if len(elements) == 0 {
		return ""
	}
	n := len(cycle)
	out := ""
	for i, text := range elements {
		out += cycle[i%n] + text
		if i < len(elements)-1 {
			out += joiner
		}
	}
	out += "{{/}}"
	return f.style(out)
}

func (f Formatter) style(s string) string {
	switch f.ColorMode {
	case ColorModeNone:
		return f.styleRe.ReplaceAllString(s, "")
	case ColorModePassthrough:
		return s
	case ColorModeTerminal:
		return f.styleRe.ReplaceAllStringFunc(s, func(match string) string {
			if out, ok := f.colors[strings.Trim(match, "{}")]; ok {
				return out
			}
			return match
		})
	}

	return ""
}
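The style tags accepted by Formatter are exactly the keys of the colors map above, written as {{name}} and reset with {{/}}; ColorModeNone strips them and ColorModePassthrough leaves them untouched. A short usage sketch, assuming only the exported functions defined in this file; the strings are illustrative:

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/formatter"
)

func main() {
	f := formatter.New(formatter.ColorModeTerminal)

	// {{green}}/{{bold}} expand to ANSI escapes, {{/}} resets.
	fmt.Println(f.F("{{green}}{{bold}}PASS{{/}} %d specs", 12))

	// Fiw indents each line and soft-wraps at the given width, measuring
	// length without counting escape sequences.
	fmt.Println(f.Fiw(1, 40, "a longer message that will be soft-wrapped to fit in forty columns"))

	// NewWithNoColorBool(true) yields a formatter that strips the tags.
	plain := formatter.NewWithNoColorBool(true)
	fmt.Println(plain.F("{{red}}no color here{{/}}"))
}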
45
vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
package ginkgo

import "github.com/onsi/ginkgo/v2/internal/testingtproxy"

/*
GinkgoT() implements an interface analogous to *testing.T and can be used with
third-party libraries that accept *testing.T through an interface.

GinkgoT() takes an optional offset argument that can be used to get the
correct line number associated with the failure.

You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries
*/
func GinkgoT(optionalOffset ...int) GinkgoTInterface {
	offset := 3
	if len(optionalOffset) > 0 {
		offset = optionalOffset[0]
	}
	return testingtproxy.New(GinkgoWriter, Fail, Skip, DeferCleanup, CurrentSpecReport, offset)
}

/*
The interface returned by GinkgoT(). This covers most of the methods in the testing package's T.
*/
type GinkgoTInterface interface {
	Cleanup(func())
	Setenv(kev, value string)
	Error(args ...interface{})
	Errorf(format string, args ...interface{})
	Fail()
	FailNow()
	Failed() bool
	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
	Helper()
	Log(args ...interface{})
	Logf(format string, args ...interface{})
	Name() string
	Parallel()
	Skip(args ...interface{})
	SkipNow()
	Skipf(format string, args ...interface{})
	Skipped() bool
	TempDir() string
}
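GinkgoT() exists so that libraries which accept *testing.T through an interface can be driven from Ginkgo specs. A hedged sketch using testify as an example of such a library; assert.TestingT only needs Errorf, which GinkgoTInterface provides, and the spec text is illustrative:

package somepkg_test

import (
	. "github.com/onsi/ginkgo/v2"
	"github.com/stretchr/testify/assert"
)

var _ = Describe("using a third-party assertion library", func() {
	It("reports failures through Ginkgo", func() {
		// On failure assert.Equal calls Errorf, which the proxy converts into a
		// Ginkgo failure attributed to the correct code location.
		assert.Equal(GinkgoT(), 4, 2+2)
	})
})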
9
vendor/github.com/onsi/ginkgo/v2/internal/counter.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
package internal

func MakeIncrementingIndexCounter() func() (int, error) {
	idx := -1
	return func() (int, error) {
		idx += 1
		return idx, nil
	}
}
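MakeIncrementingIndexCounter returns a closure over idx, so each call hands out the next zero-based index. A self-contained sketch of that behaviour; the helper is restated locally because ginkgo's internal package cannot be imported from outside its module:

package main

import "fmt"

// makeIncrementingIndexCounter mirrors the vendored helper above.
func makeIncrementingIndexCounter() func() (int, error) {
	idx := -1
	return func() (int, error) {
		idx += 1
		return idx, nil
	}
}

func main() {
	next := makeIncrementingIndexCounter()
	i, _ := next()
	j, _ := next()
	fmt.Println(i, j) // 0 1
}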
99
vendor/github.com/onsi/ginkgo/v2/internal/failer.go
generated
vendored
Normal file
@@ -0,0 +1,99 @@
package internal

import (
	"fmt"
	"sync"

	"github.com/onsi/ginkgo/v2/types"
)

type Failer struct {
	lock    *sync.Mutex
	failure types.Failure
	state   types.SpecState
}

func NewFailer() *Failer {
	return &Failer{
		lock:  &sync.Mutex{},
		state: types.SpecStatePassed,
	}
}

func (f *Failer) GetState() types.SpecState {
	f.lock.Lock()
	defer f.lock.Unlock()
	return f.state
}

func (f *Failer) GetFailure() types.Failure {
	f.lock.Lock()
	defer f.lock.Unlock()
	return f.failure
}

func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.state == types.SpecStatePassed {
		f.state = types.SpecStatePanicked
		f.failure = types.Failure{
			Message:        "Test Panicked",
			Location:       location,
			ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
		}
	}
}

func (f *Failer) Fail(message string, location types.CodeLocation) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.state == types.SpecStatePassed {
		f.state = types.SpecStateFailed
		f.failure = types.Failure{
			Message:  message,
			Location: location,
		}
	}
}

func (f *Failer) Skip(message string, location types.CodeLocation) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.state == types.SpecStatePassed {
		f.state = types.SpecStateSkipped
		f.failure = types.Failure{
			Message:  message,
			Location: location,
		}
	}
}

func (f *Failer) AbortSuite(message string, location types.CodeLocation) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.state == types.SpecStatePassed {
		f.state = types.SpecStateAborted
		f.failure = types.Failure{
			Message:  message,
			Location: location,
		}
	}
}

func (f *Failer) Drain() (types.SpecState, types.Failure) {
	f.lock.Lock()
	defer f.lock.Unlock()

	failure := f.failure
	outcome := f.state

	f.state = types.SpecStatePassed
	f.failure = types.Failure{}

	return outcome, failure
}
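Failer keeps only the first terminal outcome for a spec: once state has left SpecStatePassed, later Fail/Skip/Panic/AbortSuite calls are ignored, and Drain returns the recorded outcome while resetting for the next spec. A sketch of that first-write-wins behaviour, written as if it lived inside the internal package (which is not importable from outside ginkgo); the code location values are illustrative:

package internal

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

func exampleFailerFirstWriteWins() {
	f := NewFailer()
	loc := types.CodeLocation{FileName: "spec_test.go", LineNumber: 42} // illustrative

	f.Fail("first failure wins", loc)
	f.Skip("ignored: state is no longer SpecStatePassed", loc)

	state, failure := f.Drain() // resets the Failer back to SpecStatePassed
	fmt.Println(state == types.SpecStateFailed, failure.Message)
}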
125
vendor/github.com/onsi/ginkgo/v2/internal/focus.go
generated
vendored
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to
|
||||||
|
unmark the container's focus. This gives developers a more intuitive experience when debugging specs.
|
||||||
|
It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus -
|
||||||
|
this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container:
|
||||||
|
|
||||||
|
As a common example, consider:
|
||||||
|
|
||||||
|
FDescribe("something to debug", function() {
|
||||||
|
It("works", function() {...})
|
||||||
|
It("works", function() {...})
|
||||||
|
FIt("doesn't work", function() {...})
|
||||||
|
It("works", function() {...})
|
||||||
|
})
|
||||||
|
|
||||||
|
here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container.
|
||||||
|
The nested policy applied by this function enables this behavior.
|
||||||
|
*/
|
||||||
|
func ApplyNestedFocusPolicyToTree(tree *TreeNode) {
|
||||||
|
var walkTree func(tree *TreeNode) bool
|
||||||
|
walkTree = func(tree *TreeNode) bool {
|
||||||
|
if tree.Node.MarkedPending {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
hasFocusedDescendant := false
|
||||||
|
for _, child := range tree.Children {
|
||||||
|
childHasFocus := walkTree(child)
|
||||||
|
hasFocusedDescendant = hasFocusedDescendant || childHasFocus
|
||||||
|
}
|
||||||
|
tree.Node.MarkedFocus = tree.Node.MarkedFocus && !hasFocusedDescendant
|
||||||
|
return tree.Node.MarkedFocus || hasFocusedDescendant
|
||||||
|
}
|
||||||
|
|
||||||
|
walkTree(tree)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus"
|
||||||
|
It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text
|
||||||
|
and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs.
|
||||||
|
|
||||||
|
If any of the CLI flags are provided they take precedence. The file filters run first followed by the regex filters.
|
||||||
|
|
||||||
|
This function sets the `Skip` property on specs by applying Ginkgo's focus policy:
|
||||||
|
- If there are no CLI arguments and no programmatic focus, do nothing.
|
||||||
|
- If there are no CLI arguments but a spec somewhere has programmatic focus, skip any specs that have no programmatic focus.
|
||||||
|
- If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters.
|
||||||
|
|
||||||
|
*Note:* specs with pending nodes are Skipped when created by NewSpec.
|
||||||
|
*/
|
||||||
|
func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) {
|
||||||
|
focusString := strings.Join(suiteConfig.FocusStrings, "|")
|
||||||
|
skipString := strings.Join(suiteConfig.SkipStrings, "|")
|
||||||
|
|
||||||
|
hasFocusCLIFlags := focusString != "" || skipString != "" || len(suiteConfig.SkipFiles) > 0 || len(suiteConfig.FocusFiles) > 0 || suiteConfig.LabelFilter != ""
|
||||||
|
|
||||||
|
type SkipCheck func(spec Spec) bool
|
||||||
|
|
||||||
|
// by default, skip any specs marked pending
|
||||||
|
skipChecks := []SkipCheck{func(spec Spec) bool { return spec.Nodes.HasNodeMarkedPending() }}
|
||||||
|
hasProgrammaticFocus := false
|
||||||
|
|
||||||
|
if !hasFocusCLIFlags {
|
||||||
|
// check for programmatic focus
|
||||||
|
for _, spec := range specs {
|
||||||
|
if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() {
|
||||||
|
skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() })
|
||||||
|
hasProgrammaticFocus = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if suiteConfig.LabelFilter != "" {
|
||||||
|
labelFilter, _ := types.ParseLabelFilter(suiteConfig.LabelFilter)
|
||||||
|
skipChecks = append(skipChecks, func(spec Spec) bool {
|
||||||
|
return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels()))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(suiteConfig.FocusFiles) > 0 {
|
||||||
|
focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles)
|
||||||
|
skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) })
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(suiteConfig.SkipFiles) > 0 {
|
||||||
|
skipFilters, _ := types.ParseFileFilters(suiteConfig.SkipFiles)
|
||||||
|
skipChecks = append(skipChecks, func(spec Spec) bool { return skipFilters.Matches(spec.Nodes.CodeLocations()) })
|
||||||
|
}
|
||||||
|
|
||||||
|
if focusString != "" {
|
||||||
|
// skip specs that don't match the focus string
|
||||||
|
re := regexp.MustCompile(focusString)
|
||||||
|
skipChecks = append(skipChecks, func(spec Spec) bool { return !re.MatchString(description + " " + spec.Text()) })
|
||||||
|
}
|
||||||
|
|
||||||
|
if skipString != "" {
|
||||||
|
// skip specs that match the skip string
|
||||||
|
re := regexp.MustCompile(skipString)
|
||||||
|
skipChecks = append(skipChecks, func(spec Spec) bool { return re.MatchString(description + " " + spec.Text()) })
|
||||||
|
}
|
||||||
|
|
||||||
|
// skip specs if shouldSkip() is true. note that we do nothing if shouldSkip() is false to avoid overwriting skip status established by the node's pending status
|
||||||
|
processedSpecs := Specs{}
|
||||||
|
for _, spec := range specs {
|
||||||
|
for _, skipCheck := range skipChecks {
|
||||||
|
if skipCheck(spec) {
|
||||||
|
spec.Skip = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
processedSpecs = append(processedSpecs, spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
return processedSpecs, hasProgrammaticFocus
|
||||||
|
}
|
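The nested focus policy above is easiest to see in DSL form: when a focused container holds a focused leaf, the container's focus is dropped, so only the inner FIt survives the focus filter. A hedged sketch (spec texts and bodies are illustrative):

package somepkg_test

import . "github.com/onsi/ginkgo/v2"

var _ = FDescribe("something to debug", func() {
	It("works", func() {})         // skipped: the container's focus is unmarked
	FIt("doesn't work", func() {}) // runs: the innermost programmatic focus wins
	It("also works", func() {})    // skipped
})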
17
vendor/github.com/onsi/ginkgo/v2/internal/global/init.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
package global

import (
	"github.com/onsi/ginkgo/v2/internal"
)

var Suite *internal.Suite
var Failer *internal.Failer

func init() {
	InitializeGlobals()
}

func InitializeGlobals() {
	Failer = internal.NewFailer()
	Suite = internal.NewSuite()
}
544
vendor/github.com/onsi/ginkgo/v2/internal/group.go
generated
vendored
Normal file
@@ -0,0 +1,544 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type runOncePair struct {
|
||||||
|
//nodeId should only run once...
|
||||||
|
nodeID uint
|
||||||
|
nodeType types.NodeType
|
||||||
|
//...for specs in a hierarchy that includes this context
|
||||||
|
containerID uint
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pair runOncePair) isZero() bool {
|
||||||
|
return pair.nodeID == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func runOncePairForNode(node Node, containerID uint) runOncePair {
|
||||||
|
return runOncePair{
|
||||||
|
nodeID: node.ID,
|
||||||
|
nodeType: node.NodeType,
|
||||||
|
containerID: containerID,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type runOncePairs []runOncePair
|
||||||
|
|
||||||
|
func runOncePairsForSpec(spec Spec) runOncePairs {
|
||||||
|
pairs := runOncePairs{}
|
||||||
|
|
||||||
|
containers := spec.Nodes.WithType(types.NodeTypeContainer)
|
||||||
|
for _, node := range spec.Nodes {
|
||||||
|
if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
|
||||||
|
pairs = append(pairs, runOncePairForNode(node, containers.FirstWithNestingLevel(node.NestingLevel-1).ID))
|
||||||
|
} else if node.NodeType.Is(types.NodeTypeBeforeEach|types.NodeTypeJustBeforeEach|types.NodeTypeAfterEach|types.NodeTypeJustAfterEach) && node.MarkedOncePerOrdered {
|
||||||
|
passedIntoAnOrderedContainer := false
|
||||||
|
firstOrderedContainerDeeperThanNode := containers.FirstSatisfying(func(container Node) bool {
|
||||||
|
passedIntoAnOrderedContainer = passedIntoAnOrderedContainer || container.MarkedOrdered
|
||||||
|
return container.NestingLevel >= node.NestingLevel && passedIntoAnOrderedContainer
|
||||||
|
})
|
||||||
|
if firstOrderedContainerDeeperThanNode.IsZero() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
pairs = append(pairs, runOncePairForNode(node, firstOrderedContainerDeeperThanNode.ID))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return pairs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pairs runOncePairs) runOncePairFor(nodeID uint) runOncePair {
|
||||||
|
for i := range pairs {
|
||||||
|
if pairs[i].nodeID == nodeID {
|
||||||
|
return pairs[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return runOncePair{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pairs runOncePairs) hasRunOncePair(pair runOncePair) bool {
|
||||||
|
for i := range pairs {
|
||||||
|
if pairs[i] == pair {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pairs runOncePairs) withType(nodeTypes types.NodeType) runOncePairs {
|
||||||
|
count := 0
|
||||||
|
for i := range pairs {
|
||||||
|
if pairs[i].nodeType.Is(nodeTypes) {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
out, j := make(runOncePairs, count), 0
|
||||||
|
for i := range pairs {
|
||||||
|
if pairs[i].nodeType.Is(nodeTypes) {
|
||||||
|
out[j] = pairs[i]
|
||||||
|
j++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
type group struct {
|
||||||
|
suite *Suite
|
||||||
|
specs Specs
|
||||||
|
runOncePairs map[uint]runOncePairs
|
||||||
|
runOnceTracker map[runOncePair]types.SpecState
|
||||||
|
|
||||||
|
succeeded bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func newGroup(suite *Suite) *group {
|
||||||
|
return &group{
|
||||||
|
suite: suite,
|
||||||
|
runOncePairs: map[uint]runOncePairs{},
|
||||||
|
runOnceTracker: map[runOncePair]types.SpecState{},
|
||||||
|
succeeded: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *group) initialReportForSpec(spec Spec) types.SpecReport {
|
||||||
|
return types.SpecReport{
|
||||||
|
ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
|
||||||
|
ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
|
||||||
|
ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
|
||||||
|
LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
|
||||||
|
LeafNodeType: types.NodeTypeIt,
|
||||||
|
LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
|
||||||
|
LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
|
||||||
|
ParallelProcess: g.suite.config.ParallelProcess,
|
||||||
|
IsSerial: spec.Nodes.HasNodeMarkedSerial(),
|
||||||
|
IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) {
|
||||||
|
if spec.Nodes.HasNodeMarkedPending() {
|
||||||
|
return types.SpecStatePending, types.Failure{}
|
||||||
|
}
|
||||||
|
if spec.Skip {
|
||||||
|
return types.SpecStateSkipped, types.Failure{}
|
||||||
|
}
|
||||||
|
if g.suite.interruptHandler.Status().Interrupted || g.suite.skipAll {
|
||||||
|
return types.SpecStateSkipped, types.Failure{}
|
||||||
|
}
|
||||||
|
if !g.succeeded {
|
||||||
|
return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
|
||||||
|
"Spec skipped because an earlier spec in an ordered container failed")
|
||||||
|
}
|
||||||
|
beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach)
|
||||||
|
for _, pair := range beforeOncePairs {
|
||||||
|
if g.runOnceTracker[pair].Is(types.SpecStateSkipped) {
|
||||||
|
return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
|
||||||
|
fmt.Sprintf("Spec skipped because Skip() was called in %s", pair.nodeType))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if g.suite.config.DryRun {
|
||||||
|
return types.SpecStatePassed, types.Failure{}
|
||||||
|
}
|
||||||
|
return g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool {
|
||||||
|
lastSpecID := uint(0)
|
||||||
|
for idx := range g.specs {
|
||||||
|
if g.specs[idx].Skip {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
sID := g.specs[idx].SubjectID()
|
||||||
|
if g.runOncePairs[sID].hasRunOncePair(pair) {
|
||||||
|
lastSpecID = sID
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return lastSpecID == specID
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
|
||||||
|
interruptStatus := g.suite.interruptHandler.Status()
|
||||||
|
|
||||||
|
pairs := g.runOncePairs[spec.SubjectID()]
|
||||||
|
|
||||||
|
nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll)
|
||||||
|
nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel()
|
||||||
|
nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...)
|
||||||
|
nodes = append(nodes, spec.Nodes.FirstNodeWithType(types.NodeTypeIt))
|
||||||
|
terminatingNode, terminatingPair := Node{}, runOncePair{}
|
||||||
|
|
||||||
|
for _, node := range nodes {
|
||||||
|
oncePair := pairs.runOncePairFor(node.ID)
|
||||||
|
if !oncePair.isZero() && g.runOnceTracker[oncePair].Is(types.SpecStatePassed) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, interruptStatus.Channel, spec.Nodes.BestTextFor(node))
|
||||||
|
g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
|
||||||
|
if !oncePair.isZero() {
|
||||||
|
g.runOnceTracker[oncePair] = g.suite.currentSpecReport.State
|
||||||
|
}
|
||||||
|
if g.suite.currentSpecReport.State != types.SpecStatePassed {
|
||||||
|
terminatingNode, terminatingPair = node, oncePair
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
afterNodeWasRun := map[uint]bool{}
|
||||||
|
includeDeferCleanups := false
|
||||||
|
for {
|
||||||
|
nodes := spec.Nodes.WithType(types.NodeTypeAfterEach)
|
||||||
|
nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()
|
||||||
|
nodes = append(spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel(), nodes...)
|
||||||
|
if !terminatingNode.IsZero() {
|
||||||
|
nodes = nodes.WithinNestingLevel(terminatingNode.NestingLevel)
|
||||||
|
}
|
||||||
|
if includeDeferCleanups {
|
||||||
|
nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()...)
|
||||||
|
nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse()...)
|
||||||
|
}
|
||||||
|
nodes = nodes.Filter(func(node Node) bool {
|
||||||
|
if afterNodeWasRun[node.ID] {
|
||||||
|
//this node has already been run on this attempt, don't rerun it
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
pair := runOncePair{}
|
||||||
|
switch node.NodeType {
|
||||||
|
case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll:
|
||||||
|
// check if we were generated in an AfterNode that has already run
|
||||||
|
if afterNodeWasRun[node.NodeIDWhereCleanupWasGenerated] {
|
||||||
|
return true // we were, so we should definitely run this cleanup now
|
||||||
|
}
|
||||||
|
// looks like this cleanup node was generated by a before node or an It.
|
||||||
|
// the run-once status of a cleanup node is governed by the run-once status of its generator
|
||||||
|
pair = pairs.runOncePairFor(node.NodeIDWhereCleanupWasGenerated)
|
||||||
|
default:
|
||||||
|
pair = pairs.runOncePairFor(node.ID)
|
||||||
|
}
|
||||||
|
if pair.isZero() {
|
||||||
|
// this node is not governed by any run-once policy, we should run it
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// it's our last chance to run if we're the last spec for our oncePair
|
||||||
|
isLastSpecWithPair := g.isLastSpecWithPair(spec.SubjectID(), pair)
|
||||||
|
|
||||||
|
switch g.suite.currentSpecReport.State {
|
||||||
|
case types.SpecStatePassed: //this attempt is passing...
|
||||||
|
return isLastSpecWithPair //...we should run-once if this is our last chance
|
||||||
|
case types.SpecStateSkipped: //the spec was skipped by the user...
|
||||||
|
if isLastSpecWithPair {
|
||||||
|
return true //...we're the last spec, so we should run the AfterNode
|
||||||
|
}
|
||||||
|
if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel {
|
||||||
|
return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run
|
||||||
|
}
|
||||||
|
case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed...
|
||||||
|
if isFinalAttempt {
|
||||||
|
return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run
|
||||||
|
}
|
||||||
|
if !terminatingPair.isZero() { // ...and it failed in a run-once. which will be running again
|
||||||
|
if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) {
|
||||||
|
return terminatingNode.ID == node.NodeIDWhereCleanupWasGenerated // we should run this node if we're a clean-up generated by it
|
||||||
|
} else {
|
||||||
|
return terminatingNode.NestingLevel == node.NestingLevel // ...or if we're at the same nesting level
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted
|
||||||
|
return true //...that means the test run is over and we should clean up the stack. Run the AfterNode
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
|
||||||
|
if len(nodes) == 0 && includeDeferCleanups {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, node := range nodes {
|
||||||
|
afterNodeWasRun[node.ID] = true
|
||||||
|
state, failure := g.suite.runNode(node, g.suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(node))
|
||||||
|
g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
|
||||||
|
if g.suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted {
|
||||||
|
g.suite.currentSpecReport.State = state
|
||||||
|
g.suite.currentSpecReport.Failure = failure
|
||||||
|
}
|
||||||
|
}
|
||||||
|
includeDeferCleanups = true
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *group) run(specs Specs) {
|
||||||
|
g.specs = specs
|
||||||
|
for _, spec := range g.specs {
|
||||||
|
g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, spec := range g.specs {
|
||||||
|
g.suite.currentSpecReport = g.initialReportForSpec(spec)
|
||||||
|
g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec)
|
||||||
|
g.suite.reporter.WillRun(g.suite.currentSpecReport)
|
||||||
|
g.suite.reportEach(spec, types.NodeTypeReportBeforeEach)
|
||||||
|
|
||||||
|
skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending)
|
||||||
|
|
||||||
|
g.suite.currentSpecReport.StartTime = time.Now()
|
||||||
|
if !skip {
|
||||||
|
maxAttempts := max(1, spec.FlakeAttempts())
|
||||||
|
if g.suite.config.FlakeAttempts > 0 {
|
||||||
|
maxAttempts = g.suite.config.FlakeAttempts
|
||||||
|
}
|
||||||
|
for attempt := 0; attempt < maxAttempts; attempt++ {
|
||||||
|
g.suite.currentSpecReport.NumAttempts = attempt + 1
|
||||||
|
g.suite.writer.Truncate()
|
||||||
|
g.suite.outputInterceptor.StartInterceptingOutput()
|
||||||
|
if attempt > 0 {
|
||||||
|
fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt)
|
||||||
|
}
|
||||||
|
|
||||||
|
g.attemptSpec(attempt == maxAttempts-1, spec)
|
||||||
|
|
||||||
|
g.suite.currentSpecReport.EndTime = time.Now()
|
||||||
|
g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime)
|
||||||
|
g.suite.currentSpecReport.CapturedGinkgoWriterOutput += string(g.suite.writer.Bytes())
|
||||||
|
g.suite.currentSpecReport.CapturedStdOutErr += g.suite.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
|
||||||
|
if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
g.suite.reportEach(spec, types.NodeTypeReportAfterEach)
|
||||||
|
g.suite.processCurrentSpecReport()
|
||||||
|
if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
|
||||||
|
g.succeeded = false
|
||||||
|
}
|
||||||
|
g.suite.currentSpecReport = types.SpecReport{}
|
||||||
|
}
|
||||||
|
}
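// Illustrative note (not part of the upstream vendored file): the retry logic in
// run() gives the per-spec FlakeAttempts decorator a baseline of max(1, n), and a
// non-zero suite-level FlakeAttempts configuration value overrides it. For example,
// a spec decorated with FlakeAttempts(3) normally gets 3 attempts, but running with
// a suite config of FlakeAttempts = 5 would give it 5 attempts instead.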
|
||||||
|
|
||||||
|
func (g *group) oldRun(specs Specs) {
|
||||||
|
var suite = g.suite
|
||||||
|
nodeState := map[uint]types.SpecState{}
|
||||||
|
groupSucceeded := true
|
||||||
|
|
||||||
|
indexOfLastSpecContainingNodeID := func(id uint) int {
|
||||||
|
lastIdx := -1
|
||||||
|
for idx := range specs {
|
||||||
|
if specs[idx].Nodes.ContainsNodeID(id) && !specs[idx].Skip {
|
||||||
|
lastIdx = idx
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return lastIdx
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, spec := range specs {
|
||||||
|
suite.currentSpecReport = types.SpecReport{
|
||||||
|
ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
|
||||||
|
ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
|
||||||
|
ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
|
||||||
|
LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
|
||||||
|
LeafNodeType: types.NodeTypeIt,
|
||||||
|
LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
|
||||||
|
LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
|
||||||
|
ParallelProcess: suite.config.ParallelProcess,
|
||||||
|
IsSerial: spec.Nodes.HasNodeMarkedSerial(),
|
||||||
|
IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
|
||||||
|
}
|
||||||
|
|
||||||
|
skip := spec.Skip
|
||||||
|
if spec.Nodes.HasNodeMarkedPending() {
|
||||||
|
skip = true
|
||||||
|
suite.currentSpecReport.State = types.SpecStatePending
|
||||||
|
} else {
|
||||||
|
if suite.interruptHandler.Status().Interrupted || suite.skipAll {
|
||||||
|
skip = true
|
||||||
|
}
|
||||||
|
if !groupSucceeded {
|
||||||
|
skip = true
|
||||||
|
suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
|
||||||
|
"Spec skipped because an earlier spec in an ordered container failed")
|
||||||
|
}
|
||||||
|
for _, node := range spec.Nodes.WithType(types.NodeTypeBeforeAll) {
|
||||||
|
if nodeState[node.ID] == types.SpecStateSkipped {
|
||||||
|
skip = true
|
||||||
|
suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
|
||||||
|
"Spec skipped because Skip() was called in BeforeAll")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if skip {
|
||||||
|
suite.currentSpecReport.State = types.SpecStateSkipped
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if suite.config.DryRun && !skip {
|
||||||
|
skip = true
|
||||||
|
suite.currentSpecReport.State = types.SpecStatePassed
|
||||||
|
}
|
||||||
|
|
||||||
|
suite.reporter.WillRun(suite.currentSpecReport)
|
||||||
|
//send the spec report to any attached ReportBeforeEach blocks - this will update suite.currentSpecReport if failures occur in these blocks
|
||||||
|
suite.reportEach(spec, types.NodeTypeReportBeforeEach)
|
||||||
|
if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
|
||||||
|
//the reportEach failed, skip this spec
|
||||||
|
skip = true
|
||||||
|
}
|
||||||
|
|
||||||
|
suite.currentSpecReport.StartTime = time.Now()
|
||||||
|
maxAttempts := max(1, spec.FlakeAttempts())
|
||||||
|
if suite.config.FlakeAttempts > 0 {
|
||||||
|
maxAttempts = suite.config.FlakeAttempts
|
||||||
|
}
|
||||||
|
|
||||||
|
for attempt := 0; !skip && (attempt < maxAttempts); attempt++ {
|
||||||
|
suite.currentSpecReport.NumAttempts = attempt + 1
|
||||||
|
suite.writer.Truncate()
|
||||||
|
suite.outputInterceptor.StartInterceptingOutput()
|
||||||
|
if attempt > 0 {
|
||||||
|
fmt.Fprintf(suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt)
|
||||||
|
}
|
||||||
|
isFinalAttempt := (attempt == maxAttempts-1)
|
||||||
|
|
||||||
|
interruptStatus := suite.interruptHandler.Status()
|
||||||
|
deepestNestingLevelAttained := -1
|
||||||
|
var nodes = spec.Nodes.WithType(types.NodeTypeBeforeAll).Filter(func(n Node) bool {
|
||||||
|
return nodeState[n.ID] != types.SpecStatePassed
|
||||||
|
})
|
||||||
|
nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel()
|
||||||
|
nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...)
|
||||||
|
nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeIt)...)
|
||||||
|
|
||||||
|
var terminatingNode Node
|
||||||
|
for j := range nodes {
|
||||||
|
deepestNestingLevelAttained = max(deepestNestingLevelAttained, nodes[j].NestingLevel)
|
||||||
|
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(nodes[j], interruptStatus.Channel, spec.Nodes.BestTextFor(nodes[j]))
|
||||||
|
suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime)
|
||||||
|
nodeState[nodes[j].ID] = suite.currentSpecReport.State
|
||||||
|
if suite.currentSpecReport.State != types.SpecStatePassed {
|
||||||
|
terminatingNode = nodes[j]
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
afterAllNodesThatRan := map[uint]bool{}
|
||||||
|
// pull out some shared code so we aren't repeating ourselves down below. this just runs after and cleanup nodes
|
||||||
|
runAfterAndCleanupNodes := func(nodes Nodes) {
|
||||||
|
for j := range nodes {
|
||||||
|
state, failure := suite.runNode(nodes[j], suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(nodes[j]))
|
||||||
|
suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime)
|
||||||
|
nodeState[nodes[j].ID] = state
|
||||||
|
if suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted {
|
||||||
|
suite.currentSpecReport.State = state
|
||||||
|
suite.currentSpecReport.Failure = failure
|
||||||
|
if state != types.SpecStatePassed {
|
||||||
|
terminatingNode = nodes[j]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if nodes[j].NodeType.Is(types.NodeTypeAfterAll) {
|
||||||
|
afterAllNodesThatRan[nodes[j].ID] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// pull out a helper that captures the logic of whether or not we should run a given After node.
|
||||||
|
// there is complexity here stemming from the fact that we allow nested ordered contexts and flakey retries
|
||||||
|
shouldRunAfterNode := func(n Node) bool {
|
||||||
|
if n.NodeType.Is(types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
var id uint
|
||||||
|
if n.NodeType.Is(types.NodeTypeAfterAll) {
|
||||||
|
id = n.ID
|
||||||
|
if afterAllNodesThatRan[id] { //we've already run on this attempt. don't run again.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if n.NodeType.Is(types.NodeTypeCleanupAfterAll) {
|
||||||
|
id = n.NodeIDWhereCleanupWasGenerated
|
||||||
|
}
|
||||||
|
isLastSpecWithNode := indexOfLastSpecContainingNodeID(id) == i
|
||||||
|
|
||||||
|
switch suite.currentSpecReport.State {
|
||||||
|
case types.SpecStatePassed: //we've passed so far...
|
||||||
|
return isLastSpecWithNode //... and we're the last spec with this AfterNode, so we should run it
|
||||||
|
case types.SpecStateSkipped: //the spec was skipped by the user...
|
||||||
|
if isLastSpecWithNode {
|
||||||
|
return true //...we're the last spec, so we should run the AfterNode
|
||||||
|
}
|
||||||
|
if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) && terminatingNode.NestingLevel == n.NestingLevel {
|
||||||
|
return true //...or, a BeforeAll was skipped and it's at our nesting level, so our subgroup is going to skip
|
||||||
|
}
|
||||||
|
case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed...
|
||||||
|
if isFinalAttempt {
|
||||||
|
return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run
|
||||||
|
}
|
||||||
|
if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) {
|
||||||
|
//...we'll be rerunning a BeforeAll so we should cleanup after it if...
|
||||||
|
if n.NodeType.Is(types.NodeTypeAfterAll) && terminatingNode.NestingLevel == n.NestingLevel {
|
||||||
|
return true //we're at the same nesting level
|
||||||
|
}
|
||||||
|
if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated {
|
||||||
|
return true //we're a DeferCleanup generated by it
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if terminatingNode.NodeType.Is(types.NodeTypeAfterAll) {
|
||||||
|
//...we'll be rerunning an AfterAll so we should cleanup after it if...
|
||||||
|
if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated {
|
||||||
|
return true //we're a DeferCleanup generated by it
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted
|
||||||
|
return true //...that means the test run is over and we should clean up the stack. Run the AfterNode
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// first pass - run all the JustAfterEach, AfterEach, and AfterAll nodes. Our shouldRunAfterNode filter function will clean up the AfterAlls for us.
|
||||||
|
afterNodes := spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel()
|
||||||
|
afterNodes = afterNodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterEach).CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()...)
|
||||||
|
afterNodes = afterNodes.WithinNestingLevel(deepestNestingLevelAttained)
|
||||||
|
afterNodes = afterNodes.Filter(shouldRunAfterNode)
|
||||||
|
runAfterAndCleanupNodes(afterNodes)
|
||||||
|
|
||||||
|
// second-pass perhaps we didn't run the AfterAlls but a state change due to an AfterEach now requires us to run the AfterAlls:
|
||||||
|
afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode)
|
||||||
|
runAfterAndCleanupNodes(afterNodes)
|
||||||
|
|
||||||
|
// now we run any DeferCleanups
|
||||||
|
afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()
|
||||||
|
afterNodes = append(afterNodes, suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Filter(shouldRunAfterNode).Reverse()...)
|
||||||
|
runAfterAndCleanupNodes(afterNodes)
|
||||||
|
|
||||||
|
// third-pass, perhaps a DeferCleanup failed and now we need to run the AfterAlls.
|
||||||
|
afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode)
|
||||||
|
runAfterAndCleanupNodes(afterNodes)
|
||||||
|
|
||||||
|
// and finally - running AfterAlls may have generated some new DeferCleanup nodes, let's run them to finish up
|
||||||
|
afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse().Filter(shouldRunAfterNode)
|
||||||
|
runAfterAndCleanupNodes(afterNodes)
|
||||||
|
|
||||||
|
suite.currentSpecReport.EndTime = time.Now()
|
||||||
|
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
|
||||||
|
suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
|
||||||
|
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
|
||||||
|
if suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//send the spec report to any attached ReportAfterEach blocks - this will update suite.currentSpecReport if failures occur in these blocks
|
||||||
|
suite.reportEach(spec, types.NodeTypeReportAfterEach)
|
||||||
|
suite.processCurrentSpecReport()
|
||||||
|
if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
|
||||||
|
groupSucceeded = false
|
||||||
|
}
|
||||||
|
suite.currentSpecReport = types.SpecReport{}
|
||||||
|
}
|
||||||
|
}
|
212
vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
generated
vendored
Normal file
@ -0,0 +1,212 @@
|
|||||||
|
package interrupt_handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/formatter"
|
||||||
|
"github.com/onsi/ginkgo/v2/internal/parallel_support"
|
||||||
|
)
|
||||||
|
|
||||||
|
const TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION = 30 * time.Second
|
||||||
|
const TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT = 10
|
||||||
|
const ABORT_POLLING_INTERVAL = 500 * time.Millisecond
|
||||||
|
const ABORT_REPEAT_INTERRUPT_DURATION = 30 * time.Second
|
||||||
|
|
||||||
|
type InterruptCause uint
|
||||||
|
|
||||||
|
const (
|
||||||
|
InterruptCauseInvalid InterruptCause = iota
|
||||||
|
|
||||||
|
InterruptCauseSignal
|
||||||
|
InterruptCauseTimeout
|
||||||
|
InterruptCauseAbortByOtherProcess
|
||||||
|
)
|
||||||
|
|
||||||
|
func (ic InterruptCause) String() string {
|
||||||
|
switch ic {
|
||||||
|
case InterruptCauseSignal:
|
||||||
|
return "Interrupted by User"
|
||||||
|
case InterruptCauseTimeout:
|
||||||
|
return "Interrupted by Timeout"
|
||||||
|
case InterruptCauseAbortByOtherProcess:
|
||||||
|
return "Interrupted by Other Ginkgo Process"
|
||||||
|
}
|
||||||
|
return "INVALID_INTERRUPT_CAUSE"
|
||||||
|
}
|
||||||
|
|
||||||
|
type InterruptStatus struct {
|
||||||
|
Interrupted bool
|
||||||
|
Channel chan interface{}
|
||||||
|
Cause InterruptCause
|
||||||
|
}
|
||||||
|
|
||||||
|
type InterruptHandlerInterface interface {
|
||||||
|
Status() InterruptStatus
|
||||||
|
SetInterruptPlaceholderMessage(string)
|
||||||
|
ClearInterruptPlaceholderMessage()
|
||||||
|
InterruptMessageWithStackTraces() string
|
||||||
|
}
|
||||||
|
|
||||||
|
type InterruptHandler struct {
|
||||||
|
c chan interface{}
|
||||||
|
lock *sync.Mutex
|
||||||
|
interrupted bool
|
||||||
|
interruptPlaceholderMessage string
|
||||||
|
interruptCause InterruptCause
|
||||||
|
client parallel_support.Client
|
||||||
|
stop chan interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewInterruptHandler(timeout time.Duration, client parallel_support.Client) *InterruptHandler {
|
||||||
|
handler := &InterruptHandler{
|
||||||
|
c: make(chan interface{}),
|
||||||
|
lock: &sync.Mutex{},
|
||||||
|
interrupted: false,
|
||||||
|
stop: make(chan interface{}),
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
handler.registerForInterrupts(timeout)
|
||||||
|
return handler
|
||||||
|
}
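// Illustrative usage sketch (not part of the upstream vendored file); the timeout
// value and the nil client are assumptions for the example (a nil client simply
// disables cross-process abort polling):
//
//	handler := NewInterruptHandler(30*time.Minute, nil)
//	defer handler.Stop()
//	status := handler.Status()
//	if status.Interrupted {
//	    fmt.Println(handler.InterruptMessageWithStackTraces())
//	}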
|
||||||
|
|
||||||
|
func (handler *InterruptHandler) Stop() {
|
||||||
|
close(handler.stop)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (handler *InterruptHandler) registerForInterrupts(timeout time.Duration) {
|
||||||
|
// os signal handling
|
||||||
|
signalChannel := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM)
|
||||||
|
|
||||||
|
// timeout handling
|
||||||
|
var timeoutChannel <-chan time.Time
|
||||||
|
var timeoutTimer *time.Timer
|
||||||
|
if timeout > 0 {
|
||||||
|
timeoutTimer = time.NewTimer(timeout)
|
||||||
|
timeoutChannel = timeoutTimer.C
|
||||||
|
}
|
||||||
|
|
||||||
|
// cross-process abort handling
|
||||||
|
var abortChannel chan bool
|
||||||
|
if handler.client != nil {
|
||||||
|
abortChannel = make(chan bool)
|
||||||
|
go func() {
|
||||||
|
pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-pollTicker.C:
|
||||||
|
if handler.client.ShouldAbort() {
|
||||||
|
abortChannel <- true
|
||||||
|
pollTicker.Stop()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case <-handler.stop:
|
||||||
|
pollTicker.Stop()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// listen for any interrupt signals
|
||||||
|
// note that some (timeouts, cross-process aborts) will only trigger once
|
||||||
|
// for these we set up a ticker to keep interrupting the suite until it ends
|
||||||
|
// this ensures any `AfterEach` or `AfterSuite`s that get stuck cleaning up
|
||||||
|
// get interrupted eventually
|
||||||
|
go func() {
|
||||||
|
var interruptCause InterruptCause
|
||||||
|
var repeatChannel <-chan time.Time
|
||||||
|
var repeatTicker *time.Ticker
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-signalChannel:
|
||||||
|
interruptCause = InterruptCauseSignal
|
||||||
|
case <-timeoutChannel:
|
||||||
|
interruptCause = InterruptCauseTimeout
|
||||||
|
repeatInterruptTimeout := timeout / time.Duration(TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT)
|
||||||
|
if repeatInterruptTimeout > TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION {
|
||||||
|
repeatInterruptTimeout = TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION
|
||||||
|
}
|
||||||
|
timeoutTimer.Stop()
|
||||||
|
repeatTicker = time.NewTicker(repeatInterruptTimeout)
|
||||||
|
repeatChannel = repeatTicker.C
|
||||||
|
case <-abortChannel:
|
||||||
|
interruptCause = InterruptCauseAbortByOtherProcess
|
||||||
|
repeatTicker = time.NewTicker(ABORT_REPEAT_INTERRUPT_DURATION)
|
||||||
|
repeatChannel = repeatTicker.C
|
||||||
|
case <-repeatChannel:
|
||||||
|
//do nothing, just interrupt again using the same interruptCause
|
||||||
|
case <-handler.stop:
|
||||||
|
if timeoutTimer != nil {
|
||||||
|
timeoutTimer.Stop()
|
||||||
|
}
|
||||||
|
if repeatTicker != nil {
|
||||||
|
repeatTicker.Stop()
|
||||||
|
}
|
||||||
|
signal.Stop(signalChannel)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
handler.lock.Lock()
|
||||||
|
handler.interruptCause = interruptCause
|
||||||
|
if handler.interruptPlaceholderMessage != "" {
|
||||||
|
fmt.Println(handler.interruptPlaceholderMessage)
|
||||||
|
}
|
||||||
|
handler.interrupted = true
|
||||||
|
close(handler.c)
|
||||||
|
handler.c = make(chan interface{})
|
||||||
|
handler.lock.Unlock()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
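// Illustrative note (not part of the upstream vendored file): once a timeout fires,
// the handler keeps re-interrupting the suite every
// timeout/TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT, capped at
// TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION. For a 3-minute timeout that is
// 3m/10 = 18s between repeat interrupts; for a 1-hour timeout it would be 6m,
// which is capped to 30s.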
|
||||||
|
|
||||||
|
func (handler *InterruptHandler) Status() InterruptStatus {
|
||||||
|
handler.lock.Lock()
|
||||||
|
defer handler.lock.Unlock()
|
||||||
|
|
||||||
|
return InterruptStatus{
|
||||||
|
Interrupted: handler.interrupted,
|
||||||
|
Channel: handler.c,
|
||||||
|
Cause: handler.interruptCause,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (handler *InterruptHandler) SetInterruptPlaceholderMessage(message string) {
|
||||||
|
handler.lock.Lock()
|
||||||
|
defer handler.lock.Unlock()
|
||||||
|
|
||||||
|
handler.interruptPlaceholderMessage = message
|
||||||
|
}
|
||||||
|
|
||||||
|
func (handler *InterruptHandler) ClearInterruptPlaceholderMessage() {
|
||||||
|
handler.lock.Lock()
|
||||||
|
defer handler.lock.Unlock()
|
||||||
|
|
||||||
|
handler.interruptPlaceholderMessage = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (handler *InterruptHandler) InterruptMessageWithStackTraces() string {
|
||||||
|
handler.lock.Lock()
|
||||||
|
out := fmt.Sprintf("%s\n\n", handler.interruptCause.String())
|
||||||
|
defer handler.lock.Unlock()
|
||||||
|
if handler.interruptCause == InterruptCauseAbortByOtherProcess {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
out += "Here's a stack trace of all running goroutines:\n"
|
||||||
|
buf := make([]byte, 8192)
|
||||||
|
for {
|
||||||
|
n := runtime.Stack(buf, true)
|
||||||
|
if n < len(buf) {
|
||||||
|
buf = buf[:n]
|
||||||
|
break
|
||||||
|
}
|
||||||
|
buf = make([]byte, 2*len(buf))
|
||||||
|
}
|
||||||
|
out += formatter.Fi(1, "%s", string(buf))
|
||||||
|
return out
|
||||||
|
}
|
15
vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
|
||||||
|
// +build freebsd openbsd netbsd dragonfly darwin linux solaris
|
||||||
|
|
||||||
|
package interrupt_handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
func SwallowSigQuit() {
|
||||||
|
c := make(chan os.Signal, 1024)
|
||||||
|
signal.Notify(c, syscall.SIGQUIT)
|
||||||
|
}
|
8
vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
//go:build windows
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package interrupt_handler
|
||||||
|
|
||||||
|
func SwallowSigQuit() {
|
||||||
|
//noop
|
||||||
|
}
|
660
vendor/github.com/onsi/ginkgo/v2/internal/node.go
generated
vendored
Normal file
@ -0,0 +1,660 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _global_node_id_counter = uint(0)
|
||||||
|
var _global_id_mutex = &sync.Mutex{}
|
||||||
|
|
||||||
|
func UniqueNodeID() uint {
|
||||||
|
//There's a race in the internal integration tests if we don't make
|
||||||
|
//accessing _global_node_id_counter safe across goroutines.
|
||||||
|
_global_id_mutex.Lock()
|
||||||
|
defer _global_id_mutex.Unlock()
|
||||||
|
_global_node_id_counter += 1
|
||||||
|
return _global_node_id_counter
|
||||||
|
}
|
||||||
|
|
||||||
|
type Node struct {
|
||||||
|
ID uint
|
||||||
|
NodeType types.NodeType
|
||||||
|
|
||||||
|
Text string
|
||||||
|
Body func()
|
||||||
|
CodeLocation types.CodeLocation
|
||||||
|
NestingLevel int
|
||||||
|
|
||||||
|
SynchronizedBeforeSuiteProc1Body func() []byte
|
||||||
|
SynchronizedBeforeSuiteAllProcsBody func([]byte)
|
||||||
|
|
||||||
|
SynchronizedAfterSuiteAllProcsBody func()
|
||||||
|
SynchronizedAfterSuiteProc1Body func()
|
||||||
|
|
||||||
|
ReportEachBody func(types.SpecReport)
|
||||||
|
ReportAfterSuiteBody func(types.Report)
|
||||||
|
|
||||||
|
MarkedFocus bool
|
||||||
|
MarkedPending bool
|
||||||
|
MarkedSerial bool
|
||||||
|
MarkedOrdered bool
|
||||||
|
MarkedOncePerOrdered bool
|
||||||
|
FlakeAttempts int
|
||||||
|
Labels Labels
|
||||||
|
|
||||||
|
NodeIDWhereCleanupWasGenerated uint
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decoration Types
|
||||||
|
type focusType bool
|
||||||
|
type pendingType bool
|
||||||
|
type serialType bool
|
||||||
|
type orderedType bool
|
||||||
|
type honorsOrderedType bool
|
||||||
|
|
||||||
|
const Focus = focusType(true)
|
||||||
|
const Pending = pendingType(true)
|
||||||
|
const Serial = serialType(true)
|
||||||
|
const Ordered = orderedType(true)
|
||||||
|
const OncePerOrdered = honorsOrderedType(true)
|
||||||
|
|
||||||
|
type FlakeAttempts uint
|
||||||
|
type Offset uint
|
||||||
|
type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing
|
||||||
|
type Labels []string
|
||||||
|
|
||||||
|
func UnionOfLabels(labels ...Labels) Labels {
|
||||||
|
out := Labels{}
|
||||||
|
seen := map[string]bool{}
|
||||||
|
for _, labelSet := range labels {
|
||||||
|
for _, label := range labelSet {
|
||||||
|
if !seen[label] {
|
||||||
|
seen[label] = true
|
||||||
|
out = append(out, label)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
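// Illustrative example (not part of the upstream vendored file): UnionOfLabels
// de-duplicates while preserving first-seen order, e.g.
//
//	UnionOfLabels(Labels{"fast", "db"}, Labels{"db", "slow"})
//	// -> Labels{"fast", "db", "slow"}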
|
||||||
|
|
||||||
|
func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) {
|
||||||
|
decorations := []interface{}{}
|
||||||
|
remainingArgs := []interface{}{}
|
||||||
|
for _, arg := range args {
|
||||||
|
if isDecoration(arg) {
|
||||||
|
decorations = append(decorations, arg)
|
||||||
|
} else {
|
||||||
|
remainingArgs = append(remainingArgs, arg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return decorations, remainingArgs
|
||||||
|
}
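// Illustrative example (not part of the upstream vendored file): decorators are
// split off from the remaining constructor arguments, e.g.
//
//	decorations, rest := PartitionDecorations(Focus, "does things", func() {})
//	// decorations -> []interface{}{Focus}
//	// rest        -> []interface{}{"does things", func() {}}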
|
||||||
|
|
||||||
|
func isDecoration(arg interface{}) bool {
|
||||||
|
switch t := reflect.TypeOf(arg); {
|
||||||
|
case t == nil:
|
||||||
|
return false
|
||||||
|
case t == reflect.TypeOf(Offset(0)):
|
||||||
|
return true
|
||||||
|
case t == reflect.TypeOf(types.CodeLocation{}):
|
||||||
|
return true
|
||||||
|
case t == reflect.TypeOf(Focus):
|
||||||
|
return true
|
||||||
|
case t == reflect.TypeOf(Pending):
|
||||||
|
return true
|
||||||
|
case t == reflect.TypeOf(Serial):
|
||||||
|
return true
|
||||||
|
case t == reflect.TypeOf(Ordered):
|
||||||
|
return true
|
||||||
|
case t == reflect.TypeOf(OncePerOrdered):
|
||||||
|
return true
|
||||||
|
case t == reflect.TypeOf(FlakeAttempts(0)):
|
||||||
|
return true
|
||||||
|
case t == reflect.TypeOf(Labels{}):
|
||||||
|
return true
|
||||||
|
case t.Kind() == reflect.Slice && isSliceOfDecorations(arg):
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isSliceOfDecorations(slice interface{}) bool {
|
||||||
|
vSlice := reflect.ValueOf(slice)
|
||||||
|
if vSlice.Len() == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i := 0; i < vSlice.Len(); i++ {
|
||||||
|
if !isDecoration(vSlice.Index(i).Interface()) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) {
|
||||||
|
baseOffset := 2
|
||||||
|
node := Node{
|
||||||
|
ID: UniqueNodeID(),
|
||||||
|
NodeType: nodeType,
|
||||||
|
Text: text,
|
||||||
|
Labels: Labels{},
|
||||||
|
CodeLocation: types.NewCodeLocation(baseOffset),
|
||||||
|
NestingLevel: -1,
|
||||||
|
}
|
||||||
|
errors := []error{}
|
||||||
|
appendError := func(err error) {
|
||||||
|
if err != nil {
|
||||||
|
errors = append(errors, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
args = unrollInterfaceSlice(args)
|
||||||
|
|
||||||
|
remainingArgs := []interface{}{}
|
||||||
|
//First get the CodeLocation up-to-date
|
||||||
|
for _, arg := range args {
|
||||||
|
switch v := arg.(type) {
|
||||||
|
case Offset:
|
||||||
|
node.CodeLocation = types.NewCodeLocation(baseOffset + int(v))
|
||||||
|
case types.CodeLocation:
|
||||||
|
node.CodeLocation = v
|
||||||
|
default:
|
||||||
|
remainingArgs = append(remainingArgs, arg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
labelsSeen := map[string]bool{}
|
||||||
|
trackedFunctionError := false
|
||||||
|
args = remainingArgs
|
||||||
|
remainingArgs = []interface{}{}
|
||||||
|
//now process the rest of the args
|
||||||
|
for _, arg := range args {
|
||||||
|
|
||||||
|
switch t := reflect.TypeOf(arg); {
|
||||||
|
case t == reflect.TypeOf(float64(0)):
|
||||||
|
break //ignore deprecated timeouts
|
||||||
|
case t == reflect.TypeOf(Focus):
|
||||||
|
node.MarkedFocus = bool(arg.(focusType))
|
||||||
|
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
|
||||||
|
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Focus"))
|
||||||
|
}
|
||||||
|
case t == reflect.TypeOf(Pending):
|
||||||
|
node.MarkedPending = bool(arg.(pendingType))
|
||||||
|
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
|
||||||
|
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Pending"))
|
||||||
|
}
|
||||||
|
case t == reflect.TypeOf(Serial):
|
||||||
|
node.MarkedSerial = bool(arg.(serialType))
|
||||||
|
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
|
||||||
|
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Serial"))
|
||||||
|
}
|
||||||
|
case t == reflect.TypeOf(Ordered):
|
||||||
|
node.MarkedOrdered = bool(arg.(orderedType))
|
||||||
|
if !nodeType.Is(types.NodeTypeContainer) {
|
||||||
|
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered"))
|
||||||
|
}
|
||||||
|
case t == reflect.TypeOf(OncePerOrdered):
|
||||||
|
node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType))
|
||||||
|
if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
|
||||||
|
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered"))
|
||||||
|
}
|
||||||
|
case t == reflect.TypeOf(FlakeAttempts(0)):
|
||||||
|
node.FlakeAttempts = int(arg.(FlakeAttempts))
|
||||||
|
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
|
||||||
|
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "FlakeAttempts"))
|
||||||
|
}
|
||||||
|
case t == reflect.TypeOf(Labels{}):
|
||||||
|
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
|
||||||
|
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label"))
|
||||||
|
}
|
||||||
|
for _, label := range arg.(Labels) {
|
||||||
|
if !labelsSeen[label] {
|
||||||
|
labelsSeen[label] = true
|
||||||
|
label, err := types.ValidateAndCleanupLabel(label, node.CodeLocation)
|
||||||
|
node.Labels = append(node.Labels, label)
|
||||||
|
appendError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case t.Kind() == reflect.Func:
|
||||||
|
if node.Body != nil {
|
||||||
|
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
|
||||||
|
trackedFunctionError = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
isValid := (t.NumOut() == 0) && (t.NumIn() <= 1) && (t.NumIn() == 0 || t.In(0) == reflect.TypeOf(make(Done)))
|
||||||
|
if !isValid {
|
||||||
|
appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
|
||||||
|
trackedFunctionError = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if t.NumIn() == 0 {
|
||||||
|
node.Body = arg.(func())
|
||||||
|
} else {
|
||||||
|
deprecationTracker.TrackDeprecation(types.Deprecations.Async(), node.CodeLocation)
|
||||||
|
deprecatedAsyncBody := arg.(func(Done))
|
||||||
|
node.Body = func() { deprecatedAsyncBody(make(Done)) }
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
remainingArgs = append(remainingArgs, arg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//validations
|
||||||
|
if node.MarkedPending && node.MarkedFocus {
|
||||||
|
appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
|
||||||
|
}
|
||||||
|
|
||||||
|
if node.Body == nil && !node.MarkedPending && !trackedFunctionError {
|
||||||
|
appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
|
||||||
|
}
|
||||||
|
for _, arg := range remainingArgs {
|
||||||
|
appendError(types.GinkgoErrors.UnknownDecorator(node.CodeLocation, nodeType, arg))
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(errors) > 0 {
|
||||||
|
return Node{}, errors
|
||||||
|
}
|
||||||
|
|
||||||
|
return node, errors
|
||||||
|
}
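// Illustrative sketch (not part of the upstream vendored file); dt is assumed to be
// a previously constructed *types.DeprecationTracker:
//
//	node, errs := NewNode(dt, types.NodeTypeIt, "works", Labels{"fast"}, func() {})
//	// errs collects every invalid decorator/body combination; node is zero-valued
//	// whenever len(errs) > 0.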
|
||||||
|
|
||||||
|
func NewSynchronizedBeforeSuiteNode(proc1Body func() []byte, allProcsBody func([]byte), codeLocation types.CodeLocation) (Node, []error) {
|
||||||
|
return Node{
|
||||||
|
ID: UniqueNodeID(),
|
||||||
|
NodeType: types.NodeTypeSynchronizedBeforeSuite,
|
||||||
|
SynchronizedBeforeSuiteProc1Body: proc1Body,
|
||||||
|
SynchronizedBeforeSuiteAllProcsBody: allProcsBody,
|
||||||
|
CodeLocation: codeLocation,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewSynchronizedAfterSuiteNode(allProcsBody func(), proc1Body func(), codeLocation types.CodeLocation) (Node, []error) {
|
||||||
|
return Node{
|
||||||
|
ID: UniqueNodeID(),
|
||||||
|
NodeType: types.NodeTypeSynchronizedAfterSuite,
|
||||||
|
SynchronizedAfterSuiteAllProcsBody: allProcsBody,
|
||||||
|
SynchronizedAfterSuiteProc1Body: proc1Body,
|
||||||
|
CodeLocation: codeLocation,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewReportBeforeEachNode(body func(types.SpecReport), codeLocation types.CodeLocation) (Node, []error) {
|
||||||
|
return Node{
|
||||||
|
ID: UniqueNodeID(),
|
||||||
|
NodeType: types.NodeTypeReportBeforeEach,
|
||||||
|
ReportEachBody: body,
|
||||||
|
CodeLocation: codeLocation,
|
||||||
|
NestingLevel: -1,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewReportAfterEachNode(body func(types.SpecReport), codeLocation types.CodeLocation) (Node, []error) {
|
||||||
|
return Node{
|
||||||
|
ID: UniqueNodeID(),
|
||||||
|
NodeType: types.NodeTypeReportAfterEach,
|
||||||
|
ReportEachBody: body,
|
||||||
|
CodeLocation: codeLocation,
|
||||||
|
NestingLevel: -1,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewReportAfterSuiteNode(text string, body func(types.Report), codeLocation types.CodeLocation) (Node, []error) {
|
||||||
|
return Node{
|
||||||
|
ID: UniqueNodeID(),
|
||||||
|
Text: text,
|
||||||
|
NodeType: types.NodeTypeReportAfterSuite,
|
||||||
|
ReportAfterSuiteBody: body,
|
||||||
|
CodeLocation: codeLocation,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewCleanupNode(fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) {
|
||||||
|
baseOffset := 2
|
||||||
|
node := Node{
|
||||||
|
ID: UniqueNodeID(),
|
||||||
|
NodeType: types.NodeTypeCleanupInvalid,
|
||||||
|
CodeLocation: types.NewCodeLocation(baseOffset),
|
||||||
|
NestingLevel: -1,
|
||||||
|
}
|
||||||
|
remainingArgs := []interface{}{}
|
||||||
|
for _, arg := range args {
|
||||||
|
switch t := reflect.TypeOf(arg); {
|
||||||
|
case t == reflect.TypeOf(Offset(0)):
|
||||||
|
node.CodeLocation = types.NewCodeLocation(baseOffset + int(arg.(Offset)))
|
||||||
|
case t == reflect.TypeOf(types.CodeLocation{}):
|
||||||
|
node.CodeLocation = arg.(types.CodeLocation)
|
||||||
|
default:
|
||||||
|
remainingArgs = append(remainingArgs, arg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(remainingArgs) == 0 {
|
||||||
|
return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(node.CodeLocation)}
|
||||||
|
}
|
||||||
|
callback := reflect.ValueOf(remainingArgs[0])
|
||||||
|
if !(callback.Kind() == reflect.Func && callback.Type().NumOut() <= 1) {
|
||||||
|
return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(node.CodeLocation)}
|
||||||
|
}
|
||||||
|
callArgs := []reflect.Value{}
|
||||||
|
for _, arg := range remainingArgs[1:] {
|
||||||
|
callArgs = append(callArgs, reflect.ValueOf(arg))
|
||||||
|
}
|
||||||
|
cl := node.CodeLocation
|
||||||
|
node.Body = func() {
|
||||||
|
out := callback.Call(callArgs)
|
||||||
|
if len(out) == 1 && !out[0].IsNil() {
|
||||||
|
fail(fmt.Sprintf("DeferCleanup callback returned error: %v", out[0]), cl)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return node, nil
|
||||||
|
}
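// Illustrative sketch (not part of the upstream vendored file); failFunc and the
// file path are hypothetical example values. Arguments after the callback are
// forwarded to it, and a non-nil returned error is reported through fail:
//
//	node, errs := NewCleanupNode(failFunc, func(path string) error {
//	    return os.Remove(path)
//	}, "/tmp/fixture")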
|
||||||
|
|
||||||
|
func (n Node) IsZero() bool {
|
||||||
|
return n.ID == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Nodes */
|
||||||
|
type Nodes []Node
|
||||||
|
|
||||||
|
func (n Nodes) CopyAppend(nodes ...Node) Nodes {
|
||||||
|
numN := len(n)
|
||||||
|
out := make(Nodes, numN+len(nodes))
|
||||||
|
for i, node := range n {
|
||||||
|
out[i] = node
|
||||||
|
}
|
||||||
|
for j, node := range nodes {
|
||||||
|
out[numN+j] = node
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) SplitAround(pivot Node) (Nodes, Nodes) {
|
||||||
|
pivotIdx := len(n)
|
||||||
|
for i := range n {
|
||||||
|
if n[i].ID == pivot.ID {
|
||||||
|
pivotIdx = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
left := n[:pivotIdx]
|
||||||
|
right := Nodes{}
|
||||||
|
if pivotIdx+1 < len(n) {
|
||||||
|
right = n[pivotIdx+1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
return left, right
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) FirstNodeWithType(nodeTypes types.NodeType) Node {
|
||||||
|
for i := range n {
|
||||||
|
if n[i].NodeType.Is(nodeTypes) {
|
||||||
|
return n[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Node{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) WithType(nodeTypes types.NodeType) Nodes {
|
||||||
|
count := 0
|
||||||
|
for i := range n {
|
||||||
|
if n[i].NodeType.Is(nodeTypes) {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
out, j := make(Nodes, count), 0
|
||||||
|
for i := range n {
|
||||||
|
if n[i].NodeType.Is(nodeTypes) {
|
||||||
|
out[j] = n[i]
|
||||||
|
j++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) WithoutType(nodeTypes types.NodeType) Nodes {
|
||||||
|
count := 0
|
||||||
|
for i := range n {
|
||||||
|
if !n[i].NodeType.Is(nodeTypes) {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
out, j := make(Nodes, count), 0
|
||||||
|
for i := range n {
|
||||||
|
if !n[i].NodeType.Is(nodeTypes) {
|
||||||
|
out[j] = n[i]
|
||||||
|
j++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) WithoutNode(nodeToExclude Node) Nodes {
|
||||||
|
idxToExclude := len(n)
|
||||||
|
for i := range n {
|
||||||
|
if n[i].ID == nodeToExclude.ID {
|
||||||
|
idxToExclude = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if idxToExclude == len(n) {
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
out, j := make(Nodes, len(n)-1), 0
|
||||||
|
for i := range n {
|
||||||
|
if i == idxToExclude {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
out[j] = n[i]
|
||||||
|
j++
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) Filter(filter func(Node) bool) Nodes {
|
||||||
|
trufa, count := make([]bool, len(n)), 0
|
||||||
|
for i := range n {
|
||||||
|
if filter(n[i]) {
|
||||||
|
trufa[i] = true
|
||||||
|
count += 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
out, j := make(Nodes, count), 0
|
||||||
|
for i := range n {
|
||||||
|
if trufa[i] {
|
||||||
|
out[j] = n[i]
|
||||||
|
j++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) FirstSatisfying(filter func(Node) bool) Node {
|
||||||
|
for i := range n {
|
||||||
|
if filter(n[i]) {
|
||||||
|
return n[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Node{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) WithinNestingLevel(deepestNestingLevel int) Nodes {
|
||||||
|
count := 0
|
||||||
|
for i := range n {
|
||||||
|
if n[i].NestingLevel <= deepestNestingLevel {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
out, j := make(Nodes, count), 0
|
||||||
|
for i := range n {
|
||||||
|
if n[i].NestingLevel <= deepestNestingLevel {
|
||||||
|
out[j] = n[i]
|
||||||
|
j++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) SortedByDescendingNestingLevel() Nodes {
|
||||||
|
out := make(Nodes, len(n))
|
||||||
|
copy(out, n)
|
||||||
|
sort.SliceStable(out, func(i int, j int) bool {
|
||||||
|
return out[i].NestingLevel > out[j].NestingLevel
|
||||||
|
})
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) SortedByAscendingNestingLevel() Nodes {
|
||||||
|
out := make(Nodes, len(n))
|
||||||
|
copy(out, n)
|
||||||
|
sort.SliceStable(out, func(i int, j int) bool {
|
||||||
|
return out[i].NestingLevel < out[j].NestingLevel
|
||||||
|
})
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) FirstWithNestingLevel(level int) Node {
|
||||||
|
for i := range n {
|
||||||
|
if n[i].NestingLevel == level {
|
||||||
|
return n[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Node{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) Reverse() Nodes {
|
||||||
|
out := make(Nodes, len(n))
|
||||||
|
for i := range n {
|
||||||
|
out[len(n)-1-i] = n[i]
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) Texts() []string {
|
||||||
|
out := make([]string, len(n))
|
||||||
|
for i := range n {
|
||||||
|
out[i] = n[i].Text
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) Labels() [][]string {
|
||||||
|
out := make([][]string, len(n))
|
||||||
|
for i := range n {
|
||||||
|
if n[i].Labels == nil {
|
||||||
|
out[i] = []string{}
|
||||||
|
} else {
|
||||||
|
out[i] = []string(n[i].Labels)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) UnionOfLabels() []string {
|
||||||
|
out := []string{}
|
||||||
|
seen := map[string]bool{}
|
||||||
|
for i := range n {
|
||||||
|
for _, label := range n[i].Labels {
|
||||||
|
if !seen[label] {
|
||||||
|
seen[label] = true
|
||||||
|
out = append(out, label)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) CodeLocations() []types.CodeLocation {
|
||||||
|
out := make([]types.CodeLocation, len(n))
|
||||||
|
for i := range n {
|
||||||
|
out[i] = n[i].CodeLocation
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) BestTextFor(node Node) string {
|
||||||
|
if node.Text != "" {
|
||||||
|
return node.Text
|
||||||
|
}
|
||||||
|
parentNestingLevel := node.NestingLevel - 1
|
||||||
|
for i := range n {
|
||||||
|
if n[i].Text != "" && n[i].NestingLevel == parentNestingLevel {
|
||||||
|
return n[i].Text
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
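// Illustrative note (not part of the upstream vendored file): for a node with no
// text of its own (e.g. a BeforeEach), BestTextFor falls back to the text of a node
// one nesting level up - typically the enclosing container's description.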
|
||||||
|
|
||||||
|
func (n Nodes) ContainsNodeID(id uint) bool {
|
||||||
|
for i := range n {
|
||||||
|
if n[i].ID == id {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) HasNodeMarkedPending() bool {
|
||||||
|
for i := range n {
|
||||||
|
if n[i].MarkedPending {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) HasNodeMarkedFocus() bool {
|
||||||
|
for i := range n {
|
||||||
|
if n[i].MarkedFocus {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) HasNodeMarkedSerial() bool {
|
||||||
|
for i := range n {
|
||||||
|
if n[i].MarkedSerial {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n Nodes) FirstNodeMarkedOrdered() Node {
|
||||||
|
for i := range n {
|
||||||
|
if n[i].MarkedOrdered {
|
||||||
|
return n[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Node{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func unrollInterfaceSlice(args interface{}) []interface{} {
|
||||||
|
v := reflect.ValueOf(args)
|
||||||
|
if v.Kind() != reflect.Slice {
|
||||||
|
return []interface{}{args}
|
||||||
|
}
|
||||||
|
out := []interface{}{}
|
||||||
|
for i := 0; i < v.Len(); i++ {
|
||||||
|
el := reflect.ValueOf(v.Index(i).Interface())
|
||||||
|
if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) {
|
||||||
|
out = append(out, unrollInterfaceSlice(el.Interface())...)
|
||||||
|
} else {
|
||||||
|
out = append(out, v.Index(i).Interface())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
121
vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
generated
vendored
Normal file
@ -0,0 +1,121 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type GroupedSpecIndices []SpecIndices
|
||||||
|
type SpecIndices []int
|
||||||
|
|
||||||
|
func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, GroupedSpecIndices) {
|
||||||
|
/*
|
||||||
|
Ginkgo has sophisticated support for randomizing specs. Specs are guaranteed to have the same
|
||||||
|
order for a given seed across test runs.
|
||||||
|
|
||||||
|
By default only top-level containers and specs are shuffled - this makes for a more intuitive debugging
|
||||||
|
experience - specs within a given container run in the order they appear in the file.
|
||||||
|
|
||||||
|
Developers can set -randomizeAllSpecs to shuffle _all_ specs.
|
||||||
|
|
||||||
|
In addition, spec containers can be marked as Ordered. Specs within an Ordered container are never shuffled.
|
||||||
|
|
||||||
|
Finally, specs and spec containers can be marked as Serial. When running in parallel, serial specs run on Process #1 _after_ all other processes have finished.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Seed a new random source based on the configured random seed.
|
||||||
|
r := rand.New(rand.NewSource(suiteConfig.RandomSeed))
|
||||||
|
|
||||||
|
// first break things into execution groups
|
||||||
|
// a group represents a single unit of execution and is a collection of SpecIndices
|
||||||
|
// usually a group is just a single spec, however ordered containers must be preserved as a single group
|
||||||
|
executionGroupIDs := []uint{}
|
||||||
|
executionGroups := map[uint]SpecIndices{}
|
||||||
|
for idx, spec := range specs {
|
||||||
|
groupNode := spec.Nodes.FirstNodeMarkedOrdered()
|
||||||
|
if groupNode.IsZero() {
|
||||||
|
groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt)
|
||||||
|
}
|
||||||
|
executionGroups[groupNode.ID] = append(executionGroups[groupNode.ID], idx)
|
||||||
|
if len(executionGroups[groupNode.ID]) == 1 {
|
||||||
|
executionGroupIDs = append(executionGroupIDs, groupNode.ID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// now, we only shuffle all the execution groups if we're randomizing all specs, otherwise
|
||||||
|
// we shuffle outermost containers. so we need to form shufflable groupings of GroupIDs
|
||||||
|
shufflableGroupingIDs := []uint{}
|
||||||
|
shufflableGroupingIDToGroupIDs := map[uint][]uint{}
|
||||||
|
shufflableGroupingsIDToSortKeys := map[uint]string{}
|
||||||
|
|
||||||
|
// for each execution group we're going to have to pick a node to represent how the
|
||||||
|
// execution group is grouped for shuffling:
|
||||||
|
nodeTypesToShuffle := types.NodeTypesForContainerAndIt
|
||||||
|
if suiteConfig.RandomizeAllSpecs {
|
||||||
|
nodeTypesToShuffle = types.NodeTypeIt
|
||||||
|
}
|
||||||
|
|
||||||
|
//so, for each execution group:
|
||||||
|
for _, groupID := range executionGroupIDs {
|
||||||
|
// pick out a representative spec
|
||||||
|
representativeSpec := specs[executionGroups[groupID][0]]
|
||||||
|
|
||||||
|
// and grab the node on the spec that will represent which shufflable group this execution group belongs to
|
||||||
|
shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle)
|
||||||
|
|
||||||
|
//add the execution group to its shufflable group
|
||||||
|
shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID] = append(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID], groupID)
|
||||||
|
|
||||||
|
//and if it's the first one in
|
||||||
|
if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 {
|
||||||
|
// record the shuffleable group ID
|
||||||
|
shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID)
|
||||||
|
// and record the sort key to use
|
||||||
|
shufflableGroupingsIDToSortKeys[shufflableGroupingNode.ID] = shufflableGroupingNode.CodeLocation.String()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// now we sort the shufflable groups by the sort key. We use the shufflable group nodes code location and break ties using its node id
|
||||||
|
sort.SliceStable(shufflableGroupingIDs, func(i, j int) bool {
|
||||||
|
keyA := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[i]]
|
||||||
|
keyB := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[j]]
|
||||||
|
if keyA == keyB {
|
||||||
|
return shufflableGroupingIDs[i] < shufflableGroupingIDs[j]
|
||||||
|
} else {
|
||||||
|
return keyA < keyB
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// now we permute the sorted shufflable grouping IDs and build the ordered Groups
|
||||||
|
orderedGroups := GroupedSpecIndices{}
|
||||||
|
permutation := r.Perm(len(shufflableGroupingIDs))
|
||||||
|
for _, j := range permutation {
|
||||||
|
//let's get the execution group IDs for this shufflable group:
|
||||||
|
executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]]
|
||||||
|
// and we'll add their associated spec indices to the orderedGroups slice:
|
||||||
|
for _, executionGroupID := range executionGroupIDsForJ {
|
||||||
|
orderedGroups = append(orderedGroups, executionGroups[executionGroupID])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we're running in series, we're done.
|
||||||
|
if suiteConfig.ParallelTotal == 1 {
|
||||||
|
return orderedGroups, GroupedSpecIndices{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// We're running in parallel so we need to partition the ordered groups into a parallelizable set and a serialized set.
|
||||||
|
// The parallelizable groups will run across all Ginkgo processes...
|
||||||
|
// ...the serial groups will only run on Process #1 after all other processes have exited.
|
||||||
|
parallelizableGroups, serialGroups := GroupedSpecIndices{}, GroupedSpecIndices{}
|
||||||
|
for _, specIndices := range orderedGroups {
|
||||||
|
if specs[specIndices[0]].Nodes.HasNodeMarkedSerial() {
|
||||||
|
serialGroups = append(serialGroups, specIndices)
|
||||||
|
} else {
|
||||||
|
parallelizableGroups = append(parallelizableGroups, specIndices)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return parallelizableGroups, serialGroups
|
||||||
|
}
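// Illustrative usage sketch (not part of the upstream vendored file): the two
// returned GroupedSpecIndices partition the run into parallelizable and serial work:
//
//	parallelizable, serial := OrderSpecs(specs, suiteConfig)
//	for _, specIndices := range parallelizable {
//	    // each SpecIndices slice is executed as one unit (a lone spec or a whole
//	    // Ordered container), across all Ginkgo processes
//	}
//	// serial groups run only on process #1 after the other processes finish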
|
250
vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
generated
vendored
Normal file
@ -0,0 +1,250 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const BAILOUT_TIME = 1 * time.Second
|
||||||
|
const BAILOUT_MESSAGE = `Ginkgo detected an issue while intercepting output.
|
||||||
|
|
||||||
|
When running in parallel, Ginkgo captures stdout and stderr output
|
||||||
|
and attaches it to the running spec. It looks like that process is getting
|
||||||
|
stuck for this suite.
|
||||||
|
|
||||||
|
This usually happens if you, or a library you are using, spin up an external
|
||||||
|
process and set cmd.Stdout = os.Stdout and/or cmd.Stderr = os.Stderr. This
|
||||||
|
causes the external process to keep Ginkgo's output interceptor pipe open and
|
||||||
|
causes output interception to hang.
|
||||||
|
|
||||||
|
Ginkgo has detected this and shortcircuited the capture process. The specs
|
||||||
|
will continue running after this message however output from the external
|
||||||
|
process that caused this issue will not be captured.
|
||||||
|
|
||||||
|
You have several options to fix this. In preferred order they are:
|
||||||
|
|
||||||
|
1. Pass GinkgoWriter instead of os.Stdout or os.Stderr to your process.
|
||||||
|
2. Ensure your process exits before the current spec completes. If your
|
||||||
|
process is long-lived and must cross spec boundaries, this option won't
|
||||||
|
work for you.
|
||||||
|
3. Pause Ginkgo's output interceptor before starting your process and then
|
||||||
|
resume it after. Use PauseOutputInterception() and ResumeOutputInterception()
|
||||||
|
to do this.
|
||||||
|
4. Set --output-interceptor-mode=none when running your Ginkgo suite. This will
|
||||||
|
turn off all output interception but allow specs to run in parallel without this
|
||||||
|
issue. You may miss important output if you do this including output from Go's
|
||||||
|
race detector.
|
||||||
|
|
||||||
|
More details on issue #851 - https://github.com/onsi/ginkgo/issues/851
|
||||||
|
`
|
||||||
|
|
||||||
|
/*
|
||||||
|
The OutputInterceptor is used by Ginkgo to
|
||||||
|
intercept and capture all stdout and stderr output during a test run.
|
||||||
|
*/
|
||||||
|
type OutputInterceptor interface {
|
||||||
|
StartInterceptingOutput()
|
||||||
|
StartInterceptingOutputAndForwardTo(io.Writer)
|
||||||
|
StopInterceptingAndReturnOutput() string
|
||||||
|
|
||||||
|
PauseIntercepting()
|
||||||
|
ResumeIntercepting()
|
||||||
|
|
||||||
|
Shutdown()
|
||||||
|
}
|
||||||
|
|
||||||
|
type NoopOutputInterceptor struct{}
|
||||||
|
|
||||||
|
func (interceptor NoopOutputInterceptor) StartInterceptingOutput() {}
|
||||||
|
func (interceptor NoopOutputInterceptor) StartInterceptingOutputAndForwardTo(io.Writer) {}
|
||||||
|
func (interceptor NoopOutputInterceptor) StopInterceptingAndReturnOutput() string { return "" }
|
||||||
|
func (interceptor NoopOutputInterceptor) PauseIntercepting() {}
|
||||||
|
func (interceptor NoopOutputInterceptor) ResumeIntercepting() {}
|
||||||
|
func (interceptor NoopOutputInterceptor) Shutdown() {}
|
||||||
|
|
||||||
|
type pipePair struct {
|
||||||
|
reader *os.File
|
||||||
|
writer *os.File
|
||||||
|
}
|
||||||
|
|
||||||
|
func startPipeFactory(pipeChannel chan pipePair, shutdown chan interface{}) {
|
||||||
|
for {
|
||||||
|
//make the next pipe...
|
||||||
|
pair := pipePair{}
|
||||||
|
pair.reader, pair.writer, _ = os.Pipe()
|
||||||
|
select {
|
||||||
|
//...and provide it to the next consumer (they are responsible for closing the files)
|
||||||
|
case pipeChannel <- pair:
|
||||||
|
continue
|
||||||
|
//...or close the files if we were told to shutdown
|
||||||
|
case <-shutdown:
|
||||||
|
pair.reader.Close()
|
||||||
|
pair.writer.Close()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type interceptorImplementation interface {
|
||||||
|
CreateStdoutStderrClones() (*os.File, *os.File)
|
||||||
|
ConnectPipeToStdoutStderr(*os.File)
|
||||||
|
RestoreStdoutStderrFromClones(*os.File, *os.File)
|
||||||
|
ShutdownClones(*os.File, *os.File)
|
||||||
|
}
|
||||||
|
|
||||||
|
type genericOutputInterceptor struct {
|
||||||
|
intercepting bool
|
||||||
|
|
||||||
|
stdoutClone *os.File
|
||||||
|
stderrClone *os.File
|
||||||
|
pipe pipePair
|
||||||
|
|
||||||
|
shutdown chan interface{}
|
||||||
|
emergencyBailout chan interface{}
|
||||||
|
pipeChannel chan pipePair
|
||||||
|
interceptedContent chan string
|
||||||
|
|
||||||
|
forwardTo io.Writer
|
||||||
|
accumulatedOutput string
|
||||||
|
|
||||||
|
implementation interceptorImplementation
|
||||||
|
}
|
||||||
|
|
||||||
|
func (interceptor *genericOutputInterceptor) StartInterceptingOutput() {
|
||||||
|
interceptor.StartInterceptingOutputAndForwardTo(io.Discard)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (interceptor *genericOutputInterceptor) StartInterceptingOutputAndForwardTo(w io.Writer) {
|
||||||
|
if interceptor.intercepting {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
interceptor.accumulatedOutput = ""
|
||||||
|
interceptor.forwardTo = w
|
||||||
|
interceptor.ResumeIntercepting()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (interceptor *genericOutputInterceptor) StopInterceptingAndReturnOutput() string {
|
||||||
|
if interceptor.intercepting {
|
||||||
|
interceptor.PauseIntercepting()
|
||||||
|
}
|
||||||
|
return interceptor.accumulatedOutput
|
||||||
|
}
|
||||||
|
|
||||||
|
func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
|
||||||
|
if interceptor.intercepting {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
interceptor.intercepting = true
|
||||||
|
if interceptor.stdoutClone == nil {
|
||||||
|
interceptor.stdoutClone, interceptor.stderrClone = interceptor.implementation.CreateStdoutStderrClones()
|
||||||
|
interceptor.shutdown = make(chan interface{})
|
||||||
|
go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now we make a pipe; we'll use it to capture anything written to file descriptors 1 and 2 (this is how everything else in the world is trying to log to stdout and stderr)
|
||||||
|
// we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running
|
||||||
|
interceptor.pipe = <-interceptor.pipeChannel
|
||||||
|
|
||||||
|
interceptor.emergencyBailout = make(chan interface{})
|
||||||
|
|
||||||
|
//Spin up a goroutine to copy data from the pipe into a buffer, this is how we capture any output the user is emitting
|
||||||
|
go func() {
|
||||||
|
buffer := &bytes.Buffer{}
|
||||||
|
destination := io.MultiWriter(buffer, interceptor.forwardTo)
|
||||||
|
copyFinished := make(chan interface{})
|
||||||
|
reader := interceptor.pipe.reader
|
||||||
|
go func() {
|
||||||
|
io.Copy(destination, reader)
|
||||||
|
reader.Close() // close the read end of the pipe so we don't leak a file descriptor
|
||||||
|
close(copyFinished)
|
||||||
|
}()
|
||||||
|
select {
|
||||||
|
case <-copyFinished:
|
||||||
|
interceptor.interceptedContent <- buffer.String()
|
||||||
|
case <-interceptor.emergencyBailout:
|
||||||
|
interceptor.interceptedContent <- ""
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
interceptor.implementation.ConnectPipeToStdoutStderr(interceptor.pipe.writer)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (interceptor *genericOutputInterceptor) PauseIntercepting() {
|
||||||
|
if !interceptor.intercepting {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// first we have to close the write end of the pipe. To do this we have to close all file descriptors pointing
|
||||||
|
// to the write end. So that would be the pipewriter itself, and FD #1 and FD #2 if we've Dup2'd them
|
||||||
|
interceptor.pipe.writer.Close() // the pipewriter itself
|
||||||
|
|
||||||
|
// we also need to stop intercepting. we do that by reconnecting the stdout and stderr file descriptions back to their respective #1 and #2 file descriptors;
|
||||||
|
// this also closes #1 and #2 before it points that their original stdout and stderr file descriptions
|
||||||
|
interceptor.implementation.RestoreStdoutStderrFromClones(interceptor.stdoutClone, interceptor.stderrClone)
|
||||||
|
|
||||||
|
var content string
|
||||||
|
select {
|
||||||
|
case content = <-interceptor.interceptedContent:
|
||||||
|
case <-time.After(BAILOUT_TIME):
|
||||||
|
/*
|
||||||
|
By closing all the pipe writer's file descriptors associated with the pipe writer's file description the io.Copy reading from the reader
|
||||||
|
should eventually receive an EOF and exit.
|
||||||
|
|
||||||
|
**However**, if the user has spun up an external process and passed in os.Stdout/os.Stderr to cmd.Stdout/cmd.Stderr then the external process
|
||||||
|
will have a file descriptor pointing to the pipe writer's file description and it will not close until the external process exits.
|
||||||
|
|
||||||
|
That would leave us hanging here waiting for the io.Copy to close forever. Instead we invoke this emergency escape valve. This returns whatever
|
||||||
|
content we've got but leaves the io.Copy running. This ensures the external process can continue writing without hanging at the cost of leaking a goroutine
|
||||||
|
and file descriptor (those these will be cleaned up when the process exits).
|
||||||
|
|
||||||
|
We tack on a message to notify the user that they've hit this edgecase and encourage them to address it.
|
||||||
|
*/
|
||||||
|
close(interceptor.emergencyBailout)
|
||||||
|
content = <-interceptor.interceptedContent + BAILOUT_MESSAGE
|
||||||
|
}
|
||||||
|
|
||||||
|
interceptor.accumulatedOutput += content
|
||||||
|
interceptor.intercepting = false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (interceptor *genericOutputInterceptor) Shutdown() {
|
||||||
|
interceptor.PauseIntercepting()
|
||||||
|
|
||||||
|
if interceptor.stdoutClone != nil {
|
||||||
|
close(interceptor.shutdown)
|
||||||
|
interceptor.implementation.ShutdownClones(interceptor.stdoutClone, interceptor.stderrClone)
|
||||||
|
interceptor.stdoutClone = nil
|
||||||
|
interceptor.stderrClone = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* This is used on windows builds but included here so it can be explicitly tested on unix systems too */
|
||||||
|
func NewOSGlobalReassigningOutputInterceptor() OutputInterceptor {
|
||||||
|
return &genericOutputInterceptor{
|
||||||
|
interceptedContent: make(chan string),
|
||||||
|
pipeChannel: make(chan pipePair),
|
||||||
|
shutdown: make(chan interface{}),
|
||||||
|
implementation: &osGlobalReassigningOutputInterceptorImpl{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type osGlobalReassigningOutputInterceptorImpl struct{}
|
||||||
|
|
||||||
|
func (impl *osGlobalReassigningOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
|
||||||
|
return os.Stdout, os.Stderr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (impl *osGlobalReassigningOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
|
||||||
|
os.Stdout = pipeWriter
|
||||||
|
os.Stderr = pipeWriter
|
||||||
|
}
|
||||||
|
|
||||||
|
func (impl *osGlobalReassigningOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
|
||||||
|
os.Stdout = stdoutClone
|
||||||
|
os.Stderr = stderrClone
|
||||||
|
}
|
||||||
|
|
||||||
|
func (impl *osGlobalReassigningOutputInterceptorImpl) ShutdownClones(_ *os.File, _ *os.File) {
|
||||||
|
//noop
|
||||||
|
}
|
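The reassignment-based interceptor above swaps the package-level os.Stdout/os.Stderr variables rather than touching file descriptors. A minimal standalone sketch of that technique follows; it is not part of the vendored diff and all names are illustrative.

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

func main() {
	stdoutClone := os.Stdout // keep a handle on the real stdout (the "clone")
	r, w, _ := os.Pipe()
	os.Stdout = w // reassign the package-level stdout; fmt now writes into the pipe

	done := make(chan string)
	go func() { // drain the pipe so writers never block
		buf := &bytes.Buffer{}
		io.Copy(buf, r)
		done <- buf.String()
	}()

	fmt.Println("hello, interceptor") // captured, not printed

	w.Close()               // close the write end so io.Copy sees EOF
	os.Stdout = stdoutClone // restore the real stdout
	fmt.Println("captured:", <-done)
}

Note that this style only catches writers that go through os.Stdout/os.Stderr, which is why it is reserved for Windows; the dup-syscall variant in the next file also catches direct writes to file descriptors 1 and 2.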
62 vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go generated vendored Normal file
@ -0,0 +1,62 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
// +build freebsd openbsd netbsd dragonfly darwin linux solaris

package internal

import (
	"os"

	"golang.org/x/sys/unix"
)

func NewOutputInterceptor() OutputInterceptor {
	return &genericOutputInterceptor{
		interceptedContent: make(chan string),
		pipeChannel:        make(chan pipePair),
		shutdown:           make(chan interface{}),
		implementation:     &dupSyscallOutputInterceptorImpl{},
	}
}

type dupSyscallOutputInterceptorImpl struct{}

func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
	// To clone stdout and stderr we:
	// First, create two clone file descriptors that point to the stdout and stderr file descriptions
	stdoutCloneFD, _ := unix.Dup(1)
	stderrCloneFD, _ := unix.Dup(2)

	// And then wrap the clone file descriptors in files.
	// One benefit of this (that we don't use yet) is that we can actually write
	// to these files to emit output to the console even though we're intercepting output
	stdoutClone := os.NewFile(uintptr(stdoutCloneFD), "stdout-clone")
	stderrClone := os.NewFile(uintptr(stderrCloneFD), "stderr-clone")

	//these clones remain alive throughout the lifecycle of the suite and don't need to be recreated
	//this speeds things up a bit, actually.
	return stdoutClone, stderrClone
}

func (impl *dupSyscallOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
	// To redirect output to our pipe we need to point the 1 and 2 file descriptors (which is how the world tries to log things)
	// to the write end of the pipe.
	// We do this with Dup2 (possibly Dup3 on some architectures) to have file descriptors 1 and 2 point to the same file description as the pipeWriter
	// This effectively shunts data written to stdout and stderr to the write end of our pipe
	unix.Dup2(int(pipeWriter.Fd()), 1)
	unix.Dup2(int(pipeWriter.Fd()), 2)
}

func (impl *dupSyscallOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
	// To restore stdout/stderr from the clones we have the 1 and 2 file descriptors
	// point to the original file descriptions that we saved off in the clones.
	// This has the added benefit of closing the connection between these descriptors and the write end of the pipe
	// which is important to cause the io.Copy on the pipe.Reader to end.
	unix.Dup2(int(stdoutClone.Fd()), 1)
	unix.Dup2(int(stderrClone.Fd()), 2)
}

func (impl *dupSyscallOutputInterceptorImpl) ShutdownClones(stdoutClone *os.File, stderrClone *os.File) {
	// We're done with the clones so we can close them to clean up after ourselves
	stdoutClone.Close()
	stderrClone.Close()
}
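The file above relies on dup(2)/dup2(2) so that interception also catches code that writes to file descriptors 1 and 2 directly. A small self-contained sketch of the same syscall pattern, for illustration only (architectures without Dup2 would need unix.Dup3):

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	stdoutCloneFD, _ := unix.Dup(1) // clone FD pointing at the real stdout file description
	r, w, _ := os.Pipe()
	unix.Dup2(int(w.Fd()), 1) // FD 1 now refers to the pipe's write end

	done := make(chan string)
	go func() { // drain the pipe into a buffer
		buf := &bytes.Buffer{}
		io.Copy(buf, r)
		done <- buf.String()
	}()

	fmt.Println("redirected via dup2") // written to FD 1, i.e. into the pipe

	w.Close()                   // drop the pipe writer's own descriptor...
	unix.Dup2(stdoutCloneFD, 1) // ...and repoint FD 1 so io.Copy sees EOF
	fmt.Println("captured:", <-done)
}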
7 vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go generated vendored Normal file
@ -0,0 +1,7 @@
// +build windows

package internal

func NewOutputInterceptor() OutputInterceptor {
	return NewOSGlobalReassigningOutputInterceptor()
}
69 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go generated vendored Normal file
@ -0,0 +1,69 @@
package parallel_support

import (
	"fmt"
	"io"
	"os"
	"time"

	"github.com/onsi/ginkgo/v2/reporters"
	"github.com/onsi/ginkgo/v2/types"
)

type BeforeSuiteState struct {
	Data  []byte
	State types.SpecState
}

type ParallelIndexCounter struct {
	Index int
}

var ErrorGone = fmt.Errorf("gone")
var ErrorFailed = fmt.Errorf("failed")
var ErrorEarly = fmt.Errorf("early")

var POLLING_INTERVAL = 50 * time.Millisecond

type Server interface {
	Start()
	Close()
	Address() string
	RegisterAlive(node int, alive func() bool)
	GetSuiteDone() chan interface{}
	GetOutputDestination() io.Writer
	SetOutputDestination(io.Writer)
}

type Client interface {
	Connect() bool
	Close() error

	PostSuiteWillBegin(report types.Report) error
	PostDidRun(report types.SpecReport) error
	PostSuiteDidEnd(report types.Report) error
	PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error
	BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error)
	BlockUntilNonprimaryProcsHaveFinished() error
	BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error)
	FetchNextCounter() (int, error)
	PostAbort() error
	ShouldAbort() bool
	Write(p []byte) (int, error)
}

func NewServer(parallelTotal int, reporter reporters.Reporter) (Server, error) {
	if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" {
		return newHttpServer(parallelTotal, reporter)
	} else {
		return newRPCServer(parallelTotal, reporter)
	}
}

func NewClient(serverHost string) Client {
	if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" {
		return newHttpClient(serverHost)
	} else {
		return newRPCClient(serverHost)
	}
}
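NewServer and NewClient above pick their transport from the GINKGO_PARALLEL_PROTOCOL environment variable. A hedged wiring sketch of that selection, assuming code that lives inside the ginkgo module itself (parallel_support is an internal package and cannot be imported from other modules):

package example

import (
	"os"

	"github.com/onsi/ginkgo/v2/internal/parallel_support"
	"github.com/onsi/ginkgo/v2/reporters"
)

// wireUp starts a coordination server (as the primary process would) and connects a client to it.
// Setting GINKGO_PARALLEL_PROTOCOL=HTTP selects the HTTP transport; anything else falls back to net/rpc.
func wireUp(reporter reporters.Reporter, parallelTotal int) (parallel_support.Server, parallel_support.Client, error) {
	os.Setenv("GINKGO_PARALLEL_PROTOCOL", "HTTP")

	server, err := parallel_support.NewServer(parallelTotal, reporter)
	if err != nil {
		return nil, nil, err
	}
	server.Start()

	client := parallel_support.NewClient(server.Address())
	client.Connect()
	return server, client, nil
}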
152 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go generated vendored Normal file
@ -0,0 +1,152 @@
package parallel_support

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/onsi/ginkgo/v2/types"
)

type httpClient struct {
	serverHost string
}

func newHttpClient(serverHost string) *httpClient {
	return &httpClient{
		serverHost: serverHost,
	}
}

func (client *httpClient) Connect() bool {
	resp, err := http.Get(client.serverHost + "/up")
	if err != nil {
		return false
	}
	resp.Body.Close()
	return resp.StatusCode == http.StatusOK
}

func (client *httpClient) Close() error {
	return nil
}

func (client *httpClient) post(path string, data interface{}) error {
	var body io.Reader
	if data != nil {
		encoded, err := json.Marshal(data)
		if err != nil {
			return err
		}
		body = bytes.NewBuffer(encoded)
	}
	resp, err := http.Post(client.serverHost+path, "application/json", body)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("received unexpected status code %d", resp.StatusCode)
	}
	return nil
}

func (client *httpClient) poll(path string, data interface{}) error {
	for {
		resp, err := http.Get(client.serverHost + path)
		if err != nil {
			return err
		}
		if resp.StatusCode == http.StatusTooEarly {
			resp.Body.Close()
			time.Sleep(POLLING_INTERVAL)
			continue
		}
		defer resp.Body.Close()
		if resp.StatusCode == http.StatusGone {
			return ErrorGone
		}
		if resp.StatusCode == http.StatusFailedDependency {
			return ErrorFailed
		}
		if resp.StatusCode != http.StatusOK {
			return fmt.Errorf("received unexpected status code %d", resp.StatusCode)
		}
		if data != nil {
			return json.NewDecoder(resp.Body).Decode(data)
		}
		return nil
	}
}

func (client *httpClient) PostSuiteWillBegin(report types.Report) error {
	return client.post("/suite-will-begin", report)
}

func (client *httpClient) PostDidRun(report types.SpecReport) error {
	return client.post("/did-run", report)
}

func (client *httpClient) PostSuiteDidEnd(report types.Report) error {
	return client.post("/suite-did-end", report)
}

func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
	beforeSuiteState := BeforeSuiteState{
		State: state,
		Data:  data,
	}
	return client.post("/before-suite-completed", beforeSuiteState)
}

func (client *httpClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
	var beforeSuiteState BeforeSuiteState
	err := client.poll("/before-suite-state", &beforeSuiteState)
	if err == ErrorGone {
		return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
	}
	return beforeSuiteState.State, beforeSuiteState.Data, err
}

func (client *httpClient) BlockUntilNonprimaryProcsHaveFinished() error {
	return client.poll("/have-nonprimary-procs-finished", nil)
}

func (client *httpClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
	var report types.Report
	err := client.poll("/aggregated-nonprimary-procs-report", &report)
	if err == ErrorGone {
		return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
	}
	return report, err
}

func (client *httpClient) FetchNextCounter() (int, error) {
	var counter ParallelIndexCounter
	err := client.poll("/counter", &counter)
	return counter.Index, err
}

func (client *httpClient) PostAbort() error {
	return client.post("/abort", nil)
}

func (client *httpClient) ShouldAbort() bool {
	err := client.poll("/abort", nil)
	if err == ErrorGone {
		return true
	}
	return false
}

func (client *httpClient) Write(p []byte) (int, error) {
	resp, err := http.Post(client.serverHost+"/emit-output", "text/plain;charset=UTF-8 ", bytes.NewReader(p))
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return 0, fmt.Errorf("failed to emit output")
	}
	return len(p), err
}
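Because Write satisfies io.Writer, a worker process can treat the client as a plain writer and have its output relayed through the primary process. A small sketch, under the same internal-package caveat as above:

package example

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/internal/parallel_support"
)

// streamOutput forwards a line to the primary process: the bytes are POSTed to
// /emit-output and written to the server handler's outputDestination.
func streamOutput(client parallel_support.Client, proc int) {
	fmt.Fprintf(client, "proc %d: starting specs\n", proc)
}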
214 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go generated vendored Normal file
@ -0,0 +1,214 @@
/*

The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).

*/

package parallel_support

import (
	"encoding/json"
	"io"
	"net"
	"net/http"

	"github.com/onsi/ginkgo/v2/reporters"
	"github.com/onsi/ginkgo/v2/types"
)

/*
httpServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
It then forwards that communication to attached reporters.
*/
type httpServer struct {
	listener net.Listener
	handler  *ServerHandler
}

//Create a new server, automatically selecting a port
func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) {
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, err
	}
	return &httpServer{
		listener: listener,
		handler:  newServerHandler(parallelTotal, reporter),
	}, nil
}

//Start the server. You don't need to `go s.Start()`, just `s.Start()`
func (server *httpServer) Start() {
	httpServer := &http.Server{}
	mux := http.NewServeMux()
	httpServer.Handler = mux

	//streaming endpoints
	mux.HandleFunc("/suite-will-begin", server.specSuiteWillBegin)
	mux.HandleFunc("/did-run", server.didRun)
	mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd)
	mux.HandleFunc("/emit-output", server.emitOutput)

	//synchronization endpoints
	mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted)
	mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState)
	mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished)
	mux.HandleFunc("/aggregated-nonprimary-procs-report", server.handleAggregatedNonprimaryProcsReport)
	mux.HandleFunc("/counter", server.handleCounter)
	mux.HandleFunc("/up", server.handleUp)
	mux.HandleFunc("/abort", server.handleAbort)

	go httpServer.Serve(server.listener)
}

//Stop the server
func (server *httpServer) Close() {
	server.listener.Close()
}

//The address the server can be reached at. Pass this into the `ForwardingReporter`.
func (server *httpServer) Address() string {
	return "http://" + server.listener.Addr().String()
}

func (server *httpServer) GetSuiteDone() chan interface{} {
	return server.handler.done
}

func (server *httpServer) GetOutputDestination() io.Writer {
	return server.handler.outputDestination
}

func (server *httpServer) SetOutputDestination(w io.Writer) {
	server.handler.outputDestination = w
}

func (server *httpServer) RegisterAlive(node int, alive func() bool) {
	server.handler.registerAlive(node, alive)
}

//
// Streaming Endpoints
//

//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool {
	defer request.Body.Close()
	if json.NewDecoder(request.Body).Decode(object) != nil {
		writer.WriteHeader(http.StatusBadRequest)
		return false
	}
	return true
}

func (server *httpServer) handleError(err error, writer http.ResponseWriter) bool {
	if err == nil {
		return false
	}
	switch err {
	case ErrorEarly:
		writer.WriteHeader(http.StatusTooEarly)
	case ErrorGone:
		writer.WriteHeader(http.StatusGone)
	case ErrorFailed:
		writer.WriteHeader(http.StatusFailedDependency)
	default:
		writer.WriteHeader(http.StatusInternalServerError)
	}
	return true
}

func (server *httpServer) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
	var report types.Report
	if !server.decode(writer, request, &report) {
		return
	}

	server.handleError(server.handler.SpecSuiteWillBegin(report, voidReceiver), writer)
}

func (server *httpServer) didRun(writer http.ResponseWriter, request *http.Request) {
	var report types.SpecReport
	if !server.decode(writer, request, &report) {
		return
	}

	server.handleError(server.handler.DidRun(report, voidReceiver), writer)
}

func (server *httpServer) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
	var report types.Report
	if !server.decode(writer, request, &report) {
		return
	}
	server.handleError(server.handler.SpecSuiteDidEnd(report, voidReceiver), writer)
}

func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.Request) {
	output, err := io.ReadAll(request.Body)
	if err != nil {
		writer.WriteHeader(http.StatusInternalServerError)
		return
	}
	var n int
	server.handleError(server.handler.EmitOutput(output, &n), writer)
}

func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
	var beforeSuiteState BeforeSuiteState
	if !server.decode(writer, request, &beforeSuiteState) {
		return
	}

	server.handleError(server.handler.BeforeSuiteCompleted(beforeSuiteState, voidReceiver), writer)
}

func (server *httpServer) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
	var beforeSuiteState BeforeSuiteState
	if server.handleError(server.handler.BeforeSuiteState(voidSender, &beforeSuiteState), writer) {
		return
	}
	json.NewEncoder(writer).Encode(beforeSuiteState)
}

func (server *httpServer) handleHaveNonprimaryProcsFinished(writer http.ResponseWriter, request *http.Request) {
	if server.handleError(server.handler.HaveNonprimaryProcsFinished(voidSender, voidReceiver), writer) {
		return
	}
	writer.WriteHeader(http.StatusOK)
}

func (server *httpServer) handleAggregatedNonprimaryProcsReport(writer http.ResponseWriter, request *http.Request) {
	var aggregatedReport types.Report
	if server.handleError(server.handler.AggregatedNonprimaryProcsReport(voidSender, &aggregatedReport), writer) {
		return
	}
	json.NewEncoder(writer).Encode(aggregatedReport)
}

func (server *httpServer) handleCounter(writer http.ResponseWriter, request *http.Request) {
	var n int
	if server.handleError(server.handler.Counter(voidSender, &n), writer) {
		return
	}
	json.NewEncoder(writer).Encode(ParallelIndexCounter{Index: n})
}

func (server *httpServer) handleUp(writer http.ResponseWriter, request *http.Request) {
	writer.WriteHeader(http.StatusOK)
}

func (server *httpServer) handleAbort(writer http.ResponseWriter, request *http.Request) {
	if request.Method == "GET" {
		var shouldAbort bool
		server.handler.ShouldAbort(voidSender, &shouldAbort)
		if shouldAbort {
			writer.WriteHeader(http.StatusGone)
		} else {
			writer.WriteHeader(http.StatusOK)
		}
	} else {
		server.handler.Abort(voidSender, voidReceiver)
	}
}
119 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go generated vendored Normal file
@ -0,0 +1,119 @@
package parallel_support

import (
	"net/rpc"
	"time"

	"github.com/onsi/ginkgo/v2/types"
)

type rpcClient struct {
	serverHost string
	client     *rpc.Client
}

func newRPCClient(serverHost string) *rpcClient {
	return &rpcClient{
		serverHost: serverHost,
	}
}

func (client *rpcClient) Connect() bool {
	var err error
	if client.client != nil {
		return true
	}
	client.client, err = rpc.DialHTTPPath("tcp", client.serverHost, "/")
	if err != nil {
		client.client = nil
		return false
	}
	return true
}

func (client *rpcClient) Close() error {
	return client.client.Close()
}

func (client *rpcClient) poll(method string, data interface{}) error {
	for {
		err := client.client.Call(method, voidSender, data)
		if err == nil {
			return nil
		}
		switch err.Error() {
		case ErrorEarly.Error():
			time.Sleep(POLLING_INTERVAL)
		case ErrorGone.Error():
			return ErrorGone
		case ErrorFailed.Error():
			return ErrorFailed
		default:
			return err
		}
	}
}

func (client *rpcClient) PostSuiteWillBegin(report types.Report) error {
	return client.client.Call("Server.SpecSuiteWillBegin", report, voidReceiver)
}

func (client *rpcClient) PostDidRun(report types.SpecReport) error {
	return client.client.Call("Server.DidRun", report, voidReceiver)
}

func (client *rpcClient) PostSuiteDidEnd(report types.Report) error {
	return client.client.Call("Server.SpecSuiteDidEnd", report, voidReceiver)
}

func (client *rpcClient) Write(p []byte) (int, error) {
	var n int
	err := client.client.Call("Server.EmitOutput", p, &n)
	return n, err
}

func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
	beforeSuiteState := BeforeSuiteState{
		State: state,
		Data:  data,
	}
	return client.client.Call("Server.BeforeSuiteCompleted", beforeSuiteState, voidReceiver)
}

func (client *rpcClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
	var beforeSuiteState BeforeSuiteState
	err := client.poll("Server.BeforeSuiteState", &beforeSuiteState)
	if err == ErrorGone {
		return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
	}
	return beforeSuiteState.State, beforeSuiteState.Data, err
}

func (client *rpcClient) BlockUntilNonprimaryProcsHaveFinished() error {
	return client.poll("Server.HaveNonprimaryProcsFinished", voidReceiver)
}

func (client *rpcClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
	var report types.Report
	err := client.poll("Server.AggregatedNonprimaryProcsReport", &report)
	if err == ErrorGone {
		return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
	}
	return report, err
}

func (client *rpcClient) FetchNextCounter() (int, error) {
	var counter int
	err := client.client.Call("Server.Counter", voidSender, &counter)
	return counter, err
}

func (client *rpcClient) PostAbort() error {
	return client.client.Call("Server.Abort", voidSender, voidReceiver)
}

func (client *rpcClient) ShouldAbort() bool {
	var shouldAbort bool
	client.client.Call("Server.ShouldAbort", voidSender, &shouldAbort)
	return shouldAbort
}
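The RPC client above is a thin wrapper over the standard net/rpc package with its HTTP handshake mounted at "/". A standalone sketch of the same stdlib pattern; the address and port are illustrative, not values from the diff:

package main

import (
	"log"
	"net/rpc"
)

type Void struct{}

func main() {
	// mirrors rpcClient.Connect: DialHTTPPath speaks the net/rpc HTTP handshake at path "/"
	client, err := rpc.DialHTTPPath("tcp", "127.0.0.1:12345", "/") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// mirrors the "Server.<Method>" calls above: a void argument in, a counter out
	var counter int
	if err := client.Call("Server.Counter", Void{}, &counter); err != nil {
		log.Fatal(err)
	}
	log.Println("next index:", counter)
}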
75 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go generated vendored Normal file
@ -0,0 +1,75 @@
/*

The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).

*/

package parallel_support

import (
	"io"
	"net"
	"net/http"
	"net/rpc"

	"github.com/onsi/ginkgo/v2/reporters"
)

/*
RPCServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
It then forwards that communication to attached reporters.
*/
type RPCServer struct {
	listener net.Listener
	handler  *ServerHandler
}

//Create a new server, automatically selecting a port
func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) {
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, err
	}
	return &RPCServer{
		listener: listener,
		handler:  newServerHandler(parallelTotal, reporter),
	}, nil
}

//Start the server. You don't need to `go s.Start()`, just `s.Start()`
func (server *RPCServer) Start() {
	rpcServer := rpc.NewServer()
	rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server

	httpServer := &http.Server{}
	httpServer.Handler = rpcServer

	go httpServer.Serve(server.listener)
}

//Stop the server
func (server *RPCServer) Close() {
	server.listener.Close()
}

//The address the server can be reached at. Pass this into the `ForwardingReporter`.
func (server *RPCServer) Address() string {
	return server.listener.Addr().String()
}

func (server *RPCServer) GetSuiteDone() chan interface{} {
	return server.handler.done
}

func (server *RPCServer) GetOutputDestination() io.Writer {
	return server.handler.outputDestination
}

func (server *RPCServer) SetOutputDestination(w io.Writer) {
	server.handler.outputDestination = w
}

func (server *RPCServer) RegisterAlive(node int, alive func() bool) {
	server.handler.registerAlive(node, alive)
}
202 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go generated vendored Normal file
@ -0,0 +1,202 @@
package parallel_support

import (
	"io"
	"os"
	"sync"

	"github.com/onsi/ginkgo/v2/reporters"
	"github.com/onsi/ginkgo/v2/types"
)

type Void struct{}

var voidReceiver *Void = &Void{}
var voidSender Void

// ServerHandler is an RPC-compatible handler that is shared between the http server and the rpc server.
// It handles all the business logic to avoid duplication between the two servers

type ServerHandler struct {
	done              chan interface{}
	outputDestination io.Writer
	reporter          reporters.Reporter
	alives            []func() bool
	lock              *sync.Mutex
	beforeSuiteState  BeforeSuiteState
	parallelTotal     int
	counter           int
	counterLock       *sync.Mutex
	shouldAbort       bool

	numSuiteDidBegins int
	numSuiteDidEnds   int
	aggregatedReport  types.Report
	reportHoldingArea []types.SpecReport
}

func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler {
	return &ServerHandler{
		reporter:          reporter,
		lock:              &sync.Mutex{},
		counterLock:       &sync.Mutex{},
		alives:            make([]func() bool, parallelTotal),
		beforeSuiteState:  BeforeSuiteState{Data: nil, State: types.SpecStateInvalid},
		parallelTotal:     parallelTotal,
		outputDestination: os.Stdout,
		done:              make(chan interface{}),
	}
}

func (handler *ServerHandler) SpecSuiteWillBegin(report types.Report, _ *Void) error {
	handler.lock.Lock()
	defer handler.lock.Unlock()

	handler.numSuiteDidBegins += 1

	// all summaries are identical, so it's fine to simply emit the last one of these
	if handler.numSuiteDidBegins == handler.parallelTotal {
		handler.reporter.SuiteWillBegin(report)

		for _, summary := range handler.reportHoldingArea {
			handler.reporter.WillRun(summary)
			handler.reporter.DidRun(summary)
		}

		handler.reportHoldingArea = nil
	}

	return nil
}

func (handler *ServerHandler) DidRun(report types.SpecReport, _ *Void) error {
	handler.lock.Lock()
	defer handler.lock.Unlock()

	if handler.numSuiteDidBegins == handler.parallelTotal {
		handler.reporter.WillRun(report)
		handler.reporter.DidRun(report)
	} else {
		handler.reportHoldingArea = append(handler.reportHoldingArea, report)
	}

	return nil
}

func (handler *ServerHandler) SpecSuiteDidEnd(report types.Report, _ *Void) error {
	handler.lock.Lock()
	defer handler.lock.Unlock()

	handler.numSuiteDidEnds += 1
	if handler.numSuiteDidEnds == 1 {
		handler.aggregatedReport = report
	} else {
		handler.aggregatedReport = handler.aggregatedReport.Add(report)
	}

	if handler.numSuiteDidEnds == handler.parallelTotal {
		handler.reporter.SuiteDidEnd(handler.aggregatedReport)
		close(handler.done)
	}

	return nil
}

func (handler *ServerHandler) EmitOutput(output []byte, n *int) error {
	var err error
	*n, err = handler.outputDestination.Write(output)
	return err
}

func (handler *ServerHandler) registerAlive(proc int, alive func() bool) {
	handler.lock.Lock()
	defer handler.lock.Unlock()
	handler.alives[proc-1] = alive
}

func (handler *ServerHandler) procIsAlive(proc int) bool {
	handler.lock.Lock()
	defer handler.lock.Unlock()
	alive := handler.alives[proc-1]
	if alive == nil {
		return true
	}
	return alive()
}

func (handler *ServerHandler) haveNonprimaryProcsFinished() bool {
	for i := 2; i <= handler.parallelTotal; i++ {
		if handler.procIsAlive(i) {
			return false
		}
	}
	return true
}

func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error {
	handler.lock.Lock()
	defer handler.lock.Unlock()
	handler.beforeSuiteState = beforeSuiteState

	return nil
}

func (handler *ServerHandler) BeforeSuiteState(_ Void, beforeSuiteState *BeforeSuiteState) error {
	proc1IsAlive := handler.procIsAlive(1)
	handler.lock.Lock()
	defer handler.lock.Unlock()
	if handler.beforeSuiteState.State == types.SpecStateInvalid {
		if proc1IsAlive {
			return ErrorEarly
		} else {
			return ErrorGone
		}
	}
	*beforeSuiteState = handler.beforeSuiteState
	return nil
}

func (handler *ServerHandler) HaveNonprimaryProcsFinished(_ Void, _ *Void) error {
	if handler.haveNonprimaryProcsFinished() {
		return nil
	} else {
		return ErrorEarly
	}
}

func (handler *ServerHandler) AggregatedNonprimaryProcsReport(_ Void, report *types.Report) error {
	if handler.haveNonprimaryProcsFinished() {
		handler.lock.Lock()
		defer handler.lock.Unlock()
		if handler.numSuiteDidEnds == handler.parallelTotal-1 {
			*report = handler.aggregatedReport
			return nil
		} else {
			return ErrorGone
		}
	} else {
		return ErrorEarly
	}
}

func (handler *ServerHandler) Counter(_ Void, counter *int) error {
	handler.counterLock.Lock()
	defer handler.counterLock.Unlock()
	*counter = handler.counter
	handler.counter++
	return nil
}

func (handler *ServerHandler) Abort(_ Void, _ *Void) error {
	handler.lock.Lock()
	defer handler.lock.Unlock()
	handler.shouldAbort = true
	return nil
}

func (handler *ServerHandler) ShouldAbort(_ Void, shouldAbort *bool) error {
	handler.lock.Lock()
	defer handler.lock.Unlock()
	*shouldAbort = handler.shouldAbort
	return nil
}
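The Counter method above hands out a strictly increasing index under counterLock, which is how specs are distributed across parallel processes. A sketch of the consuming side (internal-package caveat as above; the helper name is illustrative):

package example

import (
	"github.com/onsi/ginkgo/v2/internal/parallel_support"
)

// nextSpecGroup asks the primary process for the next globally unique group index;
// an index past the end of the ordered groups means there is no more work for this process.
func nextSpecGroup(client parallel_support.Client, totalGroups int) (int, bool) {
	idx, err := client.FetchNextCounter()
	if err != nil || idx >= totalGroups {
		return 0, false
	}
	return idx, true
}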
40 vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go generated vendored Normal file
@ -0,0 +1,40 @@
package internal

import (
	"reflect"
	"time"

	"github.com/onsi/ginkgo/v2/types"
)

type ReportEntry = types.ReportEntry

func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (ReportEntry, error) {
	out := ReportEntry{
		Visibility: types.ReportEntryVisibilityAlways,
		Name:       name,
		Time:       time.Now(),
		Location:   cl,
	}
	var didSetValue = false
	for _, arg := range args {
		switch reflect.TypeOf(arg) {
		case reflect.TypeOf(types.ReportEntryVisibilityAlways):
			out.Visibility = arg.(types.ReportEntryVisibility)
		case reflect.TypeOf(types.CodeLocation{}):
			out.Location = arg.(types.CodeLocation)
		case reflect.TypeOf(Offset(0)):
			out.Location = types.NewCodeLocation(2 + int(arg.(Offset)))
		case reflect.TypeOf(out.Time):
			out.Time = arg.(time.Time)
		default:
			if didSetValue {
				return ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg)
			}
			out.Value = types.WrapEntryValue(arg)
			didSetValue = true
		}
	}

	return out, nil
}
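NewReportEntry recognizes its optional arguments by concrete type via reflect.TypeOf rather than by position. A simplified standalone sketch of that dispatch pattern (names and behavior are illustrative, not the vendored API):

package main

import (
	"fmt"
	"reflect"
	"time"
)

// describe accepts optional arguments in any order and classifies them by type,
// the same way NewReportEntry sorts visibility, code location, offset and time arguments.
func describe(args ...interface{}) string {
	when := time.Now()
	label := "default"
	for _, arg := range args {
		switch reflect.TypeOf(arg) {
		case reflect.TypeOf(time.Time{}):
			when = arg.(time.Time)
		case reflect.TypeOf(""):
			label = arg.(string)
		}
	}
	return fmt.Sprintf("%s @ %s", label, when.Format(time.RFC3339))
}

func main() {
	fmt.Println(describe("custom", time.Unix(0, 0).UTC()))
}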
71 vendor/github.com/onsi/ginkgo/v2/internal/spec.go generated vendored Normal file
@ -0,0 +1,71 @@
package internal

import (
	"strings"

	"github.com/onsi/ginkgo/v2/types"
)

type Spec struct {
	Nodes Nodes
	Skip  bool
}

func (s Spec) SubjectID() uint {
	return s.Nodes.FirstNodeWithType(types.NodeTypeIt).ID
}

func (s Spec) Text() string {
	texts := []string{}
	for i := range s.Nodes {
		if s.Nodes[i].Text != "" {
			texts = append(texts, s.Nodes[i].Text)
		}
	}
	return strings.Join(texts, " ")
}

func (s Spec) FirstNodeWithType(nodeTypes types.NodeType) Node {
	return s.Nodes.FirstNodeWithType(nodeTypes)
}

func (s Spec) FlakeAttempts() int {
	flakeAttempts := 0
	for i := range s.Nodes {
		if s.Nodes[i].FlakeAttempts > 0 {
			flakeAttempts = s.Nodes[i].FlakeAttempts
		}
	}

	return flakeAttempts
}

type Specs []Spec

func (s Specs) HasAnySpecsMarkedPending() bool {
	for i := range s {
		if s[i].Nodes.HasNodeMarkedPending() {
			return true
		}
	}

	return false
}

func (s Specs) CountWithoutSkip() int {
	n := 0
	for i := range s {
		if !s[i].Skip {
			n += 1
		}
	}
	return n
}

func (s Specs) AtIndices(indices SpecIndices) Specs {
	out := make(Specs, len(indices))
	for i, idx := range indices {
		out[i] = s[idx]
	}
	return out
}
629 vendor/github.com/onsi/ginkgo/v2/internal/suite.go generated vendored Normal file
@ -0,0 +1,629 @@
package internal

import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo/v2/formatter"
	"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
	"github.com/onsi/ginkgo/v2/internal/parallel_support"
	"github.com/onsi/ginkgo/v2/reporters"
	"github.com/onsi/ginkgo/v2/types"
)

type Phase uint

const (
	PhaseBuildTopLevel Phase = iota
	PhaseBuildTree
	PhaseRun
)

type Suite struct {
	tree               *TreeNode
	topLevelContainers Nodes

	phase Phase

	suiteNodes   Nodes
	cleanupNodes Nodes

	failer            *Failer
	reporter          reporters.Reporter
	writer            WriterInterface
	outputInterceptor OutputInterceptor
	interruptHandler  interrupt_handler.InterruptHandlerInterface
	config            types.SuiteConfig

	skipAll           bool
	report            types.Report
	currentSpecReport types.SpecReport
	currentNode       Node

	client parallel_support.Client
}

func NewSuite() *Suite {
	return &Suite{
		tree:  &TreeNode{},
		phase: PhaseBuildTopLevel,
	}
}

func (suite *Suite) BuildTree() error {
	// During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelContainers and entered
	// We now enter PhaseBuildTree where these top level containers are entered and added to the spec tree
	suite.phase = PhaseBuildTree
	for _, topLevelContainer := range suite.topLevelContainers {
		err := suite.PushNode(topLevelContainer)
		if err != nil {
			return err
		}
	}
	return nil
}

func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, suiteConfig types.SuiteConfig) (bool, bool) {
	if suite.phase != PhaseBuildTree {
		panic("cannot run before building the tree = call suite.BuildTree() first")
	}
	ApplyNestedFocusPolicyToTree(suite.tree)
	specs := GenerateSpecsFromTreeRoot(suite.tree)
	specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig)

	suite.phase = PhaseRun
	suite.client = client
	suite.failer = failer
	suite.reporter = reporter
	suite.writer = writer
	suite.outputInterceptor = outputInterceptor
	suite.interruptHandler = interruptHandler
	suite.config = suiteConfig

	success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)

	return success, hasProgrammaticFocus
}

func (suite *Suite) InRunPhase() bool {
	return suite.phase == PhaseRun
}

/*
Tree Construction methods

PushNode is used during PhaseBuildTopLevel and PhaseBuildTree
*/

func (suite *Suite) PushNode(node Node) error {
	if node.NodeType.Is(types.NodeTypeCleanupInvalid | types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
		return suite.pushCleanupNode(node)
	}

	if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) {
		return suite.pushSuiteNode(node)
	}

	if suite.phase == PhaseRun {
		return types.GinkgoErrors.PushingNodeInRunPhase(node.NodeType, node.CodeLocation)
	}

	if node.MarkedSerial {
		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
		if !firstOrderedNode.IsZero() && !firstOrderedNode.MarkedSerial {
			return types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(node.CodeLocation, node.NodeType)
		}
	}

	if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
		if firstOrderedNode.IsZero() {
			return types.GinkgoErrors.SetupNodeNotInOrderedContainer(node.CodeLocation, node.NodeType)
		}
	}

	if node.NodeType == types.NodeTypeContainer {
		// During PhaseBuildTopLevel we only track the top level containers without entering them
		// We only enter the top level container nodes during PhaseBuildTree
		//
		// This ensures the tree is only constructed after `go test` has called `flag.Parse()` and gives
		// the user an opportunity to load suite configuration information in the `TestX` go test hook just before `RunSpecs`
		// is invoked. This makes the lifecycle easier to reason about and solves issues like #693.
		if suite.phase == PhaseBuildTopLevel {
			suite.topLevelContainers = append(suite.topLevelContainers, node)
			return nil
		}
		if suite.phase == PhaseBuildTree {
			parentTree := suite.tree
			suite.tree = &TreeNode{Node: node}
			parentTree.AppendChild(suite.tree)
			err := func() (err error) {
				defer func() {
					if e := recover(); e != nil {
						err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation)
					}
				}()
				node.Body()
				return err
			}()
			suite.tree = parentTree
			return err
		}
	} else {
		suite.tree.AppendChild(&TreeNode{Node: node})
		return nil
	}

	return nil
}

func (suite *Suite) pushSuiteNode(node Node) error {
	if suite.phase == PhaseBuildTree {
		return types.GinkgoErrors.SuiteNodeInNestedContext(node.NodeType, node.CodeLocation)
	}

	if suite.phase == PhaseRun {
		return types.GinkgoErrors.SuiteNodeDuringRunPhase(node.NodeType, node.CodeLocation)
	}

	switch node.NodeType {
	case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
		existingBefores := suite.suiteNodes.WithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
		if len(existingBefores) > 0 {
			return types.GinkgoErrors.MultipleBeforeSuiteNodes(node.NodeType, node.CodeLocation, existingBefores[0].NodeType, existingBefores[0].CodeLocation)
		}
	case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
		existingAfters := suite.suiteNodes.WithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
		if len(existingAfters) > 0 {
			return types.GinkgoErrors.MultipleAfterSuiteNodes(node.NodeType, node.CodeLocation, existingAfters[0].NodeType, existingAfters[0].CodeLocation)
		}
	}

	suite.suiteNodes = append(suite.suiteNodes, node)
	return nil
}

func (suite *Suite) pushCleanupNode(node Node) error {
	if suite.phase != PhaseRun || suite.currentNode.IsZero() {
		return types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(node.CodeLocation)
	}

	switch suite.currentNode.NodeType {
	case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite, types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
		node.NodeType = types.NodeTypeCleanupAfterSuite
	case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
		node.NodeType = types.NodeTypeCleanupAfterAll
	case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite:
		return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
	case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
		return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
	default:
		node.NodeType = types.NodeTypeCleanupAfterEach
	}

	node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID
	node.NestingLevel = suite.currentNode.NestingLevel
	suite.cleanupNodes = append(suite.cleanupNodes, node)

	return nil
}

/*
Spec Running methods - used during PhaseRun
*/
func (suite *Suite) CurrentSpecReport() types.SpecReport {
	report := suite.currentSpecReport
	if suite.writer != nil {
		report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
	}
	return report
}

func (suite *Suite) AddReportEntry(entry ReportEntry) error {
	if suite.phase != PhaseRun {
		return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
	}
	suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
	return nil
}

func (suite *Suite) isRunningInParallel() bool {
	return suite.config.ParallelTotal > 1
}

func (suite *Suite) processCurrentSpecReport() {
	suite.reporter.DidRun(suite.currentSpecReport)
	if suite.isRunningInParallel() {
		suite.client.PostDidRun(suite.currentSpecReport)
	}
	suite.report.SpecReports = append(suite.report.SpecReports, suite.currentSpecReport)

	if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
		suite.report.SuiteSucceeded = false
		if suite.config.FailFast || suite.currentSpecReport.State.Is(types.SpecStateAborted) {
			suite.skipAll = true
			if suite.isRunningInParallel() {
				suite.client.PostAbort()
			}
		}
	}
}

func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
	numSpecsThatWillBeRun := specs.CountWithoutSkip()

	suite.report = types.Report{
		SuitePath:                 suitePath,
		SuiteDescription:          description,
		SuiteLabels:               suiteLabels,
		SuiteConfig:               suite.config,
		SuiteHasProgrammaticFocus: hasProgrammaticFocus,
		PreRunStats: types.PreRunStats{
			TotalSpecs:       len(specs),
			SpecsThatWillRun: numSpecsThatWillBeRun,
		},
		StartTime: time.Now(),
	}

	suite.reporter.SuiteWillBegin(suite.report)
	if suite.isRunningInParallel() {
		suite.client.PostSuiteWillBegin(suite.report)
	}

	suite.report.SuiteSucceeded = true
	suite.runBeforeSuite(numSpecsThatWillBeRun)

	if suite.report.SuiteSucceeded {
		groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
		nextIndex := MakeIncrementingIndexCounter()
		if suite.isRunningInParallel() {
			nextIndex = suite.client.FetchNextCounter
		}

		for {
			groupedSpecIdx, err := nextIndex()
			if err != nil {
				suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, fmt.Sprintf("Failed to iterate over specs:\n%s", err.Error()))
				suite.report.SuiteSucceeded = false
				break
			}

			if groupedSpecIdx >= len(groupedSpecIndices) {
				if suite.config.ParallelProcess == 1 && len(serialGroupedSpecIndices) > 0 {
					groupedSpecIndices, serialGroupedSpecIndices, nextIndex = serialGroupedSpecIndices, GroupedSpecIndices{}, MakeIncrementingIndexCounter()
					suite.client.BlockUntilNonprimaryProcsHaveFinished()
					continue
				}
				break
			}

			// the complexity for running groups of specs is very high because of Ordered containers and FlakeAttempts
			// we encapsulate that complexity in the notion of a Group that can run
			// Group is really just an extension of suite so it gets passed a suite and has access to all its internals
			// Note that group is stateful and intended for single use!
			newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx]))
		}

		if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending {
			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set")
			suite.report.SuiteSucceeded = false
		}
	}

	suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)

	interruptStatus := suite.interruptHandler.Status()
	if interruptStatus.Interrupted {
		suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String())
		suite.report.SuiteSucceeded = false
	}
	suite.report.EndTime = time.Now()
	suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime)

	if suite.config.ParallelProcess == 1 {
		suite.runReportAfterSuite()
	}
	suite.reporter.SuiteDidEnd(suite.report)
	if suite.isRunningInParallel() {
		suite.client.PostSuiteDidEnd(suite.report)
	}

	return suite.report.SuiteSucceeded
}

func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
	interruptStatus := suite.interruptHandler.Status()
	beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
	if !beforeSuiteNode.IsZero() && !interruptStatus.Interrupted && numSpecsThatWillBeRun > 0 {
		suite.currentSpecReport = types.SpecReport{
			LeafNodeType:     beforeSuiteNode.NodeType,
			LeafNodeLocation: beforeSuiteNode.CodeLocation,
			ParallelProcess:  suite.config.ParallelProcess,
		}
		suite.reporter.WillRun(suite.currentSpecReport)
		suite.runSuiteNode(beforeSuiteNode, interruptStatus.Channel)
		if suite.currentSpecReport.State.Is(types.SpecStateSkipped) {
			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite")
			suite.skipAll = true
		}
		suite.processCurrentSpecReport()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
|
||||||
|
afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
|
||||||
|
if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
|
||||||
|
suite.currentSpecReport = types.SpecReport{
|
||||||
|
LeafNodeType: afterSuiteNode.NodeType,
|
||||||
|
LeafNodeLocation: afterSuiteNode.CodeLocation,
|
||||||
|
ParallelProcess: suite.config.ParallelProcess,
|
||||||
|
}
|
||||||
|
suite.reporter.WillRun(suite.currentSpecReport)
|
||||||
|
suite.runSuiteNode(afterSuiteNode, suite.interruptHandler.Status().Channel)
|
||||||
|
suite.processCurrentSpecReport()
|
||||||
|
}
|
||||||
|
|
||||||
|
afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse()
|
||||||
|
if len(afterSuiteCleanup) > 0 {
|
||||||
|
for _, cleanupNode := range afterSuiteCleanup {
|
||||||
|
suite.currentSpecReport = types.SpecReport{
|
||||||
|
LeafNodeType: cleanupNode.NodeType,
|
||||||
|
LeafNodeLocation: cleanupNode.CodeLocation,
|
||||||
|
ParallelProcess: suite.config.ParallelProcess,
|
||||||
|
}
|
||||||
|
suite.reporter.WillRun(suite.currentSpecReport)
|
||||||
|
suite.runSuiteNode(cleanupNode, suite.interruptHandler.Status().Channel)
|
||||||
|
suite.processCurrentSpecReport()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) runReportAfterSuite() {
|
||||||
|
for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) {
|
||||||
|
suite.currentSpecReport = types.SpecReport{
|
||||||
|
LeafNodeType: node.NodeType,
|
||||||
|
LeafNodeLocation: node.CodeLocation,
|
||||||
|
LeafNodeText: node.Text,
|
||||||
|
ParallelProcess: suite.config.ParallelProcess,
|
||||||
|
}
|
||||||
|
suite.reporter.WillRun(suite.currentSpecReport)
|
||||||
|
suite.runReportAfterSuiteNode(node, suite.report)
|
||||||
|
suite.processCurrentSpecReport()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
|
||||||
|
nodes := spec.Nodes.WithType(nodeType)
|
||||||
|
if nodeType == types.NodeTypeReportAfterEach {
|
||||||
|
nodes = nodes.SortedByDescendingNestingLevel()
|
||||||
|
}
|
||||||
|
if nodeType == types.NodeTypeReportBeforeEach {
|
||||||
|
nodes = nodes.SortedByAscendingNestingLevel()
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range nodes {
|
||||||
|
suite.writer.Truncate()
|
||||||
|
suite.outputInterceptor.StartInterceptingOutput()
|
||||||
|
report := suite.currentSpecReport
|
||||||
|
nodes[i].Body = func() {
|
||||||
|
nodes[i].ReportEachBody(report)
|
||||||
|
}
|
||||||
|
suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS,
|
||||||
|
"{{yellow}}Ginkgo received an interrupt signal but is currently running a %s node. To avoid an invalid report the %s node will not be interrupted however subsequent tests will be skipped.{{/}}\n\n{{bold}}The running %s node is at:\n%s.{{/}}",
|
||||||
|
nodeType, nodeType, nodeType,
|
||||||
|
nodes[i].CodeLocation,
|
||||||
|
))
|
||||||
|
state, failure := suite.runNode(nodes[i], nil, spec.Nodes.BestTextFor(nodes[i]))
|
||||||
|
suite.interruptHandler.ClearInterruptPlaceholderMessage()
|
||||||
|
// If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state.
|
||||||
|
// Also, if the reporter is ever aborted - always override the state to propagate the abort
|
||||||
|
if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) {
|
||||||
|
suite.currentSpecReport.State = state
|
||||||
|
suite.currentSpecReport.Failure = failure
|
||||||
|
}
|
||||||
|
suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
|
||||||
|
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) {
|
||||||
|
if suite.config.DryRun {
|
||||||
|
suite.currentSpecReport.State = types.SpecStatePassed
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
suite.writer.Truncate()
|
||||||
|
suite.outputInterceptor.StartInterceptingOutput()
|
||||||
|
suite.currentSpecReport.StartTime = time.Now()
|
||||||
|
|
||||||
|
var err error
|
||||||
|
switch node.NodeType {
|
||||||
|
case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite:
|
||||||
|
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
|
||||||
|
case types.NodeTypeCleanupAfterSuite:
|
||||||
|
if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 {
|
||||||
|
err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
|
||||||
|
}
|
||||||
|
case types.NodeTypeSynchronizedBeforeSuite:
|
||||||
|
var data []byte
|
||||||
|
var runAllProcs bool
|
||||||
|
if suite.config.ParallelProcess == 1 {
|
||||||
|
if suite.config.ParallelTotal > 1 {
|
||||||
|
suite.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
|
||||||
|
}
|
||||||
|
node.Body = func() { data = node.SynchronizedBeforeSuiteProc1Body() }
|
||||||
|
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
|
||||||
|
if suite.config.ParallelTotal > 1 {
|
||||||
|
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
suite.outputInterceptor.StartInterceptingOutput()
|
||||||
|
if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
|
||||||
|
err = suite.client.PostSynchronizedBeforeSuiteCompleted(types.SpecStatePassed, data)
|
||||||
|
} else {
|
||||||
|
err = suite.client.PostSynchronizedBeforeSuiteCompleted(suite.currentSpecReport.State, nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
runAllProcs = suite.currentSpecReport.State.Is(types.SpecStatePassed) && err == nil
|
||||||
|
} else {
|
||||||
|
var proc1State types.SpecState
|
||||||
|
proc1State, data, err = suite.client.BlockUntilSynchronizedBeforeSuiteData()
|
||||||
|
switch proc1State {
|
||||||
|
case types.SpecStatePassed:
|
||||||
|
runAllProcs = true
|
||||||
|
case types.SpecStateFailed, types.SpecStatePanicked:
|
||||||
|
err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1()
|
||||||
|
case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped:
|
||||||
|
suite.currentSpecReport.State = proc1State
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if runAllProcs {
|
||||||
|
node.Body = func() { node.SynchronizedBeforeSuiteAllProcsBody(data) }
|
||||||
|
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
|
||||||
|
}
|
||||||
|
case types.NodeTypeSynchronizedAfterSuite:
|
||||||
|
node.Body = node.SynchronizedAfterSuiteAllProcsBody
|
||||||
|
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
|
||||||
|
if suite.config.ParallelProcess == 1 {
|
||||||
|
if suite.config.ParallelTotal > 1 {
|
||||||
|
err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
if suite.config.ParallelTotal > 1 {
|
||||||
|
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
|
||||||
|
}
|
||||||
|
|
||||||
|
node.Body = node.SynchronizedAfterSuiteProc1Body
|
||||||
|
state, failure := suite.runNode(node, interruptChannel, "")
|
||||||
|
if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
|
||||||
|
suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
|
||||||
|
suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
suite.currentSpecReport.EndTime = time.Now()
|
||||||
|
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
|
||||||
|
suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
|
||||||
|
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) {
|
||||||
|
suite.writer.Truncate()
|
||||||
|
suite.outputInterceptor.StartInterceptingOutput()
|
||||||
|
suite.currentSpecReport.StartTime = time.Now()
|
||||||
|
|
||||||
|
if suite.config.ParallelTotal > 1 {
|
||||||
|
aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
|
||||||
|
if err != nil {
|
||||||
|
suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
report = report.Add(aggregatedReport)
|
||||||
|
}
|
||||||
|
|
||||||
|
node.Body = func() { node.ReportAfterSuiteBody(report) }
|
||||||
|
suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS,
|
||||||
|
"{{yellow}}Ginkgo received an interrupt signal but is currently running a ReportAfterSuite node. To avoid an invalid report the ReportAfterSuite node will not be interrupted.{{/}}\n\n{{bold}}The running ReportAfterSuite node is at:\n%s.{{/}}",
|
||||||
|
node.CodeLocation,
|
||||||
|
))
|
||||||
|
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, nil, "")
|
||||||
|
suite.interruptHandler.ClearInterruptPlaceholderMessage()
|
||||||
|
|
||||||
|
suite.currentSpecReport.EndTime = time.Now()
|
||||||
|
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
|
||||||
|
suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
|
||||||
|
suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text string) (types.SpecState, types.Failure) {
|
||||||
|
if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
|
||||||
|
suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node)
|
||||||
|
}
|
||||||
|
|
||||||
|
suite.currentNode = node
|
||||||
|
defer func() {
|
||||||
|
suite.currentNode = Node{}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if suite.config.EmitSpecProgress {
|
||||||
|
if text == "" {
|
||||||
|
text = "TOP-LEVEL"
|
||||||
|
}
|
||||||
|
s := fmt.Sprintf("[%s] %s\n %s\n", node.NodeType.String(), text, node.CodeLocation.String())
|
||||||
|
suite.writer.Write([]byte(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
var failure types.Failure
|
||||||
|
failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
|
||||||
|
if node.NodeType.Is(types.NodeTypeIt) || node.NodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
||||||
|
failure.FailureNodeContext = types.FailureNodeIsLeafNode
|
||||||
|
} else if node.NestingLevel <= 0 {
|
||||||
|
failure.FailureNodeContext = types.FailureNodeAtTopLevel
|
||||||
|
} else {
|
||||||
|
failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1
|
||||||
|
}
|
||||||
|
|
||||||
|
outcomeC := make(chan types.SpecState)
|
||||||
|
failureC := make(chan types.Failure)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
finished := false
|
||||||
|
defer func() {
|
||||||
|
if e := recover(); e != nil || !finished {
|
||||||
|
suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e)
|
||||||
|
}
|
||||||
|
|
||||||
|
outcome, failureFromRun := suite.failer.Drain()
|
||||||
|
outcomeC <- outcome
|
||||||
|
failureC <- failureFromRun
|
||||||
|
}()
|
||||||
|
|
||||||
|
node.Body()
|
||||||
|
finished = true
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case outcome := <-outcomeC:
|
||||||
|
failureFromRun := <-failureC
|
||||||
|
if outcome == types.SpecStatePassed {
|
||||||
|
return outcome, types.Failure{}
|
||||||
|
}
|
||||||
|
failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic
|
||||||
|
return outcome, failure
|
||||||
|
case <-interruptChannel:
|
||||||
|
failure.Message, failure.Location = suite.interruptHandler.InterruptMessageWithStackTraces(), node.CodeLocation
|
||||||
|
return types.SpecStateInterrupted, failure
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
|
||||||
|
return types.Failure{
|
||||||
|
Message: message,
|
||||||
|
Location: node.CodeLocation,
|
||||||
|
FailureNodeContext: types.FailureNodeIsLeafNode,
|
||||||
|
FailureNodeType: node.NodeType,
|
||||||
|
FailureNodeLocation: node.CodeLocation,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func max(a, b int) int {
|
||||||
|
if a > b {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
128
vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
generated
vendored
Normal file
@ -0,0 +1,128 @@
package testingtproxy

import (
	"fmt"
	"io"
	"os"

	"github.com/onsi/ginkgo/v2/internal"
	"github.com/onsi/ginkgo/v2/types"
)

type failFunc func(message string, callerSkip ...int)
type skipFunc func(message string, callerSkip ...int)
type cleanupFunc func(args ...interface{})
type reportFunc func() types.SpecReport

func New(writer io.Writer, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, offset int) *ginkgoTestingTProxy {
	return &ginkgoTestingTProxy{
		fail:    fail,
		offset:  offset,
		writer:  writer,
		skip:    skip,
		cleanup: cleanup,
		report:  report,
	}
}

type ginkgoTestingTProxy struct {
	fail    failFunc
	skip    skipFunc
	cleanup cleanupFunc
	report  reportFunc
	offset  int
	writer  io.Writer
}

func (t *ginkgoTestingTProxy) Cleanup(f func()) {
	t.cleanup(f, internal.Offset(1))
}

func (t *ginkgoTestingTProxy) Setenv(key, value string) {
	originalValue, exists := os.LookupEnv(key)
	if exists {
		t.cleanup(os.Setenv, key, originalValue, internal.Offset(1))
	} else {
		t.cleanup(os.Unsetenv, key, internal.Offset(1))
	}

	err := os.Setenv(key, value)
	if err != nil {
		t.fail(fmt.Sprintf("Failed to set environment variable: %v", err), 1)
	}
}

func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
	t.fail(fmt.Sprintln(args...), t.offset)
}

func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
	t.fail(fmt.Sprintf(format, args...), t.offset)
}

func (t *ginkgoTestingTProxy) Fail() {
	t.fail("failed", t.offset)
}

func (t *ginkgoTestingTProxy) FailNow() {
	t.fail("failed", t.offset)
}

func (t *ginkgoTestingTProxy) Failed() bool {
	return t.report().Failed()
}

func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
	t.fail(fmt.Sprintln(args...), t.offset)
}

func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
	t.fail(fmt.Sprintf(format, args...), t.offset)
}

func (t *ginkgoTestingTProxy) Helper() {
	// No-op
}

func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
	fmt.Fprintln(t.writer, args...)
}

func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
	t.Log(fmt.Sprintf(format, args...))
}

func (t *ginkgoTestingTProxy) Name() string {
	return t.report().FullText()
}

func (t *ginkgoTestingTProxy) Parallel() {
	// No-op
}

func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
	t.skip(fmt.Sprintln(args...), t.offset)
}

func (t *ginkgoTestingTProxy) SkipNow() {
	t.skip("skip", t.offset)
}

func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
	t.skip(fmt.Sprintf(format, args...), t.offset)
}

func (t *ginkgoTestingTProxy) Skipped() bool {
	return t.report().State.Is(types.SpecStateSkipped)
}

func (t *ginkgoTestingTProxy) TempDir() string {
	tmpDir, err := os.MkdirTemp("", "ginkgo")
	if err != nil {
		t.fail(fmt.Sprintf("Failed to create temporary directory: %v", err), 1)
		return ""
	}
	t.cleanup(os.RemoveAll, tmpDir)

	return tmpDir
}
77
vendor/github.com/onsi/ginkgo/v2/internal/tree.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
package internal

import "github.com/onsi/ginkgo/v2/types"

type TreeNode struct {
	Node     Node
	Parent   *TreeNode
	Children TreeNodes
}

func (tn *TreeNode) AppendChild(child *TreeNode) {
	tn.Children = append(tn.Children, child)
	child.Parent = tn
}

func (tn *TreeNode) AncestorNodeChain() Nodes {
	if tn.Parent == nil || tn.Parent.Node.IsZero() {
		return Nodes{tn.Node}
	}
	return append(tn.Parent.AncestorNodeChain(), tn.Node)
}

type TreeNodes []*TreeNode

func (tn TreeNodes) Nodes() Nodes {
	out := make(Nodes, len(tn))
	for i := range tn {
		out[i] = tn[i].Node
	}
	return out
}

func (tn TreeNodes) WithID(id uint) *TreeNode {
	for i := range tn {
		if tn[i].Node.ID == id {
			return tn[i]
		}
	}

	return nil
}

func GenerateSpecsFromTreeRoot(tree *TreeNode) Specs {
	var walkTree func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs
	walkTree = func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs {
		tests := Specs{}

		nodes := make(Nodes, len(trees))
		for i := range trees {
			nodes[i] = trees[i].Node
			nodes[i].NestingLevel = nestingLevel
		}

		for i := range nodes {
			if !nodes[i].NodeType.Is(types.NodeTypesForContainerAndIt) {
				continue
			}
			leftNodes, rightNodes := nodes.SplitAround(nodes[i])
			leftNodes = leftNodes.WithoutType(types.NodeTypesForContainerAndIt)
			rightNodes = rightNodes.WithoutType(types.NodeTypesForContainerAndIt)

			leftNodes = lNodes.CopyAppend(leftNodes...)
			rightNodes = rightNodes.CopyAppend(rNodes...)

			if nodes[i].NodeType.Is(types.NodeTypeIt) {
				tests = append(tests, Spec{Nodes: leftNodes.CopyAppend(nodes[i]).CopyAppend(rightNodes...)})
			} else {
				treeNode := trees.WithID(nodes[i].ID)
				tests = append(tests, walkTree(nestingLevel+1, leftNodes.CopyAppend(nodes[i]), rightNodes, treeNode.Children)...)
			}
		}

		return tests
	}

	return walkTree(0, Nodes{}, Nodes{}, tree.Children)
}
103
vendor/github.com/onsi/ginkgo/v2/internal/writer.go
generated
vendored
Normal file
@ -0,0 +1,103 @@
package internal

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

type WriterMode uint

const (
	WriterModeStreamAndBuffer WriterMode = iota
	WriterModeBufferOnly
)

type WriterInterface interface {
	io.Writer

	Truncate()
	Bytes() []byte
}

//Writer implements WriterInterface and GinkgoWriterInterface
type Writer struct {
	buffer    *bytes.Buffer
	outWriter io.Writer
	lock      *sync.Mutex
	mode      WriterMode

	teeWriters []io.Writer
}

func NewWriter(outWriter io.Writer) *Writer {
	return &Writer{
		buffer:    &bytes.Buffer{},
		lock:      &sync.Mutex{},
		outWriter: outWriter,
		mode:      WriterModeStreamAndBuffer,
	}
}

func (w *Writer) SetMode(mode WriterMode) {
	w.lock.Lock()
	defer w.lock.Unlock()
	w.mode = mode
}

func (w *Writer) Write(b []byte) (n int, err error) {
	w.lock.Lock()
	defer w.lock.Unlock()

	for _, teeWriter := range w.teeWriters {
		teeWriter.Write(b)
	}

	if w.mode == WriterModeStreamAndBuffer {
		w.outWriter.Write(b)
	}
	return w.buffer.Write(b)
}

func (w *Writer) Truncate() {
	w.lock.Lock()
	defer w.lock.Unlock()
	w.buffer.Reset()
}

func (w *Writer) Bytes() []byte {
	w.lock.Lock()
	defer w.lock.Unlock()
	b := w.buffer.Bytes()
	copied := make([]byte, len(b))
	copy(copied, b)
	return copied
}

//GinkgoWriterInterface
func (w *Writer) TeeTo(writer io.Writer) {
	w.lock.Lock()
	defer w.lock.Unlock()

	w.teeWriters = append(w.teeWriters, writer)
}

func (w *Writer) ClearTeeWriters() {
	w.lock.Lock()
	defer w.lock.Unlock()

	w.teeWriters = []io.Writer{}
}

func (w *Writer) Print(a ...interface{}) {
	fmt.Fprint(w, a...)
}

func (w *Writer) Printf(format string, a ...interface{}) {
	fmt.Fprintf(w, format, a...)
}

func (w *Writer) Println(a ...interface{}) {
	fmt.Fprintln(w, a...)
}
410
vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
generated
vendored
Normal file
@ -0,0 +1,410 @@
|
|||||||
|
/*
|
||||||
|
Ginkgo's Default Reporter
|
||||||
|
|
||||||
|
A number of command line flags are available to tweak Ginkgo's default output.
|
||||||
|
|
||||||
|
These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
|
||||||
|
*/
|
||||||
|
package reporters
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/formatter"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type DefaultReporter struct {
|
||||||
|
conf types.ReporterConfig
|
||||||
|
writer io.Writer
|
||||||
|
|
||||||
|
// managing the emission stream
|
||||||
|
lastChar string
|
||||||
|
lastEmissionWasDelimiter bool
|
||||||
|
|
||||||
|
// rendering
|
||||||
|
specDenoter string
|
||||||
|
retryDenoter string
|
||||||
|
formatter formatter.Formatter
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
|
||||||
|
reporter := NewDefaultReporter(conf, writer)
|
||||||
|
reporter.formatter = formatter.New(formatter.ColorModePassthrough)
|
||||||
|
|
||||||
|
return reporter
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
|
||||||
|
reporter := &DefaultReporter{
|
||||||
|
conf: conf,
|
||||||
|
writer: writer,
|
||||||
|
|
||||||
|
lastChar: "\n",
|
||||||
|
lastEmissionWasDelimiter: false,
|
||||||
|
|
||||||
|
specDenoter: "•",
|
||||||
|
retryDenoter: "↺",
|
||||||
|
formatter: formatter.NewWithNoColorBool(conf.NoColor),
|
||||||
|
}
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
reporter.specDenoter = "+"
|
||||||
|
reporter.retryDenoter = "R"
|
||||||
|
}
|
||||||
|
|
||||||
|
return reporter
|
||||||
|
}
|
||||||
|
|
||||||
|
/* The Reporter Interface */
|
||||||
|
|
||||||
|
func (r *DefaultReporter) SuiteWillBegin(report types.Report) {
|
||||||
|
if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) {
|
||||||
|
r.emit(r.f("[%d] {{bold}}%s{{/}} ", report.SuiteConfig.RandomSeed, report.SuiteDescription))
|
||||||
|
if len(report.SuiteLabels) > 0 {
|
||||||
|
r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", ")))
|
||||||
|
}
|
||||||
|
r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
|
||||||
|
if report.SuiteConfig.ParallelTotal > 1 {
|
||||||
|
r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
banner := r.f("Running Suite: %s - %s", report.SuiteDescription, report.SuitePath)
|
||||||
|
r.emitBlock(banner)
|
||||||
|
bannerWidth := len(banner)
|
||||||
|
if len(report.SuiteLabels) > 0 {
|
||||||
|
labels := strings.Join(report.SuiteLabels, ", ")
|
||||||
|
r.emitBlock(r.f("{{coral}}[%s]{{/}} ", labels))
|
||||||
|
if len(labels)+2 > bannerWidth {
|
||||||
|
bannerWidth = len(labels) + 2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r.emitBlock(strings.Repeat("=", bannerWidth))
|
||||||
|
|
||||||
|
out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed)
|
||||||
|
if report.SuiteConfig.RandomizeAllSpecs {
|
||||||
|
out += r.f(" - will randomize all specs")
|
||||||
|
}
|
||||||
|
r.emitBlock(out)
|
||||||
|
r.emit("\n")
|
||||||
|
r.emitBlock(r.f("Will run {{bold}}%d{{/}} of {{bold}}%d{{/}} specs", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
|
||||||
|
if report.SuiteConfig.ParallelTotal > 1 {
|
||||||
|
r.emitBlock(r.f("Running in parallel across {{bold}}%d{{/}} processes", report.SuiteConfig.ParallelTotal))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) WillRun(report types.SpecReport) {
|
||||||
|
if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
r.emitDelimiter()
|
||||||
|
indentation := uint(0)
|
||||||
|
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
||||||
|
r.emitBlock(r.f("{{bold}}[%s] %s{{/}}", report.LeafNodeType.String(), report.LeafNodeText))
|
||||||
|
} else {
|
||||||
|
if len(report.ContainerHierarchyTexts) > 0 {
|
||||||
|
r.emitBlock(r.cycleJoin(report.ContainerHierarchyTexts, " "))
|
||||||
|
indentation = 1
|
||||||
|
}
|
||||||
|
line := r.fi(indentation, "{{bold}}%s{{/}}", report.LeafNodeText)
|
||||||
|
labels := report.Labels()
|
||||||
|
if len(labels) > 0 {
|
||||||
|
line += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels, ", "))
|
||||||
|
}
|
||||||
|
r.emitBlock(line)
|
||||||
|
}
|
||||||
|
r.emitBlock(r.fi(indentation, "{{gray}}%s{{/}}", report.LeafNodeLocation))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) DidRun(report types.SpecReport) {
|
||||||
|
v := r.conf.Verbosity()
|
||||||
|
var header, highlightColor string
|
||||||
|
includeRuntime, emitGinkgoWriterOutput, stream, denoter := true, true, false, r.specDenoter
|
||||||
|
succinctLocationBlock := v.Is(types.VerbosityLevelSuccinct)
|
||||||
|
|
||||||
|
hasGW := report.CapturedGinkgoWriterOutput != ""
|
||||||
|
hasStd := report.CapturedStdOutErr != ""
|
||||||
|
hasEmittableReports := report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) || (report.ReportEntries.HasVisibility(types.ReportEntryVisibilityFailureOrVerbose) && (!report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose)))
|
||||||
|
|
||||||
|
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
||||||
|
denoter = fmt.Sprintf("[%s]", report.LeafNodeType)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch report.State {
|
||||||
|
case types.SpecStatePassed:
|
||||||
|
highlightColor, succinctLocationBlock = "{{green}}", v.LT(types.VerbosityLevelVerbose)
|
||||||
|
emitGinkgoWriterOutput = (r.conf.AlwaysEmitGinkgoWriter || v.GTE(types.VerbosityLevelVerbose)) && hasGW
|
||||||
|
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
||||||
|
if v.GTE(types.VerbosityLevelVerbose) || hasStd || hasEmittableReports {
|
||||||
|
header = fmt.Sprintf("%s PASSED", denoter)
|
||||||
|
} else {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
header, stream = denoter, true
|
||||||
|
if report.NumAttempts > 1 {
|
||||||
|
header, stream = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), false
|
||||||
|
}
|
||||||
|
if report.RunTime > r.conf.SlowSpecThreshold {
|
||||||
|
header, stream = fmt.Sprintf("%s [SLOW TEST]", header), false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if hasStd || emitGinkgoWriterOutput || hasEmittableReports {
|
||||||
|
stream = false
|
||||||
|
}
|
||||||
|
case types.SpecStatePending:
|
||||||
|
highlightColor = "{{yellow}}"
|
||||||
|
includeRuntime, emitGinkgoWriterOutput = false, false
|
||||||
|
if v.Is(types.VerbosityLevelSuccinct) {
|
||||||
|
header, stream = "P", true
|
||||||
|
} else {
|
||||||
|
header, succinctLocationBlock = "P [PENDING]", v.LT(types.VerbosityLevelVeryVerbose)
|
||||||
|
}
|
||||||
|
case types.SpecStateSkipped:
|
||||||
|
highlightColor = "{{cyan}}"
|
||||||
|
if report.Failure.Message != "" || v.Is(types.VerbosityLevelVeryVerbose) {
|
||||||
|
header = "S [SKIPPED]"
|
||||||
|
} else {
|
||||||
|
header, stream = "S", true
|
||||||
|
}
|
||||||
|
case types.SpecStateFailed:
|
||||||
|
highlightColor, header = "{{red}}", fmt.Sprintf("%s [FAILED]", denoter)
|
||||||
|
case types.SpecStatePanicked:
|
||||||
|
highlightColor, header = "{{magenta}}", fmt.Sprintf("%s! [PANICKED]", denoter)
|
||||||
|
case types.SpecStateInterrupted:
|
||||||
|
highlightColor, header = "{{orange}}", fmt.Sprintf("%s! [INTERRUPTED]", denoter)
|
||||||
|
case types.SpecStateAborted:
|
||||||
|
highlightColor, header = "{{coral}}", fmt.Sprintf("%s! [ABORTED]", denoter)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Emit stream and return
|
||||||
|
if stream {
|
||||||
|
r.emit(r.f(highlightColor + header + "{{/}}"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Emit header
|
||||||
|
r.emitDelimiter()
|
||||||
|
if includeRuntime {
|
||||||
|
header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds())
|
||||||
|
}
|
||||||
|
r.emitBlock(r.f(highlightColor + header + "{{/}}"))
|
||||||
|
|
||||||
|
// Emit Code Location Block
|
||||||
|
r.emitBlock(r.codeLocationBlock(report, highlightColor, succinctLocationBlock, false))
|
||||||
|
|
||||||
|
//Emit Stdout/Stderr Output
|
||||||
|
if hasStd {
|
||||||
|
r.emitBlock("\n")
|
||||||
|
r.emitBlock(r.fi(1, "{{gray}}Begin Captured StdOut/StdErr Output >>{{/}}"))
|
||||||
|
r.emitBlock(r.fi(2, "%s", report.CapturedStdOutErr))
|
||||||
|
r.emitBlock(r.fi(1, "{{gray}}<< End Captured StdOut/StdErr Output{{/}}"))
|
||||||
|
}
|
||||||
|
|
||||||
|
//Emit Captured GinkgoWriter Output
|
||||||
|
if emitGinkgoWriterOutput && hasGW {
|
||||||
|
r.emitBlock("\n")
|
||||||
|
r.emitBlock(r.fi(1, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}"))
|
||||||
|
r.emitBlock(r.fi(2, "%s", report.CapturedGinkgoWriterOutput))
|
||||||
|
r.emitBlock(r.fi(1, "{{gray}}<< End Captured GinkgoWriter Output{{/}}"))
|
||||||
|
}
|
||||||
|
|
||||||
|
if hasEmittableReports {
|
||||||
|
r.emitBlock("\n")
|
||||||
|
r.emitBlock(r.fi(1, "{{gray}}Begin Report Entries >>{{/}}"))
|
||||||
|
reportEntries := report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways)
|
||||||
|
if !report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose) {
|
||||||
|
reportEntries = report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose)
|
||||||
|
}
|
||||||
|
for _, entry := range reportEntries {
|
||||||
|
r.emitBlock(r.fi(2, "{{bold}}"+entry.Name+"{{gray}} - %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT)))
|
||||||
|
if representation := entry.StringRepresentation(); representation != "" {
|
||||||
|
r.emitBlock(r.fi(3, representation))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r.emitBlock(r.fi(1, "{{gray}}<< End Report Entries{{/}}"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Emit Failure Message
|
||||||
|
if !report.Failure.IsZero() {
|
||||||
|
r.emitBlock("\n")
|
||||||
|
r.emitBlock(r.fi(1, highlightColor+"%s{{/}}", report.Failure.Message))
|
||||||
|
r.emitBlock(r.fi(1, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.Location))
|
||||||
|
if report.Failure.ForwardedPanic != "" {
|
||||||
|
r.emitBlock("\n")
|
||||||
|
r.emitBlock(r.fi(1, highlightColor+"%s{{/}}", report.Failure.ForwardedPanic))
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.conf.FullTrace || report.Failure.ForwardedPanic != "" {
|
||||||
|
r.emitBlock("\n")
|
||||||
|
r.emitBlock(r.fi(1, highlightColor+"Full Stack Trace{{/}}"))
|
||||||
|
r.emitBlock(r.fi(2, "%s", report.Failure.Location.FullStackTrace))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
r.emitDelimiter()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) SuiteDidEnd(report types.Report) {
|
||||||
|
failures := report.SpecReports.WithState(types.SpecStateFailureStates)
|
||||||
|
if len(failures) > 1 {
|
||||||
|
r.emitBlock("\n\n")
|
||||||
|
r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures)))
|
||||||
|
for _, specReport := range failures {
|
||||||
|
highlightColor, heading := "{{red}}", "[FAIL]"
|
||||||
|
switch specReport.State {
|
||||||
|
case types.SpecStatePanicked:
|
||||||
|
highlightColor, heading = "{{magenta}}", "[PANICKED!]"
|
||||||
|
case types.SpecStateAborted:
|
||||||
|
highlightColor, heading = "{{coral}}", "[ABORTED]"
|
||||||
|
case types.SpecStateInterrupted:
|
||||||
|
highlightColor, heading = "{{orange}}", "[INTERRUPTED]"
|
||||||
|
}
|
||||||
|
locationBlock := r.codeLocationBlock(specReport, highlightColor, true, true)
|
||||||
|
r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//summarize the suite
|
||||||
|
if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded {
|
||||||
|
r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
r.emitBlock("\n")
|
||||||
|
color, status := "{{green}}{{bold}}", "SUCCESS!"
|
||||||
|
if !report.SuiteSucceeded {
|
||||||
|
color, status = "{{red}}{{bold}}", "FAIL!"
|
||||||
|
}
|
||||||
|
|
||||||
|
specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes
|
||||||
|
r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}",
|
||||||
|
specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates),
|
||||||
|
report.PreRunStats.TotalSpecs,
|
||||||
|
report.RunTime.Seconds()),
|
||||||
|
)
|
||||||
|
|
||||||
|
switch len(report.SpecialSuiteFailureReasons) {
|
||||||
|
case 0:
|
||||||
|
r.emit(r.f(color+"%s{{/}} -- ", status))
|
||||||
|
case 1:
|
||||||
|
r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0]))
|
||||||
|
default:
|
||||||
|
r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", ")))
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 {
|
||||||
|
r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n"))
|
||||||
|
} else {
|
||||||
|
r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed)))
|
||||||
|
r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates)))
|
||||||
|
if specs.CountOfFlakedSpecs() > 0 {
|
||||||
|
r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs()))
|
||||||
|
}
|
||||||
|
r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending)))
|
||||||
|
r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Emitting to the writer */
|
||||||
|
func (r *DefaultReporter) emit(s string) {
|
||||||
|
if len(s) > 0 {
|
||||||
|
r.lastChar = s[len(s)-1:]
|
||||||
|
r.lastEmissionWasDelimiter = false
|
||||||
|
r.writer.Write([]byte(s))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) emitBlock(s string) {
|
||||||
|
if len(s) > 0 {
|
||||||
|
if r.lastChar != "\n" {
|
||||||
|
r.emit("\n")
|
||||||
|
}
|
||||||
|
r.emit(s)
|
||||||
|
if r.lastChar != "\n" {
|
||||||
|
r.emit("\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) emitDelimiter() {
|
||||||
|
if r.lastEmissionWasDelimiter {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
r.emitBlock(r.f("{{gray}}%s{{/}}", strings.Repeat("-", 30)))
|
||||||
|
r.lastEmissionWasDelimiter = true
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Rendering text */
|
||||||
|
func (r *DefaultReporter) f(format string, args ...interface{}) string {
|
||||||
|
return r.formatter.F(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string {
|
||||||
|
return r.formatter.Fi(indentation, format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string {
|
||||||
|
return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, succinct bool, usePreciseFailureLocation bool) string {
|
||||||
|
texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{}
|
||||||
|
texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...)
|
||||||
|
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
||||||
|
texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText))
|
||||||
|
} else {
|
||||||
|
texts = append(texts, report.LeafNodeText)
|
||||||
|
}
|
||||||
|
labels = append(labels, report.LeafNodeLabels)
|
||||||
|
locations = append(locations, report.LeafNodeLocation)
|
||||||
|
|
||||||
|
failureLocation := report.Failure.FailureNodeLocation
|
||||||
|
if usePreciseFailureLocation {
|
||||||
|
failureLocation = report.Failure.Location
|
||||||
|
}
|
||||||
|
|
||||||
|
switch report.Failure.FailureNodeContext {
|
||||||
|
case types.FailureNodeAtTopLevel:
|
||||||
|
texts = append([]string{r.f(highlightColor+"{{bold}}TOP-LEVEL [%s]{{/}}", report.Failure.FailureNodeType)}, texts...)
|
||||||
|
locations = append([]types.CodeLocation{failureLocation}, locations...)
|
||||||
|
labels = append([][]string{{}}, labels...)
|
||||||
|
case types.FailureNodeInContainer:
|
||||||
|
i := report.Failure.FailureNodeContainerIndex
|
||||||
|
texts[i] = r.f(highlightColor+"{{bold}}%s [%s]{{/}}", texts[i], report.Failure.FailureNodeType)
|
||||||
|
locations[i] = failureLocation
|
||||||
|
case types.FailureNodeIsLeafNode:
|
||||||
|
i := len(texts) - 1
|
||||||
|
texts[i] = r.f(highlightColor+"{{bold}}[%s] %s{{/}}", report.LeafNodeType, report.LeafNodeText)
|
||||||
|
locations[i] = failureLocation
|
||||||
|
}
|
||||||
|
|
||||||
|
out := ""
|
||||||
|
if succinct {
|
||||||
|
out += r.f("%s", r.cycleJoin(texts, " "))
|
||||||
|
flattenedLabels := report.Labels()
|
||||||
|
if len(flattenedLabels) > 0 {
|
||||||
|
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", "))
|
||||||
|
}
|
||||||
|
out += "\n"
|
||||||
|
if usePreciseFailureLocation {
|
||||||
|
out += r.f("{{gray}}%s{{/}}", failureLocation)
|
||||||
|
} else {
|
||||||
|
out += r.f("{{gray}}%s{{/}}", locations[len(locations)-1])
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for i := range texts {
|
||||||
|
out += r.fi(uint(i), "%s", texts[i])
|
||||||
|
if len(labels[i]) > 0 {
|
||||||
|
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
|
||||||
|
}
|
||||||
|
out += "\n"
|
||||||
|
out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
149
vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
generated
vendored
Normal file
@ -0,0 +1,149 @@
|
|||||||
|
package reporters
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/onsi/ginkgo/v2/config"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Deprecated: DeprecatedReporter was how Ginkgo V1 provided support for CustomReporters
|
||||||
|
// this has been removed in V2.
|
||||||
|
// Please read the documentation at:
|
||||||
|
// https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
|
||||||
|
// for Ginkgo's new behavior and for a migration path.
|
||||||
|
type DeprecatedReporter interface {
|
||||||
|
SuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
|
||||||
|
BeforeSuiteDidRun(setupSummary *types.SetupSummary)
|
||||||
|
SpecWillRun(specSummary *types.SpecSummary)
|
||||||
|
SpecDidComplete(specSummary *types.SpecSummary)
|
||||||
|
AfterSuiteDidRun(setupSummary *types.SetupSummary)
|
||||||
|
SuiteDidEnd(summary *types.SuiteSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReportViaDeprecatedReporter takes a V1 custom reporter and a V2 report and
|
||||||
|
// calls the custom reporter's methods with appropriately transformed data from the V2 report.
|
||||||
|
//
|
||||||
|
// ReportViaDeprecatedReporter should be called in a `ReportAfterSuite()`
|
||||||
|
//
|
||||||
|
// Deprecated: ReportViaDeprecatedReporter method exists to help developers bridge between deprecated V1 functionality and the new
|
||||||
|
// reporting support in V2. It will be removed in a future minor version of Ginkgo.
|
||||||
|
func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Report) {
|
||||||
|
conf := config.DeprecatedGinkgoConfigType{
|
||||||
|
RandomSeed: report.SuiteConfig.RandomSeed,
|
||||||
|
RandomizeAllSpecs: report.SuiteConfig.RandomizeAllSpecs,
|
||||||
|
FocusStrings: report.SuiteConfig.FocusStrings,
|
||||||
|
SkipStrings: report.SuiteConfig.SkipStrings,
|
||||||
|
FailOnPending: report.SuiteConfig.FailOnPending,
|
||||||
|
FailFast: report.SuiteConfig.FailFast,
|
||||||
|
FlakeAttempts: report.SuiteConfig.FlakeAttempts,
|
||||||
|
EmitSpecProgress: report.SuiteConfig.EmitSpecProgress,
|
||||||
|
DryRun: report.SuiteConfig.DryRun,
|
||||||
|
ParallelNode: report.SuiteConfig.ParallelProcess,
|
||||||
|
ParallelTotal: report.SuiteConfig.ParallelTotal,
|
||||||
|
SyncHost: report.SuiteConfig.ParallelHost,
|
||||||
|
StreamHost: report.SuiteConfig.ParallelHost,
|
||||||
|
}
|
||||||
|
|
||||||
|
summary := &types.DeprecatedSuiteSummary{
|
||||||
|
SuiteDescription: report.SuiteDescription,
|
||||||
|
SuiteID: report.SuitePath,
|
||||||
|
|
||||||
|
NumberOfSpecsBeforeParallelization: report.PreRunStats.TotalSpecs,
|
||||||
|
NumberOfTotalSpecs: report.PreRunStats.TotalSpecs,
|
||||||
|
NumberOfSpecsThatWillBeRun: report.PreRunStats.SpecsThatWillRun,
|
||||||
|
}
|
||||||
|
|
||||||
|
reporter.SuiteWillBegin(conf, summary)
|
||||||
|
|
||||||
|
for _, spec := range report.SpecReports {
|
||||||
|
switch spec.LeafNodeType {
|
||||||
|
case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
|
||||||
|
setupSummary := &types.DeprecatedSetupSummary{
|
||||||
|
ComponentType: spec.LeafNodeType,
|
||||||
|
CodeLocation: spec.LeafNodeLocation,
|
||||||
|
State: spec.State,
|
||||||
|
RunTime: spec.RunTime,
|
||||||
|
Failure: failureFor(spec),
|
||||||
|
CapturedOutput: spec.CombinedOutput(),
|
||||||
|
SuiteID: report.SuitePath,
|
||||||
|
}
|
||||||
|
reporter.BeforeSuiteDidRun(setupSummary)
|
||||||
|
case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
|
||||||
|
setupSummary := &types.DeprecatedSetupSummary{
|
||||||
|
ComponentType: spec.LeafNodeType,
|
||||||
|
CodeLocation: spec.LeafNodeLocation,
|
||||||
|
State: spec.State,
|
||||||
|
RunTime: spec.RunTime,
|
||||||
|
Failure: failureFor(spec),
|
||||||
|
CapturedOutput: spec.CombinedOutput(),
|
||||||
|
SuiteID: report.SuitePath,
|
||||||
|
}
|
||||||
|
reporter.AfterSuiteDidRun(setupSummary)
|
||||||
|
case types.NodeTypeIt:
|
||||||
|
componentTexts, componentCodeLocations := []string{}, []types.CodeLocation{}
|
||||||
|
componentTexts = append(componentTexts, spec.ContainerHierarchyTexts...)
|
||||||
|
componentCodeLocations = append(componentCodeLocations, spec.ContainerHierarchyLocations...)
|
||||||
|
componentTexts = append(componentTexts, spec.LeafNodeText)
|
||||||
|
componentCodeLocations = append(componentCodeLocations, spec.LeafNodeLocation)
|
||||||
|
|
||||||
|
specSummary := &types.DeprecatedSpecSummary{
|
||||||
|
ComponentTexts: componentTexts,
|
||||||
|
ComponentCodeLocations: componentCodeLocations,
|
||||||
|
State: spec.State,
|
||||||
|
RunTime: spec.RunTime,
|
||||||
|
Failure: failureFor(spec),
|
||||||
|
NumberOfSamples: spec.NumAttempts,
|
||||||
|
CapturedOutput: spec.CombinedOutput(),
|
||||||
|
SuiteID: report.SuitePath,
|
||||||
|
}
|
||||||
|
reporter.SpecWillRun(specSummary)
|
||||||
|
reporter.SpecDidComplete(specSummary)
|
||||||
|
|
||||||
|
switch spec.State {
|
||||||
|
case types.SpecStatePending:
|
||||||
|
summary.NumberOfPendingSpecs += 1
|
||||||
|
case types.SpecStateSkipped:
|
||||||
|
summary.NumberOfSkippedSpecs += 1
|
||||||
|
case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateInterrupted:
|
||||||
|
summary.NumberOfFailedSpecs += 1
|
||||||
|
case types.SpecStatePassed:
|
||||||
|
summary.NumberOfPassedSpecs += 1
|
||||||
|
if spec.NumAttempts > 1 {
|
||||||
|
summary.NumberOfFlakedSpecs += 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
summary.SuiteSucceeded = report.SuiteSucceeded
|
||||||
|
summary.RunTime = report.RunTime
|
||||||
|
|
||||||
|
reporter.SuiteDidEnd(summary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func failureFor(spec types.SpecReport) types.DeprecatedSpecFailure {
|
||||||
|
if spec.Failure.IsZero() {
|
||||||
|
return types.DeprecatedSpecFailure{}
|
||||||
|
}
|
||||||
|
|
||||||
|
index := 0
|
||||||
|
switch spec.Failure.FailureNodeContext {
|
||||||
|
case types.FailureNodeInContainer:
|
||||||
|
index = spec.Failure.FailureNodeContainerIndex
|
||||||
|
case types.FailureNodeAtTopLevel:
|
||||||
|
index = -1
|
||||||
|
case types.FailureNodeIsLeafNode:
|
||||||
|
index = len(spec.ContainerHierarchyTexts) - 1
|
||||||
|
if spec.LeafNodeText != "" {
|
||||||
|
index += 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return types.DeprecatedSpecFailure{
|
||||||
|
Message: spec.Failure.Message,
|
||||||
|
Location: spec.Failure.Location,
|
||||||
|
ForwardedPanic: spec.Failure.ForwardedPanic,
|
||||||
|
ComponentIndex: index,
|
||||||
|
ComponentType: spec.Failure.FailureNodeType,
|
||||||
|
ComponentCodeLocation: spec.Failure.FailureNodeLocation,
|
||||||
|
}
|
||||||
|
}
|
60
vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
package reporters

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/onsi/ginkgo/v2/types"
)

//GenerateJSONReport produces a JSON-formatted report at the passed in destination
func GenerateJSONReport(report types.Report, destination string) error {
	f, err := os.Create(destination)
	if err != nil {
		return err
	}
	enc := json.NewEncoder(f)
	enc.SetIndent("", " ")
	err = enc.Encode([]types.Report{
		report,
	})
	if err != nil {
		return err
	}
	return f.Close()
}

//MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources
//It skips over reports that fail to decode but reports on them via the returned messages []string
func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) {
	messages := []string{}
	allReports := []types.Report{}
	for _, source := range sources {
		reports := []types.Report{}
		data, err := os.ReadFile(source)
		if err != nil {
			messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
			continue
		}
		err = json.Unmarshal(data, &reports)
		if err != nil {
			messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
			continue
		}
		os.Remove(source)
		allReports = append(allReports, reports...)
	}

	f, err := os.Create(destination)
	if err != nil {
		return messages, err
	}
	enc := json.NewEncoder(f)
	enc.SetIndent("", " ")
	err = enc.Encode(allReports)
	if err != nil {
		return messages, err
	}
	return messages, f.Close()
}
307
vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
generated
vendored
Normal file
@ -0,0 +1,307 @@
/*

JUnit XML Reporter for Ginkgo

For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output

The schema used for the generated JUnit xml file was adapted from https://llg.cubic.org/docs/junit/

*/

package reporters

import (
    "encoding/xml"
    "fmt"
    "os"
    "strings"
    "time"

    "github.com/onsi/ginkgo/v2/config"
    "github.com/onsi/ginkgo/v2/types"
)

type JUnitTestSuites struct {
    XMLName xml.Name `xml:"testsuites"`
    // Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite)
    Tests int `xml:"tests,attr"`
    // Disabled maps onto specs that are pending and/or skipped
    Disabled int `xml:"disabled,attr"`
    // Errors maps onto specs that panicked or were interrupted
    Errors int `xml:"errors,attr"`
    // Failures maps onto specs that failed
    Failures int `xml:"failures,attr"`
    // Time is the time in seconds to execute all test suites
    Time float64 `xml:"time,attr"`

    // The set of all test suites
    TestSuites []JUnitTestSuite `xml:"testsuite"`
}

type JUnitTestSuite struct {
    // Name maps onto the description of the test suite - maps onto Report.SuiteDescription
    Name string `xml:"name,attr"`
    // Package maps onto the absolute path to the test suite - maps onto Report.SuitePath
    Package string `xml:"package,attr"`
    // Tests maps onto the total number of specs in the test suite (this includes any suite nodes such as BeforeSuite)
    Tests int `xml:"tests,attr"`
    // Disabled maps onto specs that are pending
    Disabled int `xml:"disabled,attr"`
    // Skipped maps onto specs that are skipped
    Skipped int `xml:"skipped,attr"`
    // Errors maps onto specs that panicked or were interrupted
    Errors int `xml:"errors,attr"`
    // Failures maps onto specs that failed
    Failures int `xml:"failures,attr"`
    // Time is the time in seconds to execute the test suite - maps onto Report.RunTime
    Time float64 `xml:"time,attr"`
    // Timestamp is the ISO 8601 formatted start-time of the suite - maps onto Report.StartTime
    Timestamp string `xml:"timestamp,attr"`

    // Properties captures the information stored in the rest of the Report type (including SuiteConfig) as key-value pairs
    Properties JUnitProperties `xml:"properties"`

    // TestCases capture the individual specs
    TestCases []JUnitTestCase `xml:"testcase"`
}

type JUnitProperties struct {
    Properties []JUnitProperty `xml:"property"`
}

func (jup JUnitProperties) WithName(name string) string {
    for _, property := range jup.Properties {
        if property.Name == name {
            return property.Value
        }
    }
    return ""
}

type JUnitProperty struct {
    Name  string `xml:"name,attr"`
    Value string `xml:"value,attr"`
}

type JUnitTestCase struct {
    // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()"
    Name string `xml:"name,attr"`
    // Classname maps onto the name of the test suite - equivalent to Report.SuiteDescription
    Classname string `xml:"classname,attr"`
    // Status maps onto the string representation of SpecReport.State
    Status string `xml:"status,attr"`
    // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime
    Time float64 `xml:"time,attr"`
    // Skipped is populated with a message if the test was skipped or pending
    Skipped *JUnitSkipped `xml:"skipped,omitempty"`
    // Error is populated if the test panicked or was interrupted
    Error *JUnitError `xml:"error,omitempty"`
    // Failure is populated if the test failed
    Failure *JUnitFailure `xml:"failure,omitempty"`
    // SystemOut maps onto any captured stdout/stderr output - maps onto SpecReport.CapturedStdOutErr
    SystemOut string `xml:"system-out,omitempty"`
    // SystemErr maps onto any captured GinkgoWriter output - maps onto SpecReport.CapturedGinkgoWriterOutput
    SystemErr string `xml:"system-err,omitempty"`
}

type JUnitSkipped struct {
    // Message maps onto "pending" if the test was marked pending, "skipped" if the test was marked skipped, and "skipped - REASON" if the user called Skip(REASON)
    Message string `xml:"message,attr"`
}

type JUnitError struct {
    // Message maps onto the panic/exception thrown - equivalent to SpecReport.Failure.ForwardedPanic - or to "interrupted"
    Message string `xml:"message,attr"`
    // Type is one of "panicked" or "interrupted"
    Type string `xml:"type,attr"`
    // Description maps onto the captured stack trace for a panic, or the failure message for an interrupt which will include the dump of running goroutines
    Description string `xml:",chardata"`
}

type JUnitFailure struct {
    // Message maps onto the failure message - equivalent to SpecReport.Failure.Message
    Message string `xml:"message,attr"`
    // Type is "failed"
    Type string `xml:"type,attr"`
    // Description maps onto the location and stack trace of the failure
    Description string `xml:",chardata"`
}

func GenerateJUnitReport(report types.Report, dst string) error {
    suite := JUnitTestSuite{
        Name:      report.SuiteDescription,
        Package:   report.SuitePath,
        Time:      report.RunTime.Seconds(),
        Timestamp: report.StartTime.Format("2006-01-02T15:04:05"),
        Properties: JUnitProperties{
            Properties: []JUnitProperty{
                {"SuiteSucceeded", fmt.Sprintf("%t", report.SuiteSucceeded)},
                {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)},
                {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")},
                {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))},
                {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)},
                {"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)},
                {"LabelFilter", report.SuiteConfig.LabelFilter},
                {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")},
                {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")},
                {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")},
                {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")},
                {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)},
                {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)},
                {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)},
                {"EmitSpecProgress", fmt.Sprintf("%t", report.SuiteConfig.EmitSpecProgress)},
                {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)},
                {"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)},
                {"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode},
            },
        },
    }
    for _, spec := range report.SpecReports {
        name := fmt.Sprintf("[%s]", spec.LeafNodeType)
        if spec.FullText() != "" {
            name = name + " " + spec.FullText()
        }
        labels := spec.Labels()
        if len(labels) > 0 {
            name = name + " [" + strings.Join(labels, ", ") + "]"
        }

        test := JUnitTestCase{
            Name:      name,
            Classname: report.SuiteDescription,
            Status:    spec.State.String(),
            Time:      spec.RunTime.Seconds(),
            SystemOut: systemOutForUnstructureReporters(spec),
            SystemErr: spec.CapturedGinkgoWriterOutput,
        }
        suite.Tests += 1

        switch spec.State {
        case types.SpecStateSkipped:
            message := "skipped"
            if spec.Failure.Message != "" {
                message += " - " + spec.Failure.Message
            }
            test.Skipped = &JUnitSkipped{Message: message}
            suite.Skipped += 1
        case types.SpecStatePending:
            test.Skipped = &JUnitSkipped{Message: "pending"}
            suite.Disabled += 1
        case types.SpecStateFailed:
            test.Failure = &JUnitFailure{
                Message:     spec.Failure.Message,
                Type:        "failed",
                Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace),
            }
            suite.Failures += 1
        case types.SpecStateInterrupted:
            test.Error = &JUnitError{
                Message:     "interrupted",
                Type:        "interrupted",
                Description: spec.Failure.Message,
            }
            suite.Errors += 1
        case types.SpecStateAborted:
            test.Failure = &JUnitFailure{
                Message:     spec.Failure.Message,
                Type:        "aborted",
                Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace),
            }
            suite.Errors += 1
        case types.SpecStatePanicked:
            test.Error = &JUnitError{
                Message:     spec.Failure.ForwardedPanic,
                Type:        "panicked",
                Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace),
            }
            suite.Errors += 1
        }

        suite.TestCases = append(suite.TestCases, test)
    }

    junitReport := JUnitTestSuites{
        Tests:      suite.Tests,
        Disabled:   suite.Disabled + suite.Skipped,
        Errors:     suite.Errors,
        Failures:   suite.Failures,
        Time:       suite.Time,
        TestSuites: []JUnitTestSuite{suite},
    }

    f, err := os.Create(dst)
    if err != nil {
        return err
    }
    f.WriteString(xml.Header)
    encoder := xml.NewEncoder(f)
    encoder.Indent("  ", "    ")
    encoder.Encode(junitReport)

    return f.Close()
}

func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) {
    messages := []string{}
    mergedReport := JUnitTestSuites{}
    for _, source := range sources {
        report := JUnitTestSuites{}
        f, err := os.Open(source)
        if err != nil {
            messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
            continue
        }
        err = xml.NewDecoder(f).Decode(&report)
        if err != nil {
            messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
            continue
        }
        os.Remove(source)

        mergedReport.Tests += report.Tests
        mergedReport.Disabled += report.Disabled
        mergedReport.Errors += report.Errors
        mergedReport.Failures += report.Failures
        mergedReport.Time += report.Time
        mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...)
    }

    f, err := os.Create(dst)
    if err != nil {
        return messages, err
    }
    f.WriteString(xml.Header)
    encoder := xml.NewEncoder(f)
    encoder.Indent("  ", "    ")
    encoder.Encode(mergedReport)

    return messages, f.Close()
}

func systemOutForUnstructureReporters(spec types.SpecReport) string {
    systemOut := spec.CapturedStdOutErr
    if len(spec.ReportEntries) > 0 {
        systemOut += "\nReport Entries:\n"
        for i, entry := range spec.ReportEntries {
            systemOut += fmt.Sprintf("%s\n%s\n%s\n", entry.Name, entry.Location, entry.Time.Format(time.RFC3339Nano))
            if representation := entry.StringRepresentation(); representation != "" {
                systemOut += representation + "\n"
            }
            if i+1 < len(spec.ReportEntries) {
                systemOut += "--\n"
            }
        }
    }
    return systemOut
}

// Deprecated JUnitReporter (so folks can still compile their suites)
type JUnitReporter struct{}

func NewJUnitReporter(_ string) *JUnitReporter                                                  { return &JUnitReporter{} }
func (reporter *JUnitReporter) SuiteWillBegin(_ config.GinkgoConfigType, _ *types.SuiteSummary) {}
func (reporter *JUnitReporter) BeforeSuiteDidRun(_ *types.SetupSummary)                         {}
func (reporter *JUnitReporter) SpecWillRun(_ *types.SpecSummary)                                {}
func (reporter *JUnitReporter) SpecDidComplete(_ *types.SpecSummary)                            {}
func (reporter *JUnitReporter) AfterSuiteDidRun(_ *types.SetupSummary)                          {}
func (reporter *JUnitReporter) SuiteDidEnd(_ *types.SuiteSummary)                               {}
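Editor's note: a short sketch (not vendored code) of wiring GenerateJUnitReport into a suite through a ReportAfterSuite node, which is how the --junit-report CLI flag drives it (see reporting_dsl.go below); the output path "junit.xml" is an arbitrary example, and the usual ginkgo/v2 and ginkgo/v2/reporters imports are assumed.

var _ = ginkgo.ReportAfterSuite("write junit report", func(report ginkgo.Report) {
    // The aggregated suite Report is handed to the generator once all specs have run.
    if err := reporters.GenerateJUnitReport(report, "junit.xml"); err != nil {
        ginkgo.Fail(fmt.Sprintf("failed to write JUnit report: %v", err))
    }
})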
19
vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
generated
vendored
Normal file
19
vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
package reporters

import (
    "github.com/onsi/ginkgo/v2/types"
)

type Reporter interface {
    SuiteWillBegin(report types.Report)
    WillRun(report types.SpecReport)
    DidRun(report types.SpecReport)
    SuiteDidEnd(report types.Report)
}

type NoopReporter struct{}

func (n NoopReporter) SuiteWillBegin(report types.Report) {}
func (n NoopReporter) WillRun(report types.SpecReport)    {}
func (n NoopReporter) DidRun(report types.SpecReport)     {}
func (n NoopReporter) SuiteDidEnd(report types.Report)    {}
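Editor's note: a conceptual sketch (not vendored code) of what an implementation of the Reporter interface above could look like, with NoopReporter embedded to supply the remaining no-op methods. Ginkgo v2 drives its own default reporter internally, so user-facing reporting normally goes through ReportAfterEach/ReportAfterSuite instead; this is purely illustrative.

type failureLogger struct {
    reporters.NoopReporter // provides SuiteWillBegin, WillRun, SuiteDidEnd
}

func (failureLogger) DidRun(report types.SpecReport) {
    // Only log specs whose final state is failed.
    if report.State == types.SpecStateFailed {
        fmt.Println("FAILED:", report.FullText())
    }
}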
97
vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
generated
vendored
Normal file
97
vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
generated
vendored
Normal file
@ -0,0 +1,97 @@
/*

TeamCity Reporter for Ginkgo

Makes use of TeamCity's support for Service Messages
http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests
*/

package reporters

import (
    "fmt"
    "os"
    "strings"

    "github.com/onsi/ginkgo/v2/types"
)

func tcEscape(s string) string {
    s = strings.Replace(s, "|", "||", -1)
    s = strings.Replace(s, "'", "|'", -1)
    s = strings.Replace(s, "\n", "|n", -1)
    s = strings.Replace(s, "\r", "|r", -1)
    s = strings.Replace(s, "[", "|[", -1)
    s = strings.Replace(s, "]", "|]", -1)
    return s
}

func GenerateTeamcityReport(report types.Report, dst string) error {
    f, err := os.Create(dst)
    if err != nil {
        return err
    }

    name := report.SuiteDescription
    labels := report.SuiteLabels
    if len(labels) > 0 {
        name = name + " [" + strings.Join(labels, ", ") + "]"
    }
    fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name))
    for _, spec := range report.SpecReports {
        name := fmt.Sprintf("[%s]", spec.LeafNodeType)
        if spec.FullText() != "" {
            name = name + " " + spec.FullText()
        }
        labels := spec.Labels()
        if len(labels) > 0 {
            name = name + " [" + strings.Join(labels, ", ") + "]"
        }

        name = tcEscape(name)
        fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name)
        switch spec.State {
        case types.SpecStatePending:
            fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='pending']\n", name)
        case types.SpecStateSkipped:
            message := "skipped"
            if spec.Failure.Message != "" {
                message += " - " + spec.Failure.Message
            }
            fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message))
        case types.SpecStateFailed:
            details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace)
            fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
        case types.SpecStatePanicked:
            details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace)
            fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details))
        case types.SpecStateInterrupted:
            fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted' details='%s']\n", name, tcEscape(spec.Failure.Message))
        case types.SpecStateAborted:
            details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace)
            fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
        }

        fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructureReporters(spec)))
        fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(spec.CapturedGinkgoWriterOutput))
        fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0))
    }
    fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription))

    return f.Close()
}

func MergeAndCleanupTeamcityReports(sources []string, dst string) ([]string, error) {
    messages := []string{}
    merged := []byte{}
    for _, source := range sources {
        data, err := os.ReadFile(source)
        if err != nil {
            messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
            continue
        }
        os.Remove(source)
        merged = append(merged, data...)
    }
    return messages, os.WriteFile(dst, merged, 0666)
}
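Editor's note: tcEscape is unexported, so the snippet below (not vendored code) is only a conceptual illustration of how the replacements above keep a service message on one line; the expected output is derived from the replacement rules, not from running the vendored package.

// Inside package reporters:
name := tcEscape("[It] handles 'odd' [names]\n")
fmt.Printf("##teamcity[testStarted name='%s']\n", name)
// Expected line, per the replacements above:
// ##teamcity[testStarted name='|[It|] handles |'odd|' |[names|]|n']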
153
vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
generated
vendored
Normal file
153
vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
generated
vendored
Normal file
@ -0,0 +1,153 @@
package ginkgo

import (
    "fmt"
    "strings"

    "github.com/onsi/ginkgo/v2/internal"
    "github.com/onsi/ginkgo/v2/internal/global"
    "github.com/onsi/ginkgo/v2/reporters"
    "github.com/onsi/ginkgo/v2/types"
)

/*
Report represents the report for a Suite.
It is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#Report
*/
type Report = types.Report

/*
SpecReport represents the report for a Spec.
It is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport
*/
type SpecReport = types.SpecReport

/*
CurrentSpecReport returns information about the current running spec.
The returned object is a types.SpecReport which includes helper methods
to make extracting information about the spec easier.

You can learn more about SpecReport here: https://pkg.go.dev/github.com/onsi/ginkgo/types#SpecReport
You can learn more about CurrentSpecReport() here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
*/
func CurrentSpecReport() SpecReport {
    return global.Suite.CurrentSpecReport()
}

/*
ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter

- ReportEntryVisibilityAlways: the default behavior - the ReportEntry is always emitted.
- ReportEntryVisibilityFailureOrVerbose: the ReportEntry is only emitted if the spec fails or if the tests are run with -v (similar to GinkgoWriter's behavior).
- ReportEntryVisibilityNever: the ReportEntry is never emitted though it appears in any generated machine-readable reports (e.g. by setting `--json-report`).

You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports
*/
type ReportEntryVisibility = types.ReportEntryVisibility

const ReportEntryVisibilityAlways, ReportEntryVisibilityFailureOrVerbose, ReportEntryVisibilityNever = types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose, types.ReportEntryVisibilityNever

/*
AddReportEntry generates and adds a new ReportEntry to the current spec's SpecReport.
It can take any of the following arguments:
- A single arbitrary object to attach as the Value of the ReportEntry. This object will be included in any generated reports and will be emitted to the console when the report is emitted.
- A ReportEntryVisibility enum to control the visibility of the ReportEntry
- An Offset or CodeLocation decoration to control the reported location of the ReportEntry

If the Value object implements `fmt.Stringer`, its `String()` representation is used when emitting to the console.

AddReportEntry() must be called within a Subject or Setup node - not in a Container node.

You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports
*/
func AddReportEntry(name string, args ...interface{}) {
    cl := types.NewCodeLocation(1)
    reportEntry, err := internal.NewReportEntry(name, cl, args...)
    if err != nil {
        Fail(fmt.Sprintf("Failed to generate Report Entry:\n%s", err.Error()), 1)
    }
    err = global.Suite.AddReportEntry(reportEntry)
    if err != nil {
        Fail(fmt.Sprintf("Failed to add Report Entry:\n%s", err.Error()), 1)
    }
}

/*
ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that
receives a SpecReport. They are called before the spec starts.

You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure.
You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
*/
func ReportBeforeEach(body func(SpecReport)) bool {
    return pushNode(internal.NewReportBeforeEachNode(body, types.NewCodeLocation(1)))
}

/*
ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. ReportAfterEach nodes take a function that
receives a SpecReport. They are called after the spec has completed and receive the final report for the spec.

You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure.
You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
*/
func ReportAfterEach(body func(SpecReport)) bool {
    return pushNode(internal.NewReportAfterEachNode(body, types.NewCodeLocation(1)))
}

/*
ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report.

They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite.
ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)

When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across
all parallel nodes.

In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, JUnit, and Teamcity formatted reports using the --json-report, --junit-report, and --teamcity-report ginkgo CLI flags.

You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
*/
func ReportAfterSuite(text string, body func(Report)) bool {
    return pushNode(internal.NewReportAfterSuiteNode(text, body, types.NewCodeLocation(1)))
}

func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.ReporterConfig) {
    body := func(report Report) {
        if reporterConfig.JSONReport != "" {
            err := reporters.GenerateJSONReport(report, reporterConfig.JSONReport)
            if err != nil {
                Fail(fmt.Sprintf("Failed to generate JSON report:\n%s", err.Error()))
            }
        }
        if reporterConfig.JUnitReport != "" {
            err := reporters.GenerateJUnitReport(report, reporterConfig.JUnitReport)
            if err != nil {
                Fail(fmt.Sprintf("Failed to generate JUnit report:\n%s", err.Error()))
            }
        }
        if reporterConfig.TeamcityReport != "" {
            err := reporters.GenerateTeamcityReport(report, reporterConfig.TeamcityReport)
            if err != nil {
                Fail(fmt.Sprintf("Failed to generate Teamcity report:\n%s", err.Error()))
            }
        }
    }

    flags := []string{}
    if reporterConfig.JSONReport != "" {
        flags = append(flags, "--json-report")
    }
    if reporterConfig.JUnitReport != "" {
        flags = append(flags, "--junit-report")
    }
    if reporterConfig.TeamcityReport != "" {
        flags = append(flags, "--teamcity-report")
    }
    pushNode(internal.NewReportAfterSuiteNode(
        fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")),
        body,
        types.NewCustomCodeLocation("autogenerated by Ginkgo"),
    ))
}
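Editor's note: a minimal sketch (not vendored code) of the reporting DSL above in use; it assumes the conventional dot-import of ginkgo/v2 plus the ginkgo/v2/types import, and the spec content is invented for illustration.

var _ = Describe("uploads", func() {
    It("records diagnostics", func() {
        // Attach a value to this spec's report; only shown on failure or with -v.
        AddReportEntry("upload-duration", 42*time.Millisecond, ReportEntryVisibilityFailureOrVerbose)
    })
})

var _ = ReportAfterEach(func(report SpecReport) {
    // Runs after every spec, even skipped or pending ones.
    if report.State == types.SpecStateFailed {
        fmt.Println("failed:", report.FullText())
    }
})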
265
vendor/github.com/onsi/ginkgo/v2/table_dsl.go
generated
vendored
Normal file
265
vendor/github.com/onsi/ginkgo/v2/table_dsl.go
generated
vendored
Normal file
@ -0,0 +1,265 @@
package ginkgo

import (
    "fmt"
    "reflect"
    "strings"

    "github.com/onsi/ginkgo/v2/internal"
    "github.com/onsi/ginkgo/v2/types"
)

/*
The EntryDescription decorator allows you to pass a format string to DescribeTable() and Entry(). This format string is used to generate entry names via:

    fmt.Sprintf(formatString, parameters...)

where parameters are the parameters passed into the entry.

When passed into an Entry the EntryDescription is used to generate the name of that entry. When passed to DescribeTable, the EntryDescription is used to generate the names for any entries that have `nil` descriptions.

You can learn more about generating EntryDescriptions here: https://onsi.github.io/ginkgo/#generating-entry-descriptions
*/
type EntryDescription string

func (ed EntryDescription) render(args ...interface{}) string {
    return fmt.Sprintf(string(ed), args...)
}

/*
DescribeTable describes a table-driven spec.

For example:

    DescribeTable("a simple table",
        func(x int, y int, expected bool) {
            Ω(x > y).Should(Equal(expected))
        },
        Entry("x > y", 1, 0, true),
        Entry("x == y", 0, 0, false),
        Entry("x < y", 0, 1, false),
    )

You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs
And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
*/
func DescribeTable(description string, args ...interface{}) bool {
    generateTable(description, args...)
    return true
}

/*
You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
*/
func FDescribeTable(description string, args ...interface{}) bool {
    args = append(args, internal.Focus)
    generateTable(description, args...)
    return true
}

/*
You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
*/
func PDescribeTable(description string, args ...interface{}) bool {
    args = append(args, internal.Pending)
    generateTable(description, args...)
    return true
}

/*
You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`.
*/
var XDescribeTable = PDescribeTable

/*
TableEntry represents an entry in a table test. You generally use the `Entry` constructor.
*/
type TableEntry struct {
    description  interface{}
    decorations  []interface{}
    parameters   []interface{}
    codeLocation types.CodeLocation
}

/*
Entry constructs a TableEntry.

The first argument is a description. This can be a string, a function that accepts the parameters passed to the TableEntry and returns a string, an EntryDescription format string, or nil. If nil is provided then the name of the Entry is derived using the table-level entry description.
Subsequent arguments accept any Ginkgo decorators. These are filtered out and the remaining arguments are passed into the Spec function associated with the table.

Each Entry ends up generating an individual Ginkgo It. The body of the It is the Table Body function with the Entry parameters passed in.

You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs
*/
func Entry(description interface{}, args ...interface{}) TableEntry {
    decorations, parameters := internal.PartitionDecorations(args...)
    return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)}
}

/*
You can focus a particular entry with FEntry. This is equivalent to FIt.
*/
func FEntry(description interface{}, args ...interface{}) TableEntry {
    decorations, parameters := internal.PartitionDecorations(args...)
    decorations = append(decorations, internal.Focus)
    return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)}
}

/*
You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
*/
func PEntry(description interface{}, args ...interface{}) TableEntry {
    decorations, parameters := internal.PartitionDecorations(args...)
    decorations = append(decorations, internal.Pending)
    return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)}
}

/*
You can mark a particular entry as pending with XEntry. This is equivalent to XIt.
*/
var XEntry = PEntry

func generateTable(description string, args ...interface{}) {
    cl := types.NewCodeLocation(2)
    containerNodeArgs := []interface{}{cl}

    entries := []TableEntry{}
    var itBody interface{}

    var tableLevelEntryDescription interface{}
    tableLevelEntryDescription = func(args ...interface{}) string {
        out := []string{}
        for _, arg := range args {
            out = append(out, fmt.Sprint(arg))
        }
        return "Entry: " + strings.Join(out, ", ")
    }

    for i, arg := range args {
        switch t := reflect.TypeOf(arg); {
        case t == nil:
            exitIfErr(types.GinkgoErrors.IncorrectParameterTypeForTable(i, "nil", cl))
        case t == reflect.TypeOf(TableEntry{}):
            entries = append(entries, arg.(TableEntry))
        case t == reflect.TypeOf([]TableEntry{}):
            entries = append(entries, arg.([]TableEntry)...)
        case t == reflect.TypeOf(EntryDescription("")):
            tableLevelEntryDescription = arg.(EntryDescription).render
        case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""):
            tableLevelEntryDescription = arg
        case t.Kind() == reflect.Func:
            if itBody != nil {
                exitIfErr(types.GinkgoErrors.MultipleEntryBodyFunctionsForTable(cl))
            }
            itBody = arg
        default:
            containerNodeArgs = append(containerNodeArgs, arg)
        }
    }

    containerNodeArgs = append(containerNodeArgs, func() {
        for _, entry := range entries {
            var err error
            entry := entry
            var description string
            switch t := reflect.TypeOf(entry.description); {
            case t == nil:
                err = validateParameters(tableLevelEntryDescription, entry.parameters, "Entry Description function", entry.codeLocation)
                if err == nil {
                    description = invokeFunction(tableLevelEntryDescription, entry.parameters)[0].String()
                }
            case t == reflect.TypeOf(EntryDescription("")):
                description = entry.description.(EntryDescription).render(entry.parameters...)
            case t == reflect.TypeOf(""):
                description = entry.description.(string)
            case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""):
                err = validateParameters(entry.description, entry.parameters, "Entry Description function", entry.codeLocation)
                if err == nil {
                    description = invokeFunction(entry.description, entry.parameters)[0].String()
                }
            default:
                err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation)
            }

            if err == nil {
                err = validateParameters(itBody, entry.parameters, "Table Body function", entry.codeLocation)
            }
            itNodeArgs := []interface{}{entry.codeLocation}
            itNodeArgs = append(itNodeArgs, entry.decorations...)
            itNodeArgs = append(itNodeArgs, func() {
                if err != nil {
                    panic(err)
                }
                invokeFunction(itBody, entry.parameters)
            })

            pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, description, itNodeArgs...))
        }
    })

    pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...))
}

func invokeFunction(function interface{}, parameters []interface{}) []reflect.Value {
    inValues := make([]reflect.Value, len(parameters))

    funcType := reflect.TypeOf(function)
    limit := funcType.NumIn()
    if funcType.IsVariadic() {
        limit = limit - 1
    }

    for i := 0; i < limit && i < len(parameters); i++ {
        inValues[i] = computeValue(parameters[i], funcType.In(i))
    }

    if funcType.IsVariadic() {
        variadicType := funcType.In(limit).Elem()
        for i := limit; i < len(parameters); i++ {
            inValues[i] = computeValue(parameters[i], variadicType)
        }
    }

    return reflect.ValueOf(function).Call(inValues)
}

func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation) error {
    funcType := reflect.TypeOf(function)
    limit := funcType.NumIn()
    if funcType.IsVariadic() {
        limit = limit - 1
    }
    if len(parameters) < limit {
        return types.GinkgoErrors.TooFewParametersToTableFunction(limit, len(parameters), kind, cl)
    }
    if len(parameters) > limit && !funcType.IsVariadic() {
        return types.GinkgoErrors.TooManyParametersToTableFunction(limit, len(parameters), kind, cl)
    }
    var i = 0
    for ; i < limit; i++ {
        actual := reflect.TypeOf(parameters[i])
        expected := funcType.In(i)
        if !(actual == nil) && !actual.AssignableTo(expected) {
            return types.GinkgoErrors.IncorrectParameterTypeToTableFunction(i+1, expected, actual, kind, cl)
        }
    }
    if funcType.IsVariadic() {
        expected := funcType.In(limit).Elem()
        for ; i < len(parameters); i++ {
            actual := reflect.TypeOf(parameters[i])
            if !(actual == nil) && !actual.AssignableTo(expected) {
                return types.GinkgoErrors.IncorrectVariadicParameterTypeToTableFunction(expected, actual, kind, cl)
            }
        }
    }

    return nil
}

func computeValue(parameter interface{}, t reflect.Type) reflect.Value {
    if parameter == nil {
        return reflect.Zero(t)
    } else {
        return reflect.ValueOf(parameter)
    }
}
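Editor's note: a short sketch (not vendored code) of the table-level EntryDescription path handled by generateTable above: entries with nil descriptions have their names rendered from the format string. It assumes dot-imports of ginkgo/v2 and Gomega (Expect/Equal); the arithmetic example is invented.

var _ = DescribeTable("addition",
    func(a, b, sum int) {
        Expect(a + b).To(Equal(sum))
    },
    EntryDescription("%d + %d = %d"), // names the nil-description entries below
    Entry(nil, 1, 2, 3),              // spec name becomes "1 + 2 = 3"
    Entry(nil, 2, 2, 4),              // spec name becomes "2 + 2 = 4"
)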
92
vendor/github.com/onsi/ginkgo/v2/types/code_location.go
generated
vendored
Normal file
92
vendor/github.com/onsi/ginkgo/v2/types/code_location.go
generated
vendored
Normal file
@ -0,0 +1,92 @@
package types

import (
    "fmt"
    "os"
    "regexp"
    "runtime"
    "runtime/debug"
    "strings"
)

type CodeLocation struct {
    FileName       string `json:",omitempty"`
    LineNumber     int    `json:",omitempty"`
    FullStackTrace string `json:",omitempty"`
    CustomMessage  string `json:",omitempty"`
}

func (codeLocation CodeLocation) String() string {
    if codeLocation.CustomMessage != "" {
        return codeLocation.CustomMessage
    }
    return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
}

func (codeLocation CodeLocation) ContentsOfLine() string {
    if codeLocation.CustomMessage != "" {
        return ""
    }
    contents, err := os.ReadFile(codeLocation.FileName)
    if err != nil {
        return ""
    }
    lines := strings.Split(string(contents), "\n")
    if len(lines) < codeLocation.LineNumber {
        return ""
    }
    return lines[codeLocation.LineNumber-1]
}

func NewCustomCodeLocation(message string) CodeLocation {
    return CodeLocation{
        CustomMessage: message,
    }
}

func NewCodeLocation(skip int) CodeLocation {
    _, file, line, _ := runtime.Caller(skip + 1)
    return CodeLocation{FileName: file, LineNumber: line}
}

func NewCodeLocationWithStackTrace(skip int) CodeLocation {
    _, file, line, _ := runtime.Caller(skip + 1)
    stackTrace := PruneStack(string(debug.Stack()), skip+1)
    return CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
}

// PruneStack removes references to functions that are internal to Ginkgo
// and the Go runtime from a stack string and a certain number of stack entries
// at the beginning of the stack. The stack string has the format
// as returned by runtime/debug.Stack. The leading goroutine information is
// optional and always removed if present. Beware that runtime/debug.Stack
// adds itself as first entry, so typically skip must be >= 1 to remove that
// entry.
func PruneStack(fullStackTrace string, skip int) string {
    stack := strings.Split(fullStackTrace, "\n")
    // Ensure that the even entries are the method names and
    // the odd entries the source code information.
    if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") {
        // Ignore "goroutine 29 [running]:" line.
        stack = stack[1:]
    }
    // The "+1" is for skipping over the initial entry, which is
    // runtime/debug.Stack() itself.
    if len(stack) > 2*(skip+1) {
        stack = stack[2*(skip+1):]
    }
    prunedStack := []string{}
    if os.Getenv("GINKGO_PRUNE_STACK") == "FALSE" {
        prunedStack = stack
    } else {
        re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
        for i := 0; i < len(stack)/2; i++ {
            // We filter out based on the source code file name.
            if !re.Match([]byte(stack[i*2+1])) {
                prunedStack = append(prunedStack, stack[i*2])
                prunedStack = append(prunedStack, stack[i*2+1])
            }
        }
    }
    return strings.Join(prunedStack, "\n")
}
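Editor's note: a small sketch (not vendored code) of the skip semantics used by NewCodeLocation above; the helper and output path are hypothetical.

// currentLocation returns the location of whoever called it: NewCodeLocation(1)
// skips this helper's own frame, mirroring how the DSL functions above pass 1 or 2.
func currentLocation() types.CodeLocation {
    return types.NewCodeLocation(1)
}

func main() {
    cl := currentLocation()
    fmt.Println(cl.String()) // e.g. /path/to/main.go:12
}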
723
vendor/github.com/onsi/ginkgo/v2/types/config.go
generated
vendored
Normal file
723
vendor/github.com/onsi/ginkgo/v2/types/config.go
generated
vendored
Normal file
@ -0,0 +1,723 @@
/*
Ginkgo accepts a number of configuration options.
These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli)
*/

package types

import (
    "flag"
    "os"
    "runtime"
    "strconv"
    "strings"
    "time"
)

// Configuration controlling how an individual test suite is run
type SuiteConfig struct {
    RandomSeed            int64
    RandomizeAllSpecs     bool
    FocusStrings          []string
    SkipStrings           []string
    FocusFiles            []string
    SkipFiles             []string
    LabelFilter           string
    FailOnPending         bool
    FailFast              bool
    FlakeAttempts         int
    EmitSpecProgress      bool
    DryRun                bool
    Timeout               time.Duration
    OutputInterceptorMode string

    ParallelProcess int
    ParallelTotal   int
    ParallelHost    string
}

func NewDefaultSuiteConfig() SuiteConfig {
    return SuiteConfig{
        RandomSeed:      time.Now().Unix(),
        Timeout:         time.Hour,
        ParallelProcess: 1,
        ParallelTotal:   1,
    }
}

type VerbosityLevel uint

const (
    VerbosityLevelSuccinct VerbosityLevel = iota
    VerbosityLevelNormal
    VerbosityLevelVerbose
    VerbosityLevelVeryVerbose
)

func (vl VerbosityLevel) GT(comp VerbosityLevel) bool {
    return vl > comp
}

func (vl VerbosityLevel) GTE(comp VerbosityLevel) bool {
    return vl >= comp
}

func (vl VerbosityLevel) Is(comp VerbosityLevel) bool {
    return vl == comp
}

func (vl VerbosityLevel) LTE(comp VerbosityLevel) bool {
    return vl <= comp
}

func (vl VerbosityLevel) LT(comp VerbosityLevel) bool {
    return vl < comp
}

// Configuration for Ginkgo's reporter
type ReporterConfig struct {
    NoColor                bool
    SlowSpecThreshold      time.Duration
    Succinct               bool
    Verbose                bool
    VeryVerbose            bool
    FullTrace              bool
    AlwaysEmitGinkgoWriter bool

    JSONReport     string
    JUnitReport    string
    TeamcityReport string
}

func (rc ReporterConfig) Verbosity() VerbosityLevel {
    if rc.Succinct {
        return VerbosityLevelSuccinct
    } else if rc.Verbose {
        return VerbosityLevelVerbose
    } else if rc.VeryVerbose {
        return VerbosityLevelVeryVerbose
    }
    return VerbosityLevelNormal
}

func (rc ReporterConfig) WillGenerateReport() bool {
    return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != ""
}

func NewDefaultReporterConfig() ReporterConfig {
    return ReporterConfig{
        SlowSpecThreshold: 5 * time.Second,
    }
}
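Editor's note: a brief sketch (not vendored code) of the default constructors and the WillGenerateReport check above, which Ginkgo uses to decide whether to register the autogenerated ReportAfterSuite node; "report.json" is a hypothetical path.

suiteConfig := types.NewDefaultSuiteConfig()
reporterConfig := types.NewDefaultReporterConfig()
reporterConfig.JSONReport = "report.json" // hypothetical output path

fmt.Println(suiteConfig.ParallelTotal)           // 1
fmt.Println(reporterConfig.WillGenerateReport()) // true once any report path is set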
// Configuration for the Ginkgo CLI
type CLIConfig struct {
    //for build, run, and watch
    Recurse bool
    SkipPackage string
    RequireSuite bool
    NumCompilers int

    //for run and watch only
    Procs int
    Parallel bool
    AfterRunHook string
    OutputDir string
    KeepSeparateCoverprofiles bool
    KeepSeparateReports bool

    //for run only
    KeepGoing bool
    UntilItFails bool
    Repeat int
    RandomizeSuites bool

    //for watch only
    Depth int
    WatchRegExp string
}

func NewDefaultCLIConfig() CLIConfig {
    return CLIConfig{
        Depth: 1,
        WatchRegExp: `\.go$`,
    }
}

func (g CLIConfig) ComputedProcs() int {
    if g.Procs > 0 {
        return g.Procs
    }

    n := 1
    if g.Parallel {
        n = runtime.NumCPU()
        if n > 4 {
            n = n - 1
        }
    }
    return n
}

func (g CLIConfig) ComputedNumCompilers() int {
    if g.NumCompilers > 0 {
        return g.NumCompilers
    }

    return runtime.NumCPU()
}

// Configuration for the Ginkgo CLI capturing available go flags
// A subset of Go flags are exposed by Ginkgo. Some are available at compile time (e.g. ginkgo build) and others only at run time (e.g. ginkgo run - which has both build and run time flags).
// More details can be found at:
// https://docs.google.com/spreadsheets/d/1zkp-DS4hU4sAJl5eHh1UmgwxCPQhf3s5a8fbiOI8tJU/
type GoFlagsConfig struct {
    //build-time flags for code-and-performance analysis
    Race bool
    Cover bool
    CoverMode string
    CoverPkg string
    Vet string

    //run-time flags for code-and-performance analysis
    BlockProfile string
    BlockProfileRate int
    CoverProfile string
    CPUProfile string
    MemProfile string
    MemProfileRate int
    MutexProfile string
    MutexProfileFraction int
    Trace string

    //build-time flags for building
    A bool
    ASMFlags string
    BuildMode string
    Compiler string
    GCCGoFlags string
    GCFlags string
    InstallSuffix string
    LDFlags string
    LinkShared bool
    Mod string
    N bool
    ModFile string
    ModCacheRW bool
    MSan bool
    PkgDir string
    Tags string
    TrimPath bool
    ToolExec string
    Work bool
    X bool
}

func NewDefaultGoFlagsConfig() GoFlagsConfig {
    return GoFlagsConfig{}
}

func (g GoFlagsConfig) BinaryMustBePreserved() bool {
    return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != ""
}

// Configuration options that were deprecated in 2.0
type deprecatedConfig struct {
    DebugParallel bool
    NoisySkippings bool
    NoisyPendings bool
    RegexScansFilePath bool
    SlowSpecThresholdWithFLoatUnits float64
    Stream bool
    Notify bool
}

// Flags

// Flags sections used by both the CLI and the Ginkgo test process
var FlagSections = GinkgoFlagSections{
    {Key: "multiple-suites", Style: "{{dark-green}}", Heading: "Running Multiple Test Suites"},
    {Key: "order", Style: "{{green}}", Heading: "Controlling Test Order"},
    {Key: "parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism"},
    {Key: "low-level-parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism",
        Description: "These are set by the Ginkgo CLI, {{red}}{{bold}}do not set them manually{{/}} via go test.\nUse ginkgo -p or ginkgo -procs=N instead."},
    {Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"},
    {Key: "failure", Style: "{{red}}", Heading: "Failure Handling"},
    {Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"},
    {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"},
    {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"},
    {Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests",
        Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."},
    {Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"},
    {Key: "misc", Style: "{{light-gray}}", Heading: "Miscellaneous"},
    {Key: "go-build", Style: "{{light-gray}}", Heading: "Go Build Flags", Succinct: true,
        Description: "These flags are inherited from go build. Run {{bold}}ginkgo help build{{/}} for more detailed flag documentation."},
}

// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI
var SuiteConfigFlags = GinkgoFlags{
    {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo",
        Usage: "The seed used to randomize the spec suite."},
    {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags",
        Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."},

    {KeyPath: "S.FailOnPending", Name: "fail-on-pending", SectionKey: "failure", DeprecatedName: "failOnPending", DeprecatedDocLink: "changed-command-line-flags",
        Usage: "If set, ginkgo will mark the test suite as failed if any specs are pending."},
    {KeyPath: "S.FailFast", Name: "fail-fast", SectionKey: "failure", DeprecatedName: "failFast", DeprecatedDocLink: "changed-command-line-flags",
        Usage: "If set, ginkgo will stop running a test suite after a failure occurs."},
    {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags",
        Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."},

    {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags",
        Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."},
    {KeyPath: "S.EmitSpecProgress", Name: "progress", SectionKey: "debug",
        Usage: "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter."},
    {KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h",
        Usage: "Test suite fails if it does not complete within the specified timeout."},
    {KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none",
        Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."},

    {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression",
        Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"},
    {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter",
        Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."},
    {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter",
        Usage: "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed."},
    {KeyPath: "S.FocusFiles", Name: "focus-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line",
        Usage: "If set, ginkgo will only run specs in matching files. Can be specified multiple times, values are ORed."},
    {KeyPath: "S.SkipFiles", Name: "skip-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line",
        Usage: "If set, ginkgo will skip specs in matching files. Can be specified multiple times, values are ORed."},

    {KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"},
    {KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"},
}

// ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI)
var ParallelConfigFlags = GinkgoFlags{
    {KeyPath: "S.ParallelProcess", Name: "parallel.process", SectionKey: "low-level-parallel", UsageDefaultValue: "1",
        Usage: "This worker process's (one-indexed) process number. For running specs in parallel."},
    {KeyPath: "S.ParallelTotal", Name: "parallel.total", SectionKey: "low-level-parallel", UsageDefaultValue: "1",
        Usage: "The total number of worker processes. For running specs in parallel."},
    {KeyPath: "S.ParallelHost", Name: "parallel.host", SectionKey: "low-level-parallel", UsageDefaultValue: "set by Ginkgo CLI",
        Usage: "The address for the server that will synchronize the processes."},
}

// ReporterConfigFlags provides flags for the Ginkgo test process, and CLI
var ReporterConfigFlags = GinkgoFlags{
    {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags",
        Usage: "If set, suppress color output in default reporter."},
    {KeyPath: "R.SlowSpecThreshold", Name: "slow-spec-threshold", SectionKey: "output", UsageArgument: "duration", UsageDefaultValue: "5s",
        Usage: "Specs that take longer to run than this threshold are flagged as slow by the default reporter."},
    {KeyPath: "R.Verbose", Name: "v", SectionKey: "output",
        Usage: "If set, emits more output including GinkgoWriter contents."},
    {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output",
        Usage: "If set, emits with maximal verbosity - includes skipped and pending tests."},
    {KeyPath: "R.Succinct", Name: "succinct", SectionKey: "output",
        Usage: "If set, default reporter prints out a very succinct report"},
    {KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output",
        Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
    {KeyPath: "R.AlwaysEmitGinkgoWriter", Name: "always-emit-ginkgo-writer", SectionKey: "output", DeprecatedName: "reportPassed", DeprecatedDocLink: "renamed--reportpassed",
        Usage: "If set, default reporter prints out captured output of passed tests."},

    {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
        Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
    {KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure",
        Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."},
    {KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output",
        Usage: "If set, Ginkgo will generate a Teamcity-formatted test report at the specified location."},

    {KeyPath: "D.SlowSpecThresholdWithFLoatUnits", DeprecatedName: "slowSpecThreshold", DeprecatedDocLink: "changed--slowspecthreshold",
        Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"},
    {KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
    {KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
}

// BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process
func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) {
    flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...)
    flags = flags.WithPrefix("ginkgo")
    bindings := map[string]interface{}{
        "S": suiteConfig,
        "R": reporterConfig,
        "D": &deprecatedConfig{},
    }
    extraGoFlagsSection := GinkgoFlagSection{Style: "{{gray}}", Heading: "Go test flags"}

    return NewAttachedGinkgoFlagSet(flag.CommandLine, flags, bindings, FlagSections, extraGoFlagsSection)
}

// VetConfig validates that the Ginkgo test process' configuration is sound
func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig ReporterConfig) []error {
|
||||||
|
errors := []error{}
|
||||||
|
|
||||||
|
if flagSet.WasSet("count") || flagSet.WasSet("test.count") {
|
||||||
|
flag := flagSet.Lookup("count")
|
||||||
|
if flag == nil {
|
||||||
|
flag = flagSet.Lookup("test.count")
|
||||||
|
}
|
||||||
|
count, err := strconv.Atoi(flag.Value.String())
|
||||||
|
if err != nil || count != 1 {
|
||||||
|
errors = append(errors, GinkgoErrors.InvalidGoFlagCount())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if flagSet.WasSet("parallel") || flagSet.WasSet("test.parallel") {
|
||||||
|
errors = append(errors, GinkgoErrors.InvalidGoFlagParallel())
|
||||||
|
}
|
||||||
|
|
||||||
|
if suiteConfig.ParallelTotal < 1 {
|
||||||
|
errors = append(errors, GinkgoErrors.InvalidParallelTotalConfiguration())
|
||||||
|
}
|
||||||
|
|
||||||
|
if suiteConfig.ParallelProcess > suiteConfig.ParallelTotal || suiteConfig.ParallelProcess < 1 {
|
||||||
|
errors = append(errors, GinkgoErrors.InvalidParallelProcessConfiguration())
|
||||||
|
}
|
||||||
|
|
||||||
|
if suiteConfig.ParallelTotal > 1 && suiteConfig.ParallelHost == "" {
|
||||||
|
errors = append(errors, GinkgoErrors.MissingParallelHostConfiguration())
|
||||||
|
}
|
||||||
|
|
||||||
|
if suiteConfig.DryRun && suiteConfig.ParallelTotal > 1 {
|
||||||
|
errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(suiteConfig.FocusFiles) > 0 {
|
||||||
|
_, err := ParseFileFilters(suiteConfig.FocusFiles)
|
||||||
|
if err != nil {
|
||||||
|
errors = append(errors, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(suiteConfig.SkipFiles) > 0 {
|
||||||
|
_, err := ParseFileFilters(suiteConfig.SkipFiles)
|
||||||
|
if err != nil {
|
||||||
|
errors = append(errors, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if suiteConfig.LabelFilter != "" {
|
||||||
|
_, err := ParseLabelFilter(suiteConfig.LabelFilter)
|
||||||
|
if err != nil {
|
||||||
|
errors = append(errors, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch strings.ToLower(suiteConfig.OutputInterceptorMode) {
|
||||||
|
case "", "dup", "swap", "none":
|
||||||
|
default:
|
||||||
|
errors = append(errors, GinkgoErrors.InvalidOutputInterceptorModeConfiguration(suiteConfig.OutputInterceptorMode))
|
||||||
|
}
|
||||||
|
|
||||||
|
numVerbosity := 0
|
||||||
|
for _, v := range []bool{reporterConfig.Succinct, reporterConfig.Verbose, reporterConfig.VeryVerbose} {
|
||||||
|
if v {
|
||||||
|
numVerbosity++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if numVerbosity > 1 {
|
||||||
|
errors = append(errors, GinkgoErrors.ConflictingVerbosityConfiguration())
|
||||||
|
}
|
||||||
|
|
||||||
|
return errors
|
||||||
|
}
|
||||||
|
|
||||||
|
// GinkgoCLISharedFlags provides flags shared by the Ginkgo CLI's build, watch, and run commands
|
||||||
|
var GinkgoCLISharedFlags = GinkgoFlags{
|
||||||
|
{KeyPath: "C.Recurse", Name: "r", SectionKey: "multiple-suites",
|
||||||
|
Usage: "If set, ginkgo finds and runs test suites under the current directory recursively."},
|
||||||
|
{KeyPath: "C.SkipPackage", Name: "skip-package", SectionKey: "multiple-suites", DeprecatedName: "skipPackage", DeprecatedDocLink: "changed-command-line-flags",
|
||||||
|
UsageArgument: "comma-separated list of packages",
|
||||||
|
Usage: "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored."},
|
||||||
|
{KeyPath: "C.RequireSuite", Name: "require-suite", SectionKey: "failure", DeprecatedName: "requireSuite", DeprecatedDocLink: "changed-command-line-flags",
|
||||||
|
Usage: "If set, Ginkgo fails if there are ginkgo tests in a directory but no invocation of RunSpecs."},
|
||||||
|
{KeyPath: "C.NumCompilers", Name: "compilers", SectionKey: "multiple-suites", UsageDefaultValue: "0 (will autodetect)",
|
||||||
|
Usage: "When running multiple packages, the number of concurrent compilations to perform."},
|
||||||
|
}
|
||||||
|
|
||||||
|
// GinkgoCLIRunAndWatchFlags provides flags shared by the Ginkgo CLI's build and watch commands (but not run)
|
||||||
|
var GinkgoCLIRunAndWatchFlags = GinkgoFlags{
|
||||||
|
{KeyPath: "C.Procs", Name: "procs", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)",
|
||||||
|
Usage: "The number of parallel test nodes to run."},
|
||||||
|
{KeyPath: "C.Procs", Name: "nodes", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)",
|
||||||
|
Usage: "--nodes is an alias for --procs"},
|
||||||
|
{KeyPath: "C.Parallel", Name: "p", SectionKey: "parallel",
|
||||||
|
Usage: "If set, ginkgo will run in parallel with an auto-detected number of nodes."},
|
||||||
|
{KeyPath: "C.AfterRunHook", Name: "after-run-hook", SectionKey: "misc", DeprecatedName: "afterSuiteHook", DeprecatedDocLink: "changed-command-line-flags",
|
||||||
|
Usage: "Command to run when a test suite completes."},
|
||||||
|
{KeyPath: "C.OutputDir", Name: "output-dir", SectionKey: "output", UsageArgument: "directory", DeprecatedName: "outputdir", DeprecatedDocLink: "improved-profiling-support",
|
||||||
|
Usage: "A location to place all generated profiles and reports."},
|
||||||
|
{KeyPath: "C.KeepSeparateCoverprofiles", Name: "keep-separate-coverprofiles", SectionKey: "code-and-coverage-analysis",
|
||||||
|
Usage: "If set, Ginkgo does not merge coverprofiles into one monolithic coverprofile. The coverprofiles will remain in their respective package directories or in -output-dir if set."},
|
||||||
|
{KeyPath: "C.KeepSeparateReports", Name: "keep-separate-reports", SectionKey: "output",
|
||||||
|
Usage: "If set, Ginkgo does not merge per-suite reports (e.g. -json-report) into one monolithic report for the entire testrun. The reports will remain in their respective package directories or in -output-dir if set."},
|
||||||
|
|
||||||
|
{KeyPath: "D.Stream", DeprecatedName: "stream", DeprecatedDocLink: "removed--stream", DeprecatedVersion: "2.0.0"},
|
||||||
|
{KeyPath: "D.Notify", DeprecatedName: "notify", DeprecatedDocLink: "removed--notify", DeprecatedVersion: "2.0.0"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// GinkgoCLIRunFlags provides flags for Ginkgo CLI's run command that aren't shared by any other commands
|
||||||
|
var GinkgoCLIRunFlags = GinkgoFlags{
|
||||||
|
{KeyPath: "C.KeepGoing", Name: "keep-going", SectionKey: "multiple-suites", DeprecatedName: "keepGoing", DeprecatedDocLink: "changed-command-line-flags",
|
||||||
|
Usage: "If set, failures from earlier test suites do not prevent later test suites from running."},
|
||||||
|
{KeyPath: "C.UntilItFails", Name: "until-it-fails", SectionKey: "debug", DeprecatedName: "untilItFails", DeprecatedDocLink: "changed-command-line-flags",
|
||||||
|
Usage: "If set, ginkgo will keep rerunning test suites until a failure occurs."},
|
||||||
|
{KeyPath: "C.Repeat", Name: "repeat", SectionKey: "debug", UsageArgument: "n", UsageDefaultValue: "0 - i.e. no repetition, run only once",
|
||||||
|
Usage: "The number of times to re-run a test-suite. Useful for debugging flaky tests. If set to N the suite will be run N+1 times and will be required to pass each time."},
|
||||||
|
{KeyPath: "C.RandomizeSuites", Name: "randomize-suites", SectionKey: "order", DeprecatedName: "randomizeSuites", DeprecatedDocLink: "changed-command-line-flags",
|
||||||
|
Usage: "If set, ginkgo will randomize the order in which test suites run."},
|
||||||
|
}
|
||||||
|
|
||||||
|
// GinkgoCLIRunFlags provides flags for Ginkgo CLI's watch command that aren't shared by any other commands
|
||||||
|
var GinkgoCLIWatchFlags = GinkgoFlags{
|
||||||
|
{KeyPath: "C.Depth", Name: "depth", SectionKey: "watch",
|
||||||
|
Usage: "Ginkgo will watch dependencies down to this depth in the dependency tree."},
|
||||||
|
{KeyPath: "C.WatchRegExp", Name: "watch-regexp", SectionKey: "watch", DeprecatedName: "watchRegExp", DeprecatedDocLink: "changed-command-line-flags",
|
||||||
|
UsageArgument: "Regular Expression",
|
||||||
|
UsageDefaultValue: `\.go$`,
|
||||||
|
Usage: "Only files matching this regular expression will be watched for changes."},
|
||||||
|
}
|
||||||
|
|
||||||
|
// GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI
|
||||||
|
var GoBuildFlags = GinkgoFlags{
|
||||||
|
{KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis",
|
||||||
|
Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."},
|
||||||
|
{KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis",
|
||||||
|
Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`},
|
||||||
|
{KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis",
|
||||||
|
Usage: "Enable coverage analysis. Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."},
|
||||||
|
{KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis",
|
||||||
|
Usage: `Set the mode for coverage analysis for the package[s] being tested. 'set': does this statement run? 'count': how many times does this statement run? 'atomic': like count, but correct in multithreaded tests and more expensive (must use atomic with -race). Sets -cover`},
|
||||||
|
{KeyPath: "Go.CoverPkg", Name: "coverpkg", UsageArgument: "pattern1,pattern2,pattern3", SectionKey: "code-and-coverage-analysis",
|
||||||
|
Usage: "Apply coverage analysis in each test to packages matching the patterns. The default is for each test to analyze only the package being tested. See 'go help packages' for a description of package patterns. Sets -cover."},
|
||||||
|
|
||||||
|
{KeyPath: "Go.A", Name: "a", SectionKey: "go-build",
|
||||||
|
Usage: "force rebuilding of packages that are already up-to-date."},
|
||||||
|
{KeyPath: "Go.ASMFlags", Name: "asmflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
|
||||||
|
Usage: "arguments to pass on each go tool asm invocation."},
|
||||||
|
{KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build",
|
||||||
|
Usage: "build mode to use. See 'go help buildmode' for more."},
|
||||||
|
{KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build",
|
||||||
|
Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."},
|
||||||
|
{KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
|
||||||
|
Usage: "arguments to pass on each gccgo compiler/linker invocation."},
|
||||||
|
{KeyPath: "Go.GCFlags", Name: "gcflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
|
||||||
|
Usage: "arguments to pass on each go tool compile invocation."},
|
||||||
|
{KeyPath: "Go.InstallSuffix", Name: "installsuffix", SectionKey: "go-build",
|
||||||
|
Usage: "a suffix to use in the name of the package installation directory, in order to keep output separate from default builds. If using the -race flag, the install suffix is automatically set to raceor, if set explicitly, has _race appended to it. Likewise for the -msan flag. Using a -buildmode option that requires non-default compile flags has a similar effect."},
|
||||||
|
{KeyPath: "Go.LDFlags", Name: "ldflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
|
||||||
|
Usage: "arguments to pass on each go tool link invocation."},
|
||||||
|
{KeyPath: "Go.LinkShared", Name: "linkshared", SectionKey: "go-build",
|
||||||
|
Usage: "build code that will be linked against shared libraries previously created with -buildmode=shared."},
|
||||||
|
{KeyPath: "Go.Mod", Name: "mod", UsageArgument: "mode (readonly, vendor, or mod)", SectionKey: "go-build",
|
||||||
|
Usage: "module download mode to use: readonly, vendor, or mod. See 'go help modules' for more."},
|
||||||
|
{KeyPath: "Go.ModCacheRW", Name: "modcacherw", SectionKey: "go-build",
|
||||||
|
Usage: "leave newly-created directories in the module cache read-write instead of making them read-only."},
|
||||||
|
{KeyPath: "Go.ModFile", Name: "modfile", UsageArgument: "file", SectionKey: "go-build",
|
||||||
|
Usage: `in module aware mode, read (and possibly write) an alternate go.mod file instead of the one in the module root directory. A file named go.mod must still be present in order to determine the module root directory, but it is not accessed. When -modfile is specified, an alternate go.sum file is also used: its path is derived from the -modfile flag by trimming the ".mod" extension and appending ".sum".`},
|
||||||
|
{KeyPath: "Go.MSan", Name: "msan", SectionKey: "go-build",
|
||||||
|
Usage: "enable interoperation with memory sanitizer. Supported only on linux/amd64, linux/arm64 and only with Clang/LLVM as the host C compiler. On linux/arm64, pie build mode will be used."},
|
||||||
|
{KeyPath: "Go.N", Name: "n", SectionKey: "go-build",
|
||||||
|
Usage: "print the commands but do not run them."},
|
||||||
|
{KeyPath: "Go.PkgDir", Name: "pkgdir", UsageArgument: "dir", SectionKey: "go-build",
|
||||||
|
Usage: "install and load all packages from dir instead of the usual locations. For example, when building with a non-standard configuration, use -pkgdir to keep generated packages in a separate location."},
|
||||||
|
{KeyPath: "Go.Tags", Name: "tags", UsageArgument: "tag,list", SectionKey: "go-build",
|
||||||
|
Usage: "a comma-separated list of build tags to consider satisfied during the build. For more information about build tags, see the description of build constraints in the documentation for the go/build package. (Earlier versions of Go used a space-separated list, and that form is deprecated but still recognized.)"},
|
||||||
|
{KeyPath: "Go.TrimPath", Name: "trimpath", SectionKey: "go-build",
|
||||||
|
Usage: `remove all file system paths from the resulting executable. Instead of absolute file system paths, the recorded file names will begin with either "go" (for the standard library), or a module path@version (when using modules), or a plain import path (when using GOPATH).`},
|
||||||
|
{KeyPath: "Go.ToolExec", Name: "toolexec", UsageArgument: "'cmd args'", SectionKey: "go-build",
|
||||||
|
Usage: "a program to use to invoke toolchain programs like vet and asm. For example, instead of running asm, the go command will run cmd args /path/to/asm <arguments for asm>'."},
|
||||||
|
{KeyPath: "Go.Work", Name: "work", SectionKey: "go-build",
|
||||||
|
Usage: "print the name of the temporary work directory and do not delete it when exiting."},
|
||||||
|
{KeyPath: "Go.X", Name: "x", SectionKey: "go-build",
|
||||||
|
Usage: "print the commands."},
|
||||||
|
}
|
||||||
|
|
||||||
|
// GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI
|
||||||
|
var GoRunFlags = GinkgoFlags{
|
||||||
|
{KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis",
|
||||||
|
Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`},
|
||||||
|
{KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis",
|
||||||
|
Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`},
|
||||||
|
{KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis",
|
||||||
|
Usage: `Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with rate. See 'go doc runtime.SetBlockProfileRate'. The profiler aims to sample, on average, one blocking event every n nanoseconds the program spends blocked. By default, if -test.blockprofile is set without this flag, all blocking events are recorded, equivalent to -test.blockprofilerate=1.`},
|
||||||
|
{KeyPath: "Go.CPUProfile", Name: "cpuprofile", UsageArgument: "file", SectionKey: "performance-analysis",
|
||||||
|
Usage: `Write a CPU profile to the specified file before exiting. Preserves test binary.`},
|
||||||
|
{KeyPath: "Go.MemProfile", Name: "memprofile", UsageArgument: "file", SectionKey: "performance-analysis",
|
||||||
|
Usage: `Write an allocation profile to the file after all tests have passed. Preserves test binary.`},
|
||||||
|
{KeyPath: "Go.MemProfileRate", Name: "memprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis",
|
||||||
|
Usage: `Enable more precise (and expensive) memory allocation profiles by setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'. To profile all memory allocations, use -test.memprofilerate=1.`},
|
||||||
|
{KeyPath: "Go.MutexProfile", Name: "mutexprofile", UsageArgument: "file", SectionKey: "performance-analysis",
|
||||||
|
Usage: `Write a mutex contention profile to the specified file when all tests are complete. Preserves test binary.`},
|
||||||
|
{KeyPath: "Go.MutexProfileFraction", Name: "mutexprofilefraction", UsageArgument: "n", SectionKey: "performance-analysis",
|
||||||
|
Usage: `if >= 0, calls runtime.SetMutexProfileFraction() Sample 1 in n stack traces of goroutines holding a contended mutex.`},
|
||||||
|
{KeyPath: "Go.Trace", Name: "execution-trace", UsageArgument: "file", ExportAs: "trace", SectionKey: "performance-analysis",
|
||||||
|
Usage: `Write an execution trace to the specified file before exiting.`},
|
||||||
|
}
|
||||||
|
|
||||||
|
// VetAndInitializeCLIAndGoConfig validates that the Ginkgo CLI's configuration is sound
|
||||||
|
// It returns a potentially mutated copy of the config that rationalizes the configuration to ensure consistency for downstream consumers
|
||||||
|
func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsConfig) (CLIConfig, GoFlagsConfig, []error) {
|
||||||
|
errors := []error{}
|
||||||
|
|
||||||
|
if cliConfig.Repeat > 0 && cliConfig.UntilItFails {
|
||||||
|
errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails())
|
||||||
|
}
|
||||||
|
|
||||||
|
//initialize the output directory
|
||||||
|
if cliConfig.OutputDir != "" {
|
||||||
|
err := os.MkdirAll(cliConfig.OutputDir, 0777)
|
||||||
|
if err != nil {
|
||||||
|
errors = append(errors, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//ensure cover mode is configured appropriately
|
||||||
|
if goFlagsConfig.CoverMode != "" || goFlagsConfig.CoverPkg != "" || goFlagsConfig.CoverProfile != "" {
|
||||||
|
goFlagsConfig.Cover = true
|
||||||
|
}
|
||||||
|
if goFlagsConfig.Cover && goFlagsConfig.CoverProfile == "" {
|
||||||
|
goFlagsConfig.CoverProfile = "coverprofile.out"
|
||||||
|
}
|
||||||
|
|
||||||
|
return cliConfig, goFlagsConfig, errors
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test
|
||||||
|
func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string) ([]string, error) {
|
||||||
|
// if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure
|
||||||
|
// the built test binary can generate a coverprofile
|
||||||
|
if goFlagsConfig.CoverProfile != "" {
|
||||||
|
goFlagsConfig.Cover = true
|
||||||
|
}
|
||||||
|
|
||||||
|
args := []string{"test", "-c", "-o", destination, packageToBuild}
|
||||||
|
goArgs, err := GenerateFlagArgs(
|
||||||
|
GoBuildFlags,
|
||||||
|
map[string]interface{}{
|
||||||
|
"Go": &goFlagsConfig,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return []string{}, err
|
||||||
|
}
|
||||||
|
args = append(args, goArgs...)
|
||||||
|
return args, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateGinkgoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled Ginkgo test binary
|
||||||
|
func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterConfig, goFlagsConfig GoFlagsConfig) ([]string, error) {
|
||||||
|
var flags GinkgoFlags
|
||||||
|
flags = SuiteConfigFlags.WithPrefix("ginkgo")
|
||||||
|
flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...)
|
||||||
|
flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...)
|
||||||
|
flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...)
|
||||||
|
bindings := map[string]interface{}{
|
||||||
|
"S": &suiteConfig,
|
||||||
|
"R": &reporterConfig,
|
||||||
|
"Go": &goFlagsConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
return GenerateFlagArgs(flags, bindings)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary
|
||||||
|
func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) {
|
||||||
|
flags := GoRunFlags.WithPrefix("test")
|
||||||
|
bindings := map[string]interface{}{
|
||||||
|
"Go": &goFlagsConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
args, err := GenerateFlagArgs(flags, bindings)
|
||||||
|
if err != nil {
|
||||||
|
return args, err
|
||||||
|
}
|
||||||
|
args = append(args, "--test.v")
|
||||||
|
return args, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BuildRunCommandFlagSet builds the FlagSet for the `ginkgo run` command
|
||||||
|
func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
|
||||||
|
flags := SuiteConfigFlags
|
||||||
|
flags = flags.CopyAppend(ReporterConfigFlags...)
|
||||||
|
flags = flags.CopyAppend(GinkgoCLISharedFlags...)
|
||||||
|
flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...)
|
||||||
|
flags = flags.CopyAppend(GinkgoCLIRunFlags...)
|
||||||
|
flags = flags.CopyAppend(GoBuildFlags...)
|
||||||
|
flags = flags.CopyAppend(GoRunFlags...)
|
||||||
|
|
||||||
|
bindings := map[string]interface{}{
|
||||||
|
"S": suiteConfig,
|
||||||
|
"R": reporterConfig,
|
||||||
|
"C": cliConfig,
|
||||||
|
"Go": goFlagsConfig,
|
||||||
|
"D": &deprecatedConfig{},
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewGinkgoFlagSet(flags, bindings, FlagSections)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BuildWatchCommandFlagSet builds the FlagSet for the `ginkgo watch` command
|
||||||
|
func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
|
||||||
|
flags := SuiteConfigFlags
|
||||||
|
flags = flags.CopyAppend(ReporterConfigFlags...)
|
||||||
|
flags = flags.CopyAppend(GinkgoCLISharedFlags...)
|
||||||
|
flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...)
|
||||||
|
flags = flags.CopyAppend(GinkgoCLIWatchFlags...)
|
||||||
|
flags = flags.CopyAppend(GoBuildFlags...)
|
||||||
|
flags = flags.CopyAppend(GoRunFlags...)
|
||||||
|
|
||||||
|
bindings := map[string]interface{}{
|
||||||
|
"S": suiteConfig,
|
||||||
|
"R": reporterConfig,
|
||||||
|
"C": cliConfig,
|
||||||
|
"Go": goFlagsConfig,
|
||||||
|
"D": &deprecatedConfig{},
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewGinkgoFlagSet(flags, bindings, FlagSections)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BuildBuildCommandFlagSet builds the FlagSet for the `ginkgo build` command
|
||||||
|
func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
|
||||||
|
flags := GinkgoCLISharedFlags
|
||||||
|
flags = flags.CopyAppend(GoBuildFlags...)
|
||||||
|
|
||||||
|
bindings := map[string]interface{}{
|
||||||
|
"C": cliConfig,
|
||||||
|
"Go": goFlagsConfig,
|
||||||
|
"D": &deprecatedConfig{},
|
||||||
|
}
|
||||||
|
|
||||||
|
flagSections := make(GinkgoFlagSections, len(FlagSections))
|
||||||
|
copy(flagSections, FlagSections)
|
||||||
|
for i := range flagSections {
|
||||||
|
if flagSections[i].Key == "multiple-suites" {
|
||||||
|
flagSections[i].Heading = "Building Multiple Suites"
|
||||||
|
}
|
||||||
|
if flagSections[i].Key == "go-build" {
|
||||||
|
flagSections[i] = GinkgoFlagSection{Key: "go-build", Style: "{{/}}", Heading: "Go Build Flags",
|
||||||
|
Description: "These flags are inherited from go build."}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewGinkgoFlagSet(flags, bindings, flagSections)
|
||||||
|
}
|
||||||
|
|
||||||
|
func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) {
|
||||||
|
flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package")
|
||||||
|
|
||||||
|
bindings := map[string]interface{}{
|
||||||
|
"C": cliConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
flagSections := make(GinkgoFlagSections, len(FlagSections))
|
||||||
|
copy(flagSections, FlagSections)
|
||||||
|
for i := range flagSections {
|
||||||
|
if flagSections[i].Key == "multiple-suites" {
|
||||||
|
flagSections[i].Heading = "Fetching Labels from Multiple Suites"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewGinkgoFlagSet(flags, bindings, flagSections)
|
||||||
|
}
|
141
vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
generated
vendored
Normal file
@ -0,0 +1,141 @@
package types

import (
	"strconv"
	"time"
)

/*
A set of deprecations to make the transition from v1 to v2 easier for users who have written custom reporters.
*/

type SuiteSummary = DeprecatedSuiteSummary
type SetupSummary = DeprecatedSetupSummary
type SpecSummary = DeprecatedSpecSummary
type SpecMeasurement = DeprecatedSpecMeasurement
type SpecComponentType = NodeType
type SpecFailure = DeprecatedSpecFailure

var (
	SpecComponentTypeInvalid = NodeTypeInvalid
	SpecComponentTypeContainer = NodeTypeContainer
	SpecComponentTypeIt = NodeTypeIt
	SpecComponentTypeBeforeEach = NodeTypeBeforeEach
	SpecComponentTypeJustBeforeEach = NodeTypeJustBeforeEach
	SpecComponentTypeAfterEach = NodeTypeAfterEach
	SpecComponentTypeJustAfterEach = NodeTypeJustAfterEach
	SpecComponentTypeBeforeSuite = NodeTypeBeforeSuite
	SpecComponentTypeSynchronizedBeforeSuite = NodeTypeSynchronizedBeforeSuite
	SpecComponentTypeAfterSuite = NodeTypeAfterSuite
	SpecComponentTypeSynchronizedAfterSuite = NodeTypeSynchronizedAfterSuite
)

type DeprecatedSuiteSummary struct {
	SuiteDescription string
	SuiteSucceeded bool
	SuiteID string

	NumberOfSpecsBeforeParallelization int
	NumberOfTotalSpecs int
	NumberOfSpecsThatWillBeRun int
	NumberOfPendingSpecs int
	NumberOfSkippedSpecs int
	NumberOfPassedSpecs int
	NumberOfFailedSpecs int
	NumberOfFlakedSpecs int
	RunTime time.Duration
}

type DeprecatedSetupSummary struct {
	ComponentType SpecComponentType
	CodeLocation CodeLocation

	State SpecState
	RunTime time.Duration
	Failure SpecFailure

	CapturedOutput string
	SuiteID string
}

type DeprecatedSpecSummary struct {
	ComponentTexts []string
	ComponentCodeLocations []CodeLocation

	State SpecState
	RunTime time.Duration
	Failure SpecFailure
	IsMeasurement bool
	NumberOfSamples int
	Measurements map[string]*DeprecatedSpecMeasurement

	CapturedOutput string
	SuiteID string
}

func (s DeprecatedSpecSummary) HasFailureState() bool {
	return s.State.Is(SpecStateFailureStates)
}

func (s DeprecatedSpecSummary) TimedOut() bool {
	return false
}

func (s DeprecatedSpecSummary) Panicked() bool {
	return s.State == SpecStatePanicked
}

func (s DeprecatedSpecSummary) Failed() bool {
	return s.State == SpecStateFailed
}

func (s DeprecatedSpecSummary) Passed() bool {
	return s.State == SpecStatePassed
}

func (s DeprecatedSpecSummary) Skipped() bool {
	return s.State == SpecStateSkipped
}

func (s DeprecatedSpecSummary) Pending() bool {
	return s.State == SpecStatePending
}

type DeprecatedSpecFailure struct {
	Message string
	Location CodeLocation
	ForwardedPanic string

	ComponentIndex int
	ComponentType SpecComponentType
	ComponentCodeLocation CodeLocation
}

type DeprecatedSpecMeasurement struct {
	Name string
	Info interface{}
	Order int

	Results []float64

	Smallest float64
	Largest float64
	Average float64
	StdDeviation float64

	SmallestLabel string
	LargestLabel string
	AverageLabel string
	Units string
	Precision int
}

func (s DeprecatedSpecMeasurement) PrecisionFmt() string {
	if s.Precision == 0 {
		return "%f"
	}

	str := strconv.Itoa(s.Precision)

	return "%." + str + "f"
}
170
vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
generated
vendored
Normal file
@ -0,0 +1,170 @@
package types

import (
	"os"
	"strconv"
	"strings"
	"sync"
	"unicode"

	"github.com/onsi/ginkgo/v2/formatter"
)

type Deprecation struct {
	Message string
	DocLink string
	Version string
}

type deprecations struct{}

var Deprecations = deprecations{}

func (d deprecations) CustomReporter() Deprecation {
	return Deprecation{
		Message: "Support for custom reporters has been removed in V2. Please read the documentation linked to below for Ginkgo's new behavior and for a migration path:",
		DocLink: "removed-custom-reporters",
		Version: "1.16.0",
	}
}

func (d deprecations) Async() Deprecation {
	return Deprecation{
		Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. Your test will run synchronously and the timeout will be ignored.",
		DocLink: "removed-async-testing",
		Version: "1.16.0",
	}
}

func (d deprecations) Measure() Deprecation {
	return Deprecation{
		Message: "Measure is deprecated and will be removed in Ginkgo V2. Please migrate to gomega/gmeasure.",
		DocLink: "removed-measure",
		Version: "1.16.3",
	}
}

func (d deprecations) ParallelNode() Deprecation {
	return Deprecation{
		Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. Please use GinkgoParallelProcess instead.",
		DocLink: "renamed-ginkgoparallelnode",
		Version: "1.16.4",
	}
}

func (d deprecations) CurrentGinkgoTestDescription() Deprecation {
	return Deprecation{
		Message: "CurrentGinkgoTestDescription() is deprecated in Ginkgo V2. Use CurrentSpecReport() instead.",
		DocLink: "changed-currentginkgotestdescription",
		Version: "1.16.0",
	}
}

func (d deprecations) Convert() Deprecation {
	return Deprecation{
		Message: "The convert command is deprecated in Ginkgo V2",
		DocLink: "removed-ginkgo-convert",
		Version: "1.16.0",
	}
}

func (d deprecations) Blur() Deprecation {
	return Deprecation{
		Message: "The blur command is deprecated in Ginkgo V2. Use 'ginkgo unfocus' instead.",
		Version: "1.16.0",
	}
}

func (d deprecations) Nodot() Deprecation {
	return Deprecation{
		Message: "The nodot command is deprecated in Ginkgo V2. Please either dot-import Ginkgo or use the package identifier in your code to references objects and types provided by Ginkgo and Gomega.",
		DocLink: "removed-ginkgo-nodot",
		Version: "1.16.0",
	}
}

type DeprecationTracker struct {
	deprecations map[Deprecation][]CodeLocation
	lock *sync.Mutex
}

func NewDeprecationTracker() *DeprecationTracker {
	return &DeprecationTracker{
		deprecations: map[Deprecation][]CodeLocation{},
		lock: &sync.Mutex{},
	}
}

func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...CodeLocation) {
	ackVersion := os.Getenv("ACK_GINKGO_DEPRECATIONS")
	if deprecation.Version != "" && ackVersion != "" {
		ack := ParseSemVer(ackVersion)
		version := ParseSemVer(deprecation.Version)
		if ack.GreaterThanOrEqualTo(version) {
			return
		}
	}

	d.lock.Lock()
	defer d.lock.Unlock()
	if len(cl) == 1 {
		d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0])
	} else {
		d.deprecations[deprecation] = []CodeLocation{}
	}
}

func (d *DeprecationTracker) DidTrackDeprecations() bool {
	d.lock.Lock()
	defer d.lock.Unlock()
	return len(d.deprecations) > 0
}

func (d *DeprecationTracker) DeprecationsReport() string {
	d.lock.Lock()
	defer d.lock.Unlock()
	out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n")
	out += formatter.F("{{light-yellow}}============================================={{/}}\n")
	for deprecation, locations := range d.deprecations {
		out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n")
		if deprecation.DocLink != "" {
			out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://onsi.github.io/ginkgo/MIGRATING_TO_V2#%s{{/}}\n", deprecation.DocLink)
		}
		for _, location := range locations {
			out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location)
		}
	}
	out += formatter.F("\n{{gray}}To silence deprecations that can be silenced set the following environment variable:{{/}}\n")
	out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", VERSION)
	return out
}

type SemVer struct {
	Major int
	Minor int
	Patch int
}

func (s SemVer) GreaterThanOrEqualTo(o SemVer) bool {
	return (s.Major > o.Major) ||
		(s.Major == o.Major && s.Minor > o.Minor) ||
		(s.Major == o.Major && s.Minor == o.Minor && s.Patch >= o.Patch)
}

func ParseSemVer(semver string) SemVer {
	out := SemVer{}
	semver = strings.TrimFunc(semver, func(r rune) bool {
		return !(unicode.IsNumber(r) || r == '.')
	})
	components := strings.Split(semver, ".")
	if len(components) > 0 {
		out.Major, _ = strconv.Atoi(components[0])
	}
	if len(components) > 1 {
		out.Minor, _ = strconv.Atoi(components[1])
	}
	if len(components) > 2 {
		out.Patch, _ = strconv.Atoi(components[2])
	}
	return out
}
43
vendor/github.com/onsi/ginkgo/v2/types/enum_support.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
package types

import "encoding/json"

type EnumSupport struct {
	toString map[uint]string
	toEnum map[string]uint
	maxEnum uint
}

func NewEnumSupport(toString map[uint]string) EnumSupport {
	toEnum, maxEnum := map[string]uint{}, uint(0)
	for k, v := range toString {
		toEnum[v] = k
		if maxEnum < k {
			maxEnum = k
		}
	}
	return EnumSupport{toString: toString, toEnum: toEnum, maxEnum: maxEnum}
}

func (es EnumSupport) String(e uint) string {
	if e > es.maxEnum {
		return es.toString[0]
	}
	return es.toString[e]
}

func (es EnumSupport) UnmarshJSON(b []byte) (uint, error) {
	var dec string
	if err := json.Unmarshal(b, &dec); err != nil {
		return 0, err
	}
	out := es.toEnum[dec] // if we miss we get 0 which is what we want anyway
	return out, nil
}

func (es EnumSupport) MarshJSON(e uint) ([]byte, error) {
	if e == 0 || e > es.maxEnum {
		return json.Marshal(nil)
	}
	return json.Marshal(es.toString[e])
}
Some files were not shown because too many files have changed in this diff