mirror of
https://github.com/ceph/ceph-csi.git
synced 2024-11-26 00:00:23 +00:00
rebase: update github.com/onsi/ginkgo to v2.9.5
There was a `replace` statement in `go.mod` that prevented Ginkgo from updating. Kubernetes 1.27 requires a new Ginkgo version. Signed-off-by: Niels de Vos <ndevos@ibm.com>
This commit is contained in:
parent
b1a4590967
commit
6709cdd1d0
6
go.mod
6
go.mod
@ -83,6 +83,7 @@ require (
|
|||||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||||
github.com/go-openapi/jsonreference v0.20.1 // indirect
|
github.com/go-openapi/jsonreference v0.20.1 // indirect
|
||||||
github.com/go-openapi/swag v0.22.3 // indirect
|
github.com/go-openapi/swag v0.22.3 // indirect
|
||||||
|
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
|
||||||
github.com/gogo/protobuf v1.3.2 // indirect
|
github.com/gogo/protobuf v1.3.2 // indirect
|
||||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||||
github.com/golang/snappy v0.0.4 // indirect
|
github.com/golang/snappy v0.0.4 // indirect
|
||||||
@ -90,6 +91,7 @@ require (
|
|||||||
github.com/google/gnostic v0.6.9 // indirect
|
github.com/google/gnostic v0.6.9 // indirect
|
||||||
github.com/google/go-cmp v0.5.9 // indirect
|
github.com/google/go-cmp v0.5.9 // indirect
|
||||||
github.com/google/gofuzz v1.2.0 // indirect
|
github.com/google/gofuzz v1.2.0 // indirect
|
||||||
|
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
|
||||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||||
@ -152,10 +154,11 @@ require (
|
|||||||
go.uber.org/multierr v1.8.0 // indirect
|
go.uber.org/multierr v1.8.0 // indirect
|
||||||
go.uber.org/zap v1.24.0 // indirect
|
go.uber.org/zap v1.24.0 // indirect
|
||||||
golang.org/x/oauth2 v0.6.0 // indirect
|
golang.org/x/oauth2 v0.6.0 // indirect
|
||||||
golang.org/x/sync v0.1.0 // indirect
|
golang.org/x/sync v0.2.0 // indirect
|
||||||
golang.org/x/term v0.8.0 // indirect
|
golang.org/x/term v0.8.0 // indirect
|
||||||
golang.org/x/text v0.9.0 // indirect
|
golang.org/x/text v0.9.0 // indirect
|
||||||
golang.org/x/time v0.3.0 // indirect
|
golang.org/x/time v0.3.0 // indirect
|
||||||
|
golang.org/x/tools v0.9.1 // indirect
|
||||||
gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect
|
gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect
|
||||||
google.golang.org/api v0.110.0 // indirect
|
google.golang.org/api v0.110.0 // indirect
|
||||||
google.golang.org/appengine v1.6.7 // indirect
|
google.golang.org/appengine v1.6.7 // indirect
|
||||||
@ -184,7 +187,6 @@ replace (
|
|||||||
github.com/ceph/ceph-csi/api => ./api
|
github.com/ceph/ceph-csi/api => ./api
|
||||||
|
|
||||||
// Required for kubernetes 1.26
|
// Required for kubernetes 1.26
|
||||||
github.com/onsi/ginkgo/v2 => github.com/onsi/ginkgo/v2 v2.4.0
|
|
||||||
github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3
|
github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3
|
||||||
gomodules.xyz/jsonpatch/v2 => github.com/gomodules/jsonpatch/v2 v2.2.0
|
gomodules.xyz/jsonpatch/v2 => github.com/gomodules/jsonpatch/v2 v2.2.0
|
||||||
//
|
//
|
||||||
|
55
go.sum
55
go.sum
@ -324,6 +324,7 @@ github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60
|
|||||||
github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
|
github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
|
||||||
github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
|
github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
|
||||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||||
|
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||||
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
|
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
|
||||||
@ -381,6 +382,8 @@ github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG
|
|||||||
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
|
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
|
||||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||||
|
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||||
|
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||||
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||||
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||||
github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||||
@ -500,6 +503,7 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe
|
|||||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
|
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
|
||||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
@ -885,6 +889,8 @@ github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/
|
|||||||
github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s=
|
github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s=
|
||||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||||
github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
|
github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
|
||||||
|
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||||
|
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||||
github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
|
github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
|
||||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||||
@ -895,13 +901,35 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v
|
|||||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
|
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||||
|
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||||
|
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
|
||||||
|
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
|
||||||
|
github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
|
||||||
|
github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
|
||||||
github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
|
github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
|
||||||
|
github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
|
||||||
|
github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
|
||||||
|
github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
|
||||||
|
github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk=
|
||||||
|
github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
|
||||||
|
github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
|
||||||
|
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
|
||||||
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||||
|
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||||
|
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||||
|
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
||||||
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
|
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
|
||||||
|
github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
|
||||||
|
github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
|
||||||
github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
|
github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
|
||||||
|
github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
|
||||||
|
github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
|
||||||
|
github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
|
||||||
|
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
|
||||||
|
github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw=
|
||||||
github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
|
github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
|
||||||
github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU=
|
github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU=
|
||||||
github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4=
|
github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4=
|
||||||
@ -1279,6 +1307,7 @@ golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
|||||||
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
|
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
@ -1310,6 +1339,7 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
|
|||||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
@ -1325,6 +1355,7 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
|
|||||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
||||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||||
|
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
@ -1333,10 +1364,13 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
|
|||||||
golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
|
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||||
|
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||||
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||||
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||||
|
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||||
@ -1378,8 +1412,9 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
|
|||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
|
||||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
|
||||||
|
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
@ -1404,10 +1439,13 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||||||
golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
@ -1434,6 +1472,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
@ -1459,7 +1498,9 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
@ -1467,7 +1508,9 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
|
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
|
||||||
@ -1476,7 +1519,9 @@ golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXR
|
|||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
|
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||||
|
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||||
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
|
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
|
||||||
@ -1493,6 +1538,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
|
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||||
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
||||||
@ -1562,6 +1608,7 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u
|
|||||||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||||
@ -1570,12 +1617,15 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
|||||||
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
|
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||||
golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||||
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
|
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
|
||||||
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
|
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
|
||||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||||
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
|
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
|
||||||
|
golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=
|
||||||
|
golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
|
||||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
@ -1786,6 +1836,7 @@ gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76
|
|||||||
gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||||
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||||
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
|
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
|
||||||
|
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
14
vendor/github.com/go-task/slim-sprig/.editorconfig
generated
vendored
Normal file
14
vendor/github.com/go-task/slim-sprig/.editorconfig
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
# editorconfig.org
|
||||||
|
|
||||||
|
root = true
|
||||||
|
|
||||||
|
[*]
|
||||||
|
insert_final_newline = true
|
||||||
|
charset = utf-8
|
||||||
|
trim_trailing_whitespace = true
|
||||||
|
indent_style = tab
|
||||||
|
indent_size = 8
|
||||||
|
|
||||||
|
[*.{md,yml,yaml,json}]
|
||||||
|
indent_style = space
|
||||||
|
indent_size = 2
|
1
vendor/github.com/go-task/slim-sprig/.gitattributes
generated
vendored
Normal file
1
vendor/github.com/go-task/slim-sprig/.gitattributes
generated
vendored
Normal file
@ -0,0 +1 @@
|
|||||||
|
* text=auto
|
2
vendor/github.com/go-task/slim-sprig/.gitignore
generated
vendored
Normal file
2
vendor/github.com/go-task/slim-sprig/.gitignore
generated
vendored
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
vendor/
|
||||||
|
/.glide
|
364
vendor/github.com/go-task/slim-sprig/CHANGELOG.md
generated
vendored
Normal file
364
vendor/github.com/go-task/slim-sprig/CHANGELOG.md
generated
vendored
Normal file
@ -0,0 +1,364 @@
|
|||||||
|
# Changelog
|
||||||
|
|
||||||
|
## Release 3.2.0 (2020-12-14)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- #211: Added randInt function (thanks @kochurovro)
|
||||||
|
- #223: Added fromJson and mustFromJson functions (thanks @mholt)
|
||||||
|
- #242: Added a bcrypt function (thanks @robbiet480)
|
||||||
|
- #253: Added randBytes function (thanks @MikaelSmith)
|
||||||
|
- #254: Added dig function for dicts (thanks @nyarly)
|
||||||
|
- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton)
|
||||||
|
- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl)
|
||||||
|
- #268: Added and and all functions for testing conditions (thanks @phuslu)
|
||||||
|
- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf
|
||||||
|
(thanks @andrewmostello)
|
||||||
|
- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek)
|
||||||
|
- #270: Extend certificate functions to handle non-RSA keys + add support for
|
||||||
|
ed25519 keys (thanks @misberner)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer
|
||||||
|
- Using semver 3.1.1 and mergo 0.3.11
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- #249: Fix htmlDateInZone example (thanks @spawnia)
|
||||||
|
|
||||||
|
NOTE: The dependency github.com/imdario/mergo reverted the breaking change in
|
||||||
|
0.3.9 via 0.3.10 release.
|
||||||
|
|
||||||
|
## Release 3.1.0 (2020-04-16)
|
||||||
|
|
||||||
|
NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9
|
||||||
|
that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8.
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- #225: Added support for generating htpasswd hash (thanks @rustycl0ck)
|
||||||
|
- #224: Added duration filter (thanks @frebib)
|
||||||
|
- #205: Added `seq` function (thanks @thadc23)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- #203: Unlambda functions with correct signature (thanks @muesli)
|
||||||
|
- #236: Updated the license formatting for GitHub display purposes
|
||||||
|
- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9
|
||||||
|
as it causes a breaking change for sprig. That issue is tracked at
|
||||||
|
https://github.com/imdario/mergo/issues/139
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- #229: Fix `seq` example in docs (thanks @kalmant)
|
||||||
|
|
||||||
|
## Release 3.0.2 (2019-12-13)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- #220: Updating to semver v3.0.3 to fix issue with <= ranges
|
||||||
|
- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya)
|
||||||
|
|
||||||
|
## Release 3.0.1 (2019-12-08)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- #212: Updated semver fixing broken constraint checking with ^0.0
|
||||||
|
|
||||||
|
## Release 3.0.0 (2019-10-02)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- #187: Added durationRound function (thanks @yjp20)
|
||||||
|
- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn)
|
||||||
|
- #193: Added toRawJson support (thanks @Dean-Coakley)
|
||||||
|
- #197: Added get support to dicts (thanks @Dean-Coakley)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- #186: Moving dependency management to Go modules
|
||||||
|
- #186: Updated semver to v3. This has changes in the way ^ is handled
|
||||||
|
- #194: Updated documentation on merging and how it copies. Added example using deepCopy
|
||||||
|
- #196: trunc now supports negative values (thanks @Dean-Coakley)
|
||||||
|
|
||||||
|
## Release 2.22.0 (2019-10-02)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos)
|
||||||
|
- #195: Added deepCopy function for use with dicts
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- Updated merge and mergeOverwrite documentation to explain copying and how to
|
||||||
|
use deepCopy with it
|
||||||
|
|
||||||
|
## Release 2.21.0 (2019-09-18)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- #122: Added encryptAES/decryptAES functions (thanks @n0madic)
|
||||||
|
- #128: Added toDecimal support (thanks @Dean-Coakley)
|
||||||
|
- #169: Added list contcat (thanks @astorath)
|
||||||
|
- #174: Added deepEqual function (thanks @bonifaido)
|
||||||
|
- #170: Added url parse and join functions (thanks @astorath)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- #172: Fix semver wildcard example (thanks @piepmatz)
|
||||||
|
- #175: Fix dateInZone doc example (thanks @s3than)
|
||||||
|
|
||||||
|
## Release 2.20.0 (2019-06-18)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- #164: Adding function to get unix epoch for a time (@mattfarina)
|
||||||
|
- #166: Adding tests for date_in_zone (@mattfarina)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam)
|
||||||
|
- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19)
|
||||||
|
- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
## Release 2.19.0 (2019-03-02)
|
||||||
|
|
||||||
|
IMPORTANT: This release reverts a change from 2.18.0
|
||||||
|
|
||||||
|
In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random.
|
||||||
|
|
||||||
|
We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience.
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- Fix substr panic 35fb796 (Alexey igrychev)
|
||||||
|
- Remove extra period 1eb7729 (Matthew Lorimor)
|
||||||
|
- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor)
|
||||||
|
- README edits/fixes/suggestions 08fe136 (Lauri Apple)
|
||||||
|
|
||||||
|
|
||||||
|
## Release 2.18.0 (2019-02-12)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- Added mergeOverwrite function
|
||||||
|
- cryptographic functions that use secure random (see fe1de12)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer)
|
||||||
|
- Handle has for nil list 9c10885 (Daniel Cohen)
|
||||||
|
- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder)
|
||||||
|
- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic)
|
||||||
|
- Replace outdated goutils imports 01893d2 (Matthew Lorimor)
|
||||||
|
- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor)
|
||||||
|
- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder)
|
||||||
|
- Fix substr var names and comments d581f80 (Dean Coakley)
|
||||||
|
- Fix substr documentation 2737203 (Dean Coakley)
|
||||||
|
|
||||||
|
## Release 2.17.1 (2019-01-03)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml.
|
||||||
|
|
||||||
|
## Release 2.17.0 (2019-01-03)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- adds alder32sum function and test 6908fc2 (marshallford)
|
||||||
|
- Added kebabcase function ca331a1 (Ilyes512)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- Update goutils to 1.1.0 4e1125d (Matt Butcher)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- Fix 'has' documentation e3f2a85 (dean-coakley)
|
||||||
|
- docs(dict): fix typo in pick example dc424f9 (Dustin Specker)
|
||||||
|
- fixes spelling errors... not sure how that happened 4cf188a (marshallford)
|
||||||
|
|
||||||
|
## Release 2.16.0 (2018-08-13)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- add splitn function fccb0b0 (Helgi Þorbjörnsson)
|
||||||
|
- Add slice func df28ca7 (gongdo)
|
||||||
|
- Generate serial number a3bdffd (Cody Coons)
|
||||||
|
- Extract values of dict with values function df39312 (Lawrence Jones)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- Modify panic message for list.slice ae38335 (gongdo)
|
||||||
|
- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap)
|
||||||
|
- Remove duplicated documentation 1d97af1 (Matthew Fisher)
|
||||||
|
- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- Fix file permissions c5f40b5 (gongdo)
|
||||||
|
- Fix example for buildCustomCert 7779e0d (Tin Lam)
|
||||||
|
|
||||||
|
## Release 2.15.0 (2018-04-02)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- #68 and #69: Add json helpers to docs (thanks @arunvelsriram)
|
||||||
|
- #66: Add ternary function (thanks @binoculars)
|
||||||
|
- #67: Allow keys function to take multiple dicts (thanks @binoculars)
|
||||||
|
- #89: Added sha1sum to crypto function (thanks @benkeil)
|
||||||
|
- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei)
|
||||||
|
- #92: Add travis testing for go 1.10
|
||||||
|
- #93: Adding appveyor config for windows testing
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- #90: Updating to more recent dependencies
|
||||||
|
- #73: replace satori/go.uuid with google/uuid (thanks @petterw)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- #76: Fixed documentation typos (thanks @Thiht)
|
||||||
|
- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older
|
||||||
|
|
||||||
|
## Release 2.14.1 (2017-12-01)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- #60: Fix typo in function name documentation (thanks @neil-ca-moore)
|
||||||
|
- #61: Removing line with {{ due to blocking github pages genertion
|
||||||
|
- #64: Update the list functions to handle int, string, and other slices for compatibility
|
||||||
|
|
||||||
|
## Release 2.14.0 (2017-10-06)
|
||||||
|
|
||||||
|
This new version of Sprig adds a set of functions for generating and working with SSL certificates.
|
||||||
|
|
||||||
|
- `genCA` generates an SSL Certificate Authority
|
||||||
|
- `genSelfSignedCert` generates an SSL self-signed certificate
|
||||||
|
- `genSignedCert` generates an SSL certificate and key based on a given CA
|
||||||
|
|
||||||
|
## Release 2.13.0 (2017-09-18)
|
||||||
|
|
||||||
|
This release adds new functions, including:
|
||||||
|
|
||||||
|
- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions
|
||||||
|
- `floor`, `ceil`, and `round` math functions
|
||||||
|
- `toDate` converts a string to a date
|
||||||
|
- `nindent` is just like `indent` but also prepends a new line
|
||||||
|
- `ago` returns the time from `time.Now`
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- #40: Added basic regex functionality (thanks @alanquillin)
|
||||||
|
- #41: Added ceil floor and round functions (thanks @alanquillin)
|
||||||
|
- #48: Added toDate function (thanks @andreynering)
|
||||||
|
- #50: Added nindent function (thanks @binoculars)
|
||||||
|
- #46: Added ago function (thanks @slayer)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- #51: Updated godocs to include new string functions (thanks @curtisallen)
|
||||||
|
- #49: Added ability to merge multiple dicts (thanks @binoculars)
|
||||||
|
|
||||||
|
## Release 2.12.0 (2017-05-17)
|
||||||
|
|
||||||
|
- `snakecase`, `camelcase`, and `shuffle` are three new string functions
|
||||||
|
- `fail` allows you to bail out of a template render when conditions are not met
|
||||||
|
|
||||||
|
## Release 2.11.0 (2017-05-02)
|
||||||
|
|
||||||
|
- Added `toJson` and `toPrettyJson`
|
||||||
|
- Added `merge`
|
||||||
|
- Refactored documentation
|
||||||
|
|
||||||
|
## Release 2.10.0 (2017-03-15)
|
||||||
|
|
||||||
|
- Added `semver` and `semverCompare` for Semantic Versions
|
||||||
|
- `list` replaces `tuple`
|
||||||
|
- Fixed issue with `join`
|
||||||
|
- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
|
||||||
|
|
||||||
|
## Release 2.9.0 (2017-02-23)
|
||||||
|
|
||||||
|
- Added `splitList` to split a list
|
||||||
|
- Added crypto functions of `genPrivateKey` and `derivePassword`
|
||||||
|
|
||||||
|
## Release 2.8.0 (2016-12-21)
|
||||||
|
|
||||||
|
- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`)
|
||||||
|
- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`)
|
||||||
|
|
||||||
|
## Release 2.7.0 (2016-12-01)
|
||||||
|
|
||||||
|
- Added `sha256sum` to generate a hash of an input
|
||||||
|
- Added functions to convert a numeric or string to `int`, `int64`, `float64`
|
||||||
|
|
||||||
|
## Release 2.6.0 (2016-10-03)
|
||||||
|
|
||||||
|
- Added a `uuidv4` template function for generating UUIDs inside of a template.
|
||||||
|
|
||||||
|
## Release 2.5.0 (2016-08-19)
|
||||||
|
|
||||||
|
- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions
|
||||||
|
- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`)
|
||||||
|
- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0
|
||||||
|
|
||||||
|
## Release 2.4.0 (2016-08-16)
|
||||||
|
|
||||||
|
- Adds two functions: `until` and `untilStep`
|
||||||
|
|
||||||
|
## Release 2.3.0 (2016-06-21)
|
||||||
|
|
||||||
|
- cat: Concatenate strings with whitespace separators.
|
||||||
|
- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First"
|
||||||
|
- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos"
|
||||||
|
- indent: Indent blocks of text in a way that is sensitive to "\n" characters.
|
||||||
|
|
||||||
|
## Release 2.2.0 (2016-04-21)
|
||||||
|
|
||||||
|
- Added a `genPrivateKey` function (Thanks @bacongobbler)
|
||||||
|
|
||||||
|
## Release 2.1.0 (2016-03-30)
|
||||||
|
|
||||||
|
- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`.
|
||||||
|
- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output.
|
||||||
|
|
||||||
|
## Release 2.0.0 (2016-03-29)
|
||||||
|
|
||||||
|
Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented.
|
||||||
|
|
||||||
|
- `min` complements `max` (formerly `biggest`)
|
||||||
|
- `empty` indicates that a value is the empty value for its type
|
||||||
|
- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}`
|
||||||
|
- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}`
|
||||||
|
- Date formatters have been added for HTML dates (as used in `date` input fields)
|
||||||
|
- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).
|
||||||
|
|
||||||
|
## Release 1.2.0 (2016-02-01)
|
||||||
|
|
||||||
|
- Added quote and squote
|
||||||
|
- Added b32enc and b32dec
|
||||||
|
- add now takes varargs
|
||||||
|
- biggest now takes varargs
|
||||||
|
|
||||||
|
## Release 1.1.0 (2015-12-29)
|
||||||
|
|
||||||
|
- Added #4: Added contains function. strings.Contains, but with the arguments
|
||||||
|
switched to simplify common pipelines. (thanks krancour)
|
||||||
|
- Added Travis-CI testing support
|
||||||
|
|
||||||
|
## Release 1.0.0 (2015-12-23)
|
||||||
|
|
||||||
|
- Initial release
|
19
vendor/github.com/go-task/slim-sprig/LICENSE.txt
generated
vendored
Normal file
19
vendor/github.com/go-task/slim-sprig/LICENSE.txt
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
Copyright (C) 2013-2020 Masterminds
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
73
vendor/github.com/go-task/slim-sprig/README.md
generated
vendored
Normal file
73
vendor/github.com/go-task/slim-sprig/README.md
generated
vendored
Normal file
@ -0,0 +1,73 @@
|
|||||||
|
# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig)
|
||||||
|
|
||||||
|
Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with
|
||||||
|
all functions that depend on external (non standard library) or crypto packages
|
||||||
|
removed.
|
||||||
|
The reason for this is to make this library more lightweight. Most of these
|
||||||
|
functions (specially crypto ones) are not needed on most apps, but costs a lot
|
||||||
|
in terms of binary size and compilation time.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for
|
||||||
|
detailed instructions and code snippets for the >100 template functions available.
|
||||||
|
|
||||||
|
**Go developers**: If you'd like to include Slim-Sprig as a library in your program,
|
||||||
|
our API documentation is available [at GoDoc.org](http://godoc.org/github.com/go-task/slim-sprig).
|
||||||
|
|
||||||
|
For standard usage, read on.
|
||||||
|
|
||||||
|
### Load the Slim-Sprig library
|
||||||
|
|
||||||
|
To load the Slim-Sprig `FuncMap`:
|
||||||
|
|
||||||
|
```go
|
||||||
|
|
||||||
|
import (
|
||||||
|
"html/template"
|
||||||
|
|
||||||
|
"github.com/go-task/slim-sprig"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This example illustrates that the FuncMap *must* be set before the
|
||||||
|
// templates themselves are loaded.
|
||||||
|
tpl := template.Must(
|
||||||
|
template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html")
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Calling the functions inside of templates
|
||||||
|
|
||||||
|
By convention, all functions are lowercase. This seems to follow the Go
|
||||||
|
idiom for template functions (as opposed to template methods, which are
|
||||||
|
TitleCase). For example, this:
|
||||||
|
|
||||||
|
```
|
||||||
|
{{ "hello!" | upper | repeat 5 }}
|
||||||
|
```
|
||||||
|
|
||||||
|
produces this:
|
||||||
|
|
||||||
|
```
|
||||||
|
HELLO!HELLO!HELLO!HELLO!HELLO!
|
||||||
|
```
|
||||||
|
|
||||||
|
## Principles Driving Our Function Selection
|
||||||
|
|
||||||
|
We followed these principles to decide which functions to add and how to implement them:
|
||||||
|
|
||||||
|
- Use template functions to build layout. The following
|
||||||
|
types of operations are within the domain of template functions:
|
||||||
|
- Formatting
|
||||||
|
- Layout
|
||||||
|
- Simple type conversions
|
||||||
|
- Utilities that assist in handling common formatting and layout needs (e.g. arithmetic)
|
||||||
|
- Template functions should not return errors unless there is no way to print
|
||||||
|
a sensible value. For example, converting a string to an integer should not
|
||||||
|
produce an error if conversion fails. Instead, it should display a default
|
||||||
|
value.
|
||||||
|
- Simple math is necessary for grid layouts, pagers, and so on. Complex math
|
||||||
|
(anything other than arithmetic) should be done outside of templates.
|
||||||
|
- Template functions only deal with the data passed into them. They never retrieve
|
||||||
|
data from a source.
|
||||||
|
- Finally, do not override core Go template functions.
|
12
vendor/github.com/go-task/slim-sprig/Taskfile.yml
generated
vendored
Normal file
12
vendor/github.com/go-task/slim-sprig/Taskfile.yml
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
# https://taskfile.dev
|
||||||
|
|
||||||
|
version: '2'
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
default:
|
||||||
|
cmds:
|
||||||
|
- task: test
|
||||||
|
|
||||||
|
test:
|
||||||
|
cmds:
|
||||||
|
- go test -v .
|
24
vendor/github.com/go-task/slim-sprig/crypto.go
generated
vendored
Normal file
24
vendor/github.com/go-task/slim-sprig/crypto.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
package sprig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha1"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"hash/adler32"
|
||||||
|
)
|
||||||
|
|
||||||
|
func sha256sum(input string) string {
|
||||||
|
hash := sha256.Sum256([]byte(input))
|
||||||
|
return hex.EncodeToString(hash[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
func sha1sum(input string) string {
|
||||||
|
hash := sha1.Sum([]byte(input))
|
||||||
|
return hex.EncodeToString(hash[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
func adler32sum(input string) string {
|
||||||
|
hash := adler32.Checksum([]byte(input))
|
||||||
|
return fmt.Sprintf("%d", hash)
|
||||||
|
}
|
152
vendor/github.com/go-task/slim-sprig/date.go
generated
vendored
Normal file
152
vendor/github.com/go-task/slim-sprig/date.go
generated
vendored
Normal file
@ -0,0 +1,152 @@
|
|||||||
|
package sprig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Given a format and a date, format the date string.
|
||||||
|
//
|
||||||
|
// Date can be a `time.Time` or an `int, int32, int64`.
|
||||||
|
// In the later case, it is treated as seconds since UNIX
|
||||||
|
// epoch.
|
||||||
|
func date(fmt string, date interface{}) string {
|
||||||
|
return dateInZone(fmt, date, "Local")
|
||||||
|
}
|
||||||
|
|
||||||
|
func htmlDate(date interface{}) string {
|
||||||
|
return dateInZone("2006-01-02", date, "Local")
|
||||||
|
}
|
||||||
|
|
||||||
|
func htmlDateInZone(date interface{}, zone string) string {
|
||||||
|
return dateInZone("2006-01-02", date, zone)
|
||||||
|
}
|
||||||
|
|
||||||
|
func dateInZone(fmt string, date interface{}, zone string) string {
|
||||||
|
var t time.Time
|
||||||
|
switch date := date.(type) {
|
||||||
|
default:
|
||||||
|
t = time.Now()
|
||||||
|
case time.Time:
|
||||||
|
t = date
|
||||||
|
case *time.Time:
|
||||||
|
t = *date
|
||||||
|
case int64:
|
||||||
|
t = time.Unix(date, 0)
|
||||||
|
case int:
|
||||||
|
t = time.Unix(int64(date), 0)
|
||||||
|
case int32:
|
||||||
|
t = time.Unix(int64(date), 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
loc, err := time.LoadLocation(zone)
|
||||||
|
if err != nil {
|
||||||
|
loc, _ = time.LoadLocation("UTC")
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.In(loc).Format(fmt)
|
||||||
|
}
|
||||||
|
|
||||||
|
func dateModify(fmt string, date time.Time) time.Time {
|
||||||
|
d, err := time.ParseDuration(fmt)
|
||||||
|
if err != nil {
|
||||||
|
return date
|
||||||
|
}
|
||||||
|
return date.Add(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
func mustDateModify(fmt string, date time.Time) (time.Time, error) {
|
||||||
|
d, err := time.ParseDuration(fmt)
|
||||||
|
if err != nil {
|
||||||
|
return time.Time{}, err
|
||||||
|
}
|
||||||
|
return date.Add(d), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func dateAgo(date interface{}) string {
|
||||||
|
var t time.Time
|
||||||
|
|
||||||
|
switch date := date.(type) {
|
||||||
|
default:
|
||||||
|
t = time.Now()
|
||||||
|
case time.Time:
|
||||||
|
t = date
|
||||||
|
case int64:
|
||||||
|
t = time.Unix(date, 0)
|
||||||
|
case int:
|
||||||
|
t = time.Unix(int64(date), 0)
|
||||||
|
}
|
||||||
|
// Drop resolution to seconds
|
||||||
|
duration := time.Since(t).Round(time.Second)
|
||||||
|
return duration.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func duration(sec interface{}) string {
|
||||||
|
var n int64
|
||||||
|
switch value := sec.(type) {
|
||||||
|
default:
|
||||||
|
n = 0
|
||||||
|
case string:
|
||||||
|
n, _ = strconv.ParseInt(value, 10, 64)
|
||||||
|
case int64:
|
||||||
|
n = value
|
||||||
|
}
|
||||||
|
return (time.Duration(n) * time.Second).String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func durationRound(duration interface{}) string {
|
||||||
|
var d time.Duration
|
||||||
|
switch duration := duration.(type) {
|
||||||
|
default:
|
||||||
|
d = 0
|
||||||
|
case string:
|
||||||
|
d, _ = time.ParseDuration(duration)
|
||||||
|
case int64:
|
||||||
|
d = time.Duration(duration)
|
||||||
|
case time.Time:
|
||||||
|
d = time.Since(duration)
|
||||||
|
}
|
||||||
|
|
||||||
|
u := uint64(d)
|
||||||
|
neg := d < 0
|
||||||
|
if neg {
|
||||||
|
u = -u
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
year = uint64(time.Hour) * 24 * 365
|
||||||
|
month = uint64(time.Hour) * 24 * 30
|
||||||
|
day = uint64(time.Hour) * 24
|
||||||
|
hour = uint64(time.Hour)
|
||||||
|
minute = uint64(time.Minute)
|
||||||
|
second = uint64(time.Second)
|
||||||
|
)
|
||||||
|
switch {
|
||||||
|
case u > year:
|
||||||
|
return strconv.FormatUint(u/year, 10) + "y"
|
||||||
|
case u > month:
|
||||||
|
return strconv.FormatUint(u/month, 10) + "mo"
|
||||||
|
case u > day:
|
||||||
|
return strconv.FormatUint(u/day, 10) + "d"
|
||||||
|
case u > hour:
|
||||||
|
return strconv.FormatUint(u/hour, 10) + "h"
|
||||||
|
case u > minute:
|
||||||
|
return strconv.FormatUint(u/minute, 10) + "m"
|
||||||
|
case u > second:
|
||||||
|
return strconv.FormatUint(u/second, 10) + "s"
|
||||||
|
}
|
||||||
|
return "0s"
|
||||||
|
}
|
||||||
|
|
||||||
|
func toDate(fmt, str string) time.Time {
|
||||||
|
t, _ := time.ParseInLocation(fmt, str, time.Local)
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
func mustToDate(fmt, str string) (time.Time, error) {
|
||||||
|
return time.ParseInLocation(fmt, str, time.Local)
|
||||||
|
}
|
||||||
|
|
||||||
|
func unixEpoch(date time.Time) string {
|
||||||
|
return strconv.FormatInt(date.Unix(), 10)
|
||||||
|
}
|
163
vendor/github.com/go-task/slim-sprig/defaults.go
generated
vendored
Normal file
163
vendor/github.com/go-task/slim-sprig/defaults.go
generated
vendored
Normal file
@ -0,0 +1,163 @@
|
|||||||
|
package sprig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"math/rand"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
rand.Seed(time.Now().UnixNano())
|
||||||
|
}
|
||||||
|
|
||||||
|
// dfault checks whether `given` is set, and returns default if not set.
|
||||||
|
//
|
||||||
|
// This returns `d` if `given` appears not to be set, and `given` otherwise.
|
||||||
|
//
|
||||||
|
// For numeric types 0 is unset.
|
||||||
|
// For strings, maps, arrays, and slices, len() = 0 is considered unset.
|
||||||
|
// For bool, false is unset.
|
||||||
|
// Structs are never considered unset.
|
||||||
|
//
|
||||||
|
// For everything else, including pointers, a nil value is unset.
|
||||||
|
func dfault(d interface{}, given ...interface{}) interface{} {
|
||||||
|
|
||||||
|
if empty(given) || empty(given[0]) {
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
return given[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// empty returns true if the given value has the zero value for its type.
|
||||||
|
func empty(given interface{}) bool {
|
||||||
|
g := reflect.ValueOf(given)
|
||||||
|
if !g.IsValid() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Basically adapted from text/template.isTrue
|
||||||
|
switch g.Kind() {
|
||||||
|
default:
|
||||||
|
return g.IsNil()
|
||||||
|
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
|
||||||
|
return g.Len() == 0
|
||||||
|
case reflect.Bool:
|
||||||
|
return !g.Bool()
|
||||||
|
case reflect.Complex64, reflect.Complex128:
|
||||||
|
return g.Complex() == 0
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
return g.Int() == 0
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
return g.Uint() == 0
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
return g.Float() == 0
|
||||||
|
case reflect.Struct:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// coalesce returns the first non-empty value.
|
||||||
|
func coalesce(v ...interface{}) interface{} {
|
||||||
|
for _, val := range v {
|
||||||
|
if !empty(val) {
|
||||||
|
return val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// all returns true if empty(x) is false for all values x in the list.
|
||||||
|
// If the list is empty, return true.
|
||||||
|
func all(v ...interface{}) bool {
|
||||||
|
for _, val := range v {
|
||||||
|
if empty(val) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// any returns true if empty(x) is false for any x in the list.
|
||||||
|
// If the list is empty, return false.
|
||||||
|
func any(v ...interface{}) bool {
|
||||||
|
for _, val := range v {
|
||||||
|
if !empty(val) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// fromJson decodes JSON into a structured value, ignoring errors.
|
||||||
|
func fromJson(v string) interface{} {
|
||||||
|
output, _ := mustFromJson(v)
|
||||||
|
return output
|
||||||
|
}
|
||||||
|
|
||||||
|
// mustFromJson decodes JSON into a structured value, returning errors.
|
||||||
|
func mustFromJson(v string) (interface{}, error) {
|
||||||
|
var output interface{}
|
||||||
|
err := json.Unmarshal([]byte(v), &output)
|
||||||
|
return output, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// toJson encodes an item into a JSON string
|
||||||
|
func toJson(v interface{}) string {
|
||||||
|
output, _ := json.Marshal(v)
|
||||||
|
return string(output)
|
||||||
|
}
|
||||||
|
|
||||||
|
func mustToJson(v interface{}) (string, error) {
|
||||||
|
output, err := json.Marshal(v)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return string(output), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// toPrettyJson encodes an item into a pretty (indented) JSON string
|
||||||
|
func toPrettyJson(v interface{}) string {
|
||||||
|
output, _ := json.MarshalIndent(v, "", " ")
|
||||||
|
return string(output)
|
||||||
|
}
|
||||||
|
|
||||||
|
func mustToPrettyJson(v interface{}) (string, error) {
|
||||||
|
output, err := json.MarshalIndent(v, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return string(output), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// toRawJson encodes an item into a JSON string with no escaping of HTML characters.
|
||||||
|
func toRawJson(v interface{}) string {
|
||||||
|
output, err := mustToRawJson(v)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return string(output)
|
||||||
|
}
|
||||||
|
|
||||||
|
// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters.
|
||||||
|
func mustToRawJson(v interface{}) (string, error) {
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
enc := json.NewEncoder(buf)
|
||||||
|
enc.SetEscapeHTML(false)
|
||||||
|
err := enc.Encode(&v)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return strings.TrimSuffix(buf.String(), "\n"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ternary returns the first value if the last value is true, otherwise returns the second value.
|
||||||
|
func ternary(vt interface{}, vf interface{}, v bool) interface{} {
|
||||||
|
if v {
|
||||||
|
return vt
|
||||||
|
}
|
||||||
|
|
||||||
|
return vf
|
||||||
|
}
|
118
vendor/github.com/go-task/slim-sprig/dict.go
generated
vendored
Normal file
118
vendor/github.com/go-task/slim-sprig/dict.go
generated
vendored
Normal file
@ -0,0 +1,118 @@
|
|||||||
|
package sprig
|
||||||
|
|
||||||
|
func get(d map[string]interface{}, key string) interface{} {
|
||||||
|
if val, ok := d[key]; ok {
|
||||||
|
return val
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} {
|
||||||
|
d[key] = value
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
func unset(d map[string]interface{}, key string) map[string]interface{} {
|
||||||
|
delete(d, key)
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasKey(d map[string]interface{}, key string) bool {
|
||||||
|
_, ok := d[key]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func pluck(key string, d ...map[string]interface{}) []interface{} {
|
||||||
|
res := []interface{}{}
|
||||||
|
for _, dict := range d {
|
||||||
|
if val, ok := dict[key]; ok {
|
||||||
|
res = append(res, val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func keys(dicts ...map[string]interface{}) []string {
|
||||||
|
k := []string{}
|
||||||
|
for _, dict := range dicts {
|
||||||
|
for key := range dict {
|
||||||
|
k = append(k, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return k
|
||||||
|
}
|
||||||
|
|
||||||
|
func pick(dict map[string]interface{}, keys ...string) map[string]interface{} {
|
||||||
|
res := map[string]interface{}{}
|
||||||
|
for _, k := range keys {
|
||||||
|
if v, ok := dict[k]; ok {
|
||||||
|
res[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func omit(dict map[string]interface{}, keys ...string) map[string]interface{} {
|
||||||
|
res := map[string]interface{}{}
|
||||||
|
|
||||||
|
omit := make(map[string]bool, len(keys))
|
||||||
|
for _, k := range keys {
|
||||||
|
omit[k] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range dict {
|
||||||
|
if _, ok := omit[k]; !ok {
|
||||||
|
res[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func dict(v ...interface{}) map[string]interface{} {
|
||||||
|
dict := map[string]interface{}{}
|
||||||
|
lenv := len(v)
|
||||||
|
for i := 0; i < lenv; i += 2 {
|
||||||
|
key := strval(v[i])
|
||||||
|
if i+1 >= lenv {
|
||||||
|
dict[key] = ""
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
dict[key] = v[i+1]
|
||||||
|
}
|
||||||
|
return dict
|
||||||
|
}
|
||||||
|
|
||||||
|
func values(dict map[string]interface{}) []interface{} {
|
||||||
|
values := []interface{}{}
|
||||||
|
for _, value := range dict {
|
||||||
|
values = append(values, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
return values
|
||||||
|
}
|
||||||
|
|
||||||
|
func dig(ps ...interface{}) (interface{}, error) {
|
||||||
|
if len(ps) < 3 {
|
||||||
|
panic("dig needs at least three arguments")
|
||||||
|
}
|
||||||
|
dict := ps[len(ps)-1].(map[string]interface{})
|
||||||
|
def := ps[len(ps)-2]
|
||||||
|
ks := make([]string, len(ps)-2)
|
||||||
|
for i := 0; i < len(ks); i++ {
|
||||||
|
ks[i] = ps[i].(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
return digFromDict(dict, def, ks)
|
||||||
|
}
|
||||||
|
|
||||||
|
func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) {
|
||||||
|
k, ns := ks[0], ks[1:len(ks)]
|
||||||
|
step, has := dict[k]
|
||||||
|
if !has {
|
||||||
|
return d, nil
|
||||||
|
}
|
||||||
|
if len(ns) == 0 {
|
||||||
|
return step, nil
|
||||||
|
}
|
||||||
|
return digFromDict(step.(map[string]interface{}), d, ns)
|
||||||
|
}
|
19
vendor/github.com/go-task/slim-sprig/doc.go
generated
vendored
Normal file
19
vendor/github.com/go-task/slim-sprig/doc.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
/*
|
||||||
|
Package sprig provides template functions for Go.
|
||||||
|
|
||||||
|
This package contains a number of utility functions for working with data
|
||||||
|
inside of Go `html/template` and `text/template` files.
|
||||||
|
|
||||||
|
To add these functions, use the `template.Funcs()` method:
|
||||||
|
|
||||||
|
t := templates.New("foo").Funcs(sprig.FuncMap())
|
||||||
|
|
||||||
|
Note that you should add the function map before you parse any template files.
|
||||||
|
|
||||||
|
In several cases, Sprig reverses the order of arguments from the way they
|
||||||
|
appear in the standard library. This is to make it easier to pipe
|
||||||
|
arguments into functions.
|
||||||
|
|
||||||
|
See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions.
|
||||||
|
*/
|
||||||
|
package sprig
|
317
vendor/github.com/go-task/slim-sprig/functions.go
generated
vendored
Normal file
317
vendor/github.com/go-task/slim-sprig/functions.go
generated
vendored
Normal file
@ -0,0 +1,317 @@
|
|||||||
|
package sprig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"html/template"
|
||||||
|
"math/rand"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
ttemplate "text/template"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FuncMap produces the function map.
|
||||||
|
//
|
||||||
|
// Use this to pass the functions into the template engine:
|
||||||
|
//
|
||||||
|
// tpl := template.New("foo").Funcs(sprig.FuncMap()))
|
||||||
|
//
|
||||||
|
func FuncMap() template.FuncMap {
|
||||||
|
return HtmlFuncMap()
|
||||||
|
}
|
||||||
|
|
||||||
|
// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions.
|
||||||
|
func HermeticTxtFuncMap() ttemplate.FuncMap {
|
||||||
|
r := TxtFuncMap()
|
||||||
|
for _, name := range nonhermeticFunctions {
|
||||||
|
delete(r, name)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions.
|
||||||
|
func HermeticHtmlFuncMap() template.FuncMap {
|
||||||
|
r := HtmlFuncMap()
|
||||||
|
for _, name := range nonhermeticFunctions {
|
||||||
|
delete(r, name)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// TxtFuncMap returns a 'text/template'.FuncMap
|
||||||
|
func TxtFuncMap() ttemplate.FuncMap {
|
||||||
|
return ttemplate.FuncMap(GenericFuncMap())
|
||||||
|
}
|
||||||
|
|
||||||
|
// HtmlFuncMap returns an 'html/template'.Funcmap
|
||||||
|
func HtmlFuncMap() template.FuncMap {
|
||||||
|
return template.FuncMap(GenericFuncMap())
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}.
|
||||||
|
func GenericFuncMap() map[string]interface{} {
|
||||||
|
gfm := make(map[string]interface{}, len(genericMap))
|
||||||
|
for k, v := range genericMap {
|
||||||
|
gfm[k] = v
|
||||||
|
}
|
||||||
|
return gfm
|
||||||
|
}
|
||||||
|
|
||||||
|
// These functions are not guaranteed to evaluate to the same result for given input, because they
|
||||||
|
// refer to the environment or global state.
|
||||||
|
var nonhermeticFunctions = []string{
|
||||||
|
// Date functions
|
||||||
|
"date",
|
||||||
|
"date_in_zone",
|
||||||
|
"date_modify",
|
||||||
|
"now",
|
||||||
|
"htmlDate",
|
||||||
|
"htmlDateInZone",
|
||||||
|
"dateInZone",
|
||||||
|
"dateModify",
|
||||||
|
|
||||||
|
// Strings
|
||||||
|
"randAlphaNum",
|
||||||
|
"randAlpha",
|
||||||
|
"randAscii",
|
||||||
|
"randNumeric",
|
||||||
|
"randBytes",
|
||||||
|
"uuidv4",
|
||||||
|
|
||||||
|
// OS
|
||||||
|
"env",
|
||||||
|
"expandenv",
|
||||||
|
|
||||||
|
// Network
|
||||||
|
"getHostByName",
|
||||||
|
}
|
||||||
|
|
||||||
|
var genericMap = map[string]interface{}{
|
||||||
|
"hello": func() string { return "Hello!" },
|
||||||
|
|
||||||
|
// Date functions
|
||||||
|
"ago": dateAgo,
|
||||||
|
"date": date,
|
||||||
|
"date_in_zone": dateInZone,
|
||||||
|
"date_modify": dateModify,
|
||||||
|
"dateInZone": dateInZone,
|
||||||
|
"dateModify": dateModify,
|
||||||
|
"duration": duration,
|
||||||
|
"durationRound": durationRound,
|
||||||
|
"htmlDate": htmlDate,
|
||||||
|
"htmlDateInZone": htmlDateInZone,
|
||||||
|
"must_date_modify": mustDateModify,
|
||||||
|
"mustDateModify": mustDateModify,
|
||||||
|
"mustToDate": mustToDate,
|
||||||
|
"now": time.Now,
|
||||||
|
"toDate": toDate,
|
||||||
|
"unixEpoch": unixEpoch,
|
||||||
|
|
||||||
|
// Strings
|
||||||
|
"trunc": trunc,
|
||||||
|
"trim": strings.TrimSpace,
|
||||||
|
"upper": strings.ToUpper,
|
||||||
|
"lower": strings.ToLower,
|
||||||
|
"title": strings.Title,
|
||||||
|
"substr": substring,
|
||||||
|
// Switch order so that "foo" | repeat 5
|
||||||
|
"repeat": func(count int, str string) string { return strings.Repeat(str, count) },
|
||||||
|
// Deprecated: Use trimAll.
|
||||||
|
"trimall": func(a, b string) string { return strings.Trim(b, a) },
|
||||||
|
// Switch order so that "$foo" | trimall "$"
|
||||||
|
"trimAll": func(a, b string) string { return strings.Trim(b, a) },
|
||||||
|
"trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) },
|
||||||
|
"trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) },
|
||||||
|
// Switch order so that "foobar" | contains "foo"
|
||||||
|
"contains": func(substr string, str string) bool { return strings.Contains(str, substr) },
|
||||||
|
"hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) },
|
||||||
|
"hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) },
|
||||||
|
"quote": quote,
|
||||||
|
"squote": squote,
|
||||||
|
"cat": cat,
|
||||||
|
"indent": indent,
|
||||||
|
"nindent": nindent,
|
||||||
|
"replace": replace,
|
||||||
|
"plural": plural,
|
||||||
|
"sha1sum": sha1sum,
|
||||||
|
"sha256sum": sha256sum,
|
||||||
|
"adler32sum": adler32sum,
|
||||||
|
"toString": strval,
|
||||||
|
|
||||||
|
// Wrap Atoi to stop errors.
|
||||||
|
"atoi": func(a string) int { i, _ := strconv.Atoi(a); return i },
|
||||||
|
"int64": toInt64,
|
||||||
|
"int": toInt,
|
||||||
|
"float64": toFloat64,
|
||||||
|
"seq": seq,
|
||||||
|
"toDecimal": toDecimal,
|
||||||
|
|
||||||
|
//"gt": func(a, b int) bool {return a > b},
|
||||||
|
//"gte": func(a, b int) bool {return a >= b},
|
||||||
|
//"lt": func(a, b int) bool {return a < b},
|
||||||
|
//"lte": func(a, b int) bool {return a <= b},
|
||||||
|
|
||||||
|
// split "/" foo/bar returns map[int]string{0: foo, 1: bar}
|
||||||
|
"split": split,
|
||||||
|
"splitList": func(sep, orig string) []string { return strings.Split(orig, sep) },
|
||||||
|
// splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu}
|
||||||
|
"splitn": splitn,
|
||||||
|
"toStrings": strslice,
|
||||||
|
|
||||||
|
"until": until,
|
||||||
|
"untilStep": untilStep,
|
||||||
|
|
||||||
|
// VERY basic arithmetic.
|
||||||
|
"add1": func(i interface{}) int64 { return toInt64(i) + 1 },
|
||||||
|
"add": func(i ...interface{}) int64 {
|
||||||
|
var a int64 = 0
|
||||||
|
for _, b := range i {
|
||||||
|
a += toInt64(b)
|
||||||
|
}
|
||||||
|
return a
|
||||||
|
},
|
||||||
|
"sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) },
|
||||||
|
"div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) },
|
||||||
|
"mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) },
|
||||||
|
"mul": func(a interface{}, v ...interface{}) int64 {
|
||||||
|
val := toInt64(a)
|
||||||
|
for _, b := range v {
|
||||||
|
val = val * toInt64(b)
|
||||||
|
}
|
||||||
|
return val
|
||||||
|
},
|
||||||
|
"randInt": func(min, max int) int { return rand.Intn(max-min) + min },
|
||||||
|
"biggest": max,
|
||||||
|
"max": max,
|
||||||
|
"min": min,
|
||||||
|
"maxf": maxf,
|
||||||
|
"minf": minf,
|
||||||
|
"ceil": ceil,
|
||||||
|
"floor": floor,
|
||||||
|
"round": round,
|
||||||
|
|
||||||
|
// string slices. Note that we reverse the order b/c that's better
|
||||||
|
// for template processing.
|
||||||
|
"join": join,
|
||||||
|
"sortAlpha": sortAlpha,
|
||||||
|
|
||||||
|
// Defaults
|
||||||
|
"default": dfault,
|
||||||
|
"empty": empty,
|
||||||
|
"coalesce": coalesce,
|
||||||
|
"all": all,
|
||||||
|
"any": any,
|
||||||
|
"compact": compact,
|
||||||
|
"mustCompact": mustCompact,
|
||||||
|
"fromJson": fromJson,
|
||||||
|
"toJson": toJson,
|
||||||
|
"toPrettyJson": toPrettyJson,
|
||||||
|
"toRawJson": toRawJson,
|
||||||
|
"mustFromJson": mustFromJson,
|
||||||
|
"mustToJson": mustToJson,
|
||||||
|
"mustToPrettyJson": mustToPrettyJson,
|
||||||
|
"mustToRawJson": mustToRawJson,
|
||||||
|
"ternary": ternary,
|
||||||
|
|
||||||
|
// Reflection
|
||||||
|
"typeOf": typeOf,
|
||||||
|
"typeIs": typeIs,
|
||||||
|
"typeIsLike": typeIsLike,
|
||||||
|
"kindOf": kindOf,
|
||||||
|
"kindIs": kindIs,
|
||||||
|
"deepEqual": reflect.DeepEqual,
|
||||||
|
|
||||||
|
// OS:
|
||||||
|
"env": os.Getenv,
|
||||||
|
"expandenv": os.ExpandEnv,
|
||||||
|
|
||||||
|
// Network:
|
||||||
|
"getHostByName": getHostByName,
|
||||||
|
|
||||||
|
// Paths:
|
||||||
|
"base": path.Base,
|
||||||
|
"dir": path.Dir,
|
||||||
|
"clean": path.Clean,
|
||||||
|
"ext": path.Ext,
|
||||||
|
"isAbs": path.IsAbs,
|
||||||
|
|
||||||
|
// Filepaths:
|
||||||
|
"osBase": filepath.Base,
|
||||||
|
"osClean": filepath.Clean,
|
||||||
|
"osDir": filepath.Dir,
|
||||||
|
"osExt": filepath.Ext,
|
||||||
|
"osIsAbs": filepath.IsAbs,
|
||||||
|
|
||||||
|
// Encoding:
|
||||||
|
"b64enc": base64encode,
|
||||||
|
"b64dec": base64decode,
|
||||||
|
"b32enc": base32encode,
|
||||||
|
"b32dec": base32decode,
|
||||||
|
|
||||||
|
// Data Structures:
|
||||||
|
"tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable.
|
||||||
|
"list": list,
|
||||||
|
"dict": dict,
|
||||||
|
"get": get,
|
||||||
|
"set": set,
|
||||||
|
"unset": unset,
|
||||||
|
"hasKey": hasKey,
|
||||||
|
"pluck": pluck,
|
||||||
|
"keys": keys,
|
||||||
|
"pick": pick,
|
||||||
|
"omit": omit,
|
||||||
|
"values": values,
|
||||||
|
|
||||||
|
"append": push, "push": push,
|
||||||
|
"mustAppend": mustPush, "mustPush": mustPush,
|
||||||
|
"prepend": prepend,
|
||||||
|
"mustPrepend": mustPrepend,
|
||||||
|
"first": first,
|
||||||
|
"mustFirst": mustFirst,
|
||||||
|
"rest": rest,
|
||||||
|
"mustRest": mustRest,
|
||||||
|
"last": last,
|
||||||
|
"mustLast": mustLast,
|
||||||
|
"initial": initial,
|
||||||
|
"mustInitial": mustInitial,
|
||||||
|
"reverse": reverse,
|
||||||
|
"mustReverse": mustReverse,
|
||||||
|
"uniq": uniq,
|
||||||
|
"mustUniq": mustUniq,
|
||||||
|
"without": without,
|
||||||
|
"mustWithout": mustWithout,
|
||||||
|
"has": has,
|
||||||
|
"mustHas": mustHas,
|
||||||
|
"slice": slice,
|
||||||
|
"mustSlice": mustSlice,
|
||||||
|
"concat": concat,
|
||||||
|
"dig": dig,
|
||||||
|
"chunk": chunk,
|
||||||
|
"mustChunk": mustChunk,
|
||||||
|
|
||||||
|
// Flow Control:
|
||||||
|
"fail": func(msg string) (string, error) { return "", errors.New(msg) },
|
||||||
|
|
||||||
|
// Regex
|
||||||
|
"regexMatch": regexMatch,
|
||||||
|
"mustRegexMatch": mustRegexMatch,
|
||||||
|
"regexFindAll": regexFindAll,
|
||||||
|
"mustRegexFindAll": mustRegexFindAll,
|
||||||
|
"regexFind": regexFind,
|
||||||
|
"mustRegexFind": mustRegexFind,
|
||||||
|
"regexReplaceAll": regexReplaceAll,
|
||||||
|
"mustRegexReplaceAll": mustRegexReplaceAll,
|
||||||
|
"regexReplaceAllLiteral": regexReplaceAllLiteral,
|
||||||
|
"mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral,
|
||||||
|
"regexSplit": regexSplit,
|
||||||
|
"mustRegexSplit": mustRegexSplit,
|
||||||
|
"regexQuoteMeta": regexQuoteMeta,
|
||||||
|
|
||||||
|
// URLs:
|
||||||
|
"urlParse": urlParse,
|
||||||
|
"urlJoin": urlJoin,
|
||||||
|
}
|
464
vendor/github.com/go-task/slim-sprig/list.go
generated
vendored
Normal file
464
vendor/github.com/go-task/slim-sprig/list.go
generated
vendored
Normal file
@ -0,0 +1,464 @@
|
|||||||
|
package sprig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reflection is used in these functions so that slices and arrays of strings,
|
||||||
|
// ints, and other types not implementing []interface{} can be worked with.
|
||||||
|
// For example, this is useful if you need to work on the output of regexs.
|
||||||
|
|
||||||
|
func list(v ...interface{}) []interface{} {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
func push(list interface{}, v interface{}) []interface{} {
|
||||||
|
l, err := mustPush(list, v)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
func mustPush(list interface{}, v interface{}) ([]interface{}, error) {
|
||||||
|
tp := reflect.TypeOf(list).Kind()
|
||||||
|
switch tp {
|
||||||
|
case reflect.Slice, reflect.Array:
|
||||||
|
l2 := reflect.ValueOf(list)
|
||||||
|
|
||||||
|
l := l2.Len()
|
||||||
|
nl := make([]interface{}, l)
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
nl[i] = l2.Index(i).Interface()
|
||||||
|
}
|
||||||
|
|
||||||
|
return append(nl, v), nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("Cannot push on type %s", tp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func prepend(list interface{}, v interface{}) []interface{} {
|
||||||
|
l, err := mustPrepend(list, v)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) {
|
||||||
|
//return append([]interface{}{v}, list...)
|
||||||
|
|
||||||
|
tp := reflect.TypeOf(list).Kind()
|
||||||
|
switch tp {
|
||||||
|
case reflect.Slice, reflect.Array:
|
||||||
|
l2 := reflect.ValueOf(list)
|
||||||
|
|
||||||
|
l := l2.Len()
|
||||||
|
nl := make([]interface{}, l)
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
nl[i] = l2.Index(i).Interface()
|
||||||
|
}
|
||||||
|
|
||||||
|
return append([]interface{}{v}, nl...), nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("Cannot prepend on type %s", tp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func chunk(size int, list interface{}) [][]interface{} {
|
||||||
|
l, err := mustChunk(size, list)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
func mustChunk(size int, list interface{}) ([][]interface{}, error) {
|
||||||
|
tp := reflect.TypeOf(list).Kind()
|
||||||
|
switch tp {
|
||||||
|
case reflect.Slice, reflect.Array:
|
||||||
|
l2 := reflect.ValueOf(list)
|
||||||
|
|
||||||
|
l := l2.Len()
|
||||||
|
|
||||||
|
cs := int(math.Floor(float64(l-1)/float64(size)) + 1)
|
||||||
|
nl := make([][]interface{}, cs)
|
||||||
|
|
||||||
|
for i := 0; i < cs; i++ {
|
||||||
|
clen := size
|
||||||
|
if i == cs-1 {
|
||||||
|
clen = int(math.Floor(math.Mod(float64(l), float64(size))))
|
||||||
|
if clen == 0 {
|
||||||
|
clen = size
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
nl[i] = make([]interface{}, clen)
|
||||||
|
|
||||||
|
for j := 0; j < clen; j++ {
|
||||||
|
ix := i*size + j
|
||||||
|
nl[i][j] = l2.Index(ix).Interface()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nl, nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("Cannot chunk type %s", tp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func last(list interface{}) interface{} {
|
||||||
|
l, err := mustLast(list)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
func mustLast(list interface{}) (interface{}, error) {
|
||||||
|
tp := reflect.TypeOf(list).Kind()
|
||||||
|
switch tp {
|
||||||
|
case reflect.Slice, reflect.Array:
|
||||||
|
l2 := reflect.ValueOf(list)
|
||||||
|
|
||||||
|
l := l2.Len()
|
||||||
|
if l == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return l2.Index(l - 1).Interface(), nil
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("Cannot find last on type %s", tp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func first(list interface{}) interface{} {
|
||||||
|
l, err := mustFirst(list)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
func mustFirst(list interface{}) (interface{}, error) {
|
||||||
|
tp := reflect.TypeOf(list).Kind()
|
||||||
|
switch tp {
|
||||||
|
case reflect.Slice, reflect.Array:
|
||||||
|
l2 := reflect.ValueOf(list)
|
||||||
|
|
||||||
|
l := l2.Len()
|
||||||
|
if l == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return l2.Index(0).Interface(), nil
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("Cannot find first on type %s", tp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func rest(list interface{}) []interface{} {
|
||||||
|
l, err := mustRest(list)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
func mustRest(list interface{}) ([]interface{}, error) {
|
||||||
|
tp := reflect.TypeOf(list).Kind()
|
||||||
|
switch tp {
|
||||||
|
case reflect.Slice, reflect.Array:
|
||||||
|
l2 := reflect.ValueOf(list)
|
||||||
|
|
||||||
|
l := l2.Len()
|
||||||
|
if l == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
nl := make([]interface{}, l-1)
|
||||||
|
for i := 1; i < l; i++ {
|
||||||
|
nl[i-1] = l2.Index(i).Interface()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nl, nil
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("Cannot find rest on type %s", tp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func initial(list interface{}) []interface{} {
|
||||||
|
l, err := mustInitial(list)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
// mustInitial returns all elements of list except the last, nil for
// an empty list, or an error for non-list input.
func mustInitial(list interface{}) ([]interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		if l == 0 {
			return nil, nil
		}

		// Copy out everything but the final element.
		nl := make([]interface{}, l-1)
		for i := 0; i < l-1; i++ {
			nl[i] = l2.Index(i).Interface()
		}

		return nl, nil
	default:
		return nil, fmt.Errorf("Cannot find initial on type %s", tp)
	}
}
|
||||||
|
|
||||||
|
// sortAlpha renders list's elements as strings (via strslice) and
// sorts them lexicographically; a non-list input becomes a
// single-element slice of its string form.
func sortAlpha(list interface{}) []string {
	k := reflect.Indirect(reflect.ValueOf(list)).Kind()
	switch k {
	case reflect.Slice, reflect.Array:
		a := strslice(list)
		s := sort.StringSlice(a)
		s.Sort()
		return s
	}
	return []string{strval(list)}
}
|
||||||
|
|
||||||
|
// reverse returns the elements of v in reverse order, panicking when
// the input is not a slice or array.
func reverse(v interface{}) []interface{} {
	l, err := mustReverse(v)
	if err != nil {
		panic(err)
	}

	return l
}
|
||||||
|
|
||||||
|
// mustReverse returns a new slice holding the elements of v in
// reverse order, or an error for non-list input. The input list is
// left untouched.
func mustReverse(v interface{}) ([]interface{}, error) {
	kind := reflect.TypeOf(v).Kind()
	if kind != reflect.Slice && kind != reflect.Array {
		return nil, fmt.Errorf("Cannot find reverse on type %s", kind)
	}

	lv := reflect.ValueOf(v)
	n := lv.Len()
	out := make([]interface{}, n)
	for i := n - 1; i >= 0; i-- {
		out[n-1-i] = lv.Index(i).Interface()
	}

	return out, nil
}
|
||||||
|
|
||||||
|
// compact returns list with all empty values removed, panicking when
// the input is not a slice or array.
func compact(list interface{}) []interface{} {
	l, err := mustCompact(list)
	if err != nil {
		panic(err)
	}

	return l
}
|
||||||
|
|
||||||
|
// mustCompact returns list with all "empty" values (as decided by
// the package's empty helper) removed; errors on non-list input.
func mustCompact(list interface{}) ([]interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		nl := []interface{}{}
		var item interface{}
		for i := 0; i < l; i++ {
			item = l2.Index(i).Interface()
			if !empty(item) {
				nl = append(nl, item)
			}
		}

		return nl, nil
	default:
		return nil, fmt.Errorf("Cannot compact on type %s", tp)
	}
}
|
||||||
|
|
||||||
|
// uniq returns list with duplicate elements removed, panicking when
// the input is not a slice or array.
func uniq(list interface{}) []interface{} {
	l, err := mustUniq(list)
	if err != nil {
		panic(err)
	}

	return l
}
|
||||||
|
|
||||||
|
// mustUniq returns list with duplicate (deep-equal) elements removed,
// keeping the first occurrence of each; errors on non-list input.
func mustUniq(list interface{}) ([]interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		dest := []interface{}{}
		var item interface{}
		for i := 0; i < l; i++ {
			item = l2.Index(i).Interface()
			// O(n^2) membership scan; fine for template-sized lists.
			if !inList(dest, item) {
				dest = append(dest, item)
			}
		}

		return dest, nil
	default:
		return nil, fmt.Errorf("Cannot find uniq on type %s", tp)
	}
}
|
||||||
|
|
||||||
|
// inList reports whether needle deep-equals any element of haystack.
func inList(haystack []interface{}, needle interface{}) bool {
	for _, candidate := range haystack {
		if reflect.DeepEqual(candidate, needle) {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// without returns list with every element listed in omit removed,
// panicking when the input is not a slice or array.
func without(list interface{}, omit ...interface{}) []interface{} {
	l, err := mustWithout(list, omit...)
	if err != nil {
		panic(err)
	}

	return l
}
|
||||||
|
|
||||||
|
// mustWithout returns list with every element deep-equal to one of
// omit removed; errors on non-list input.
func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		res := []interface{}{}
		var item interface{}
		for i := 0; i < l; i++ {
			item = l2.Index(i).Interface()
			if !inList(omit, item) {
				res = append(res, item)
			}
		}

		return res, nil
	default:
		return nil, fmt.Errorf("Cannot find without on type %s", tp)
	}
}
|
||||||
|
|
||||||
|
// has reports whether needle occurs in haystack, panicking when
// haystack is neither nil nor a slice/array.
func has(needle interface{}, haystack interface{}) bool {
	l, err := mustHas(needle, haystack)
	if err != nil {
		panic(err)
	}

	return l
}
|
||||||
|
|
||||||
|
// mustHas reports whether needle occurs (by deep equality) in the
// given slice or array. A nil haystack contains nothing; any other
// non-list haystack is an error.
func mustHas(needle interface{}, haystack interface{}) (bool, error) {
	if haystack == nil {
		return false, nil
	}

	kind := reflect.TypeOf(haystack).Kind()
	if kind != reflect.Slice && kind != reflect.Array {
		return false, fmt.Errorf("Cannot find has on type %s", kind)
	}

	hv := reflect.ValueOf(haystack)
	for i := 0; i < hv.Len(); i++ {
		if reflect.DeepEqual(needle, hv.Index(i).Interface()) {
			return true, nil
		}
	}

	return false, nil
}
|
||||||
|
|
||||||
|
// slice returns a sub-list of list, panicking on error.
//
// $list := [1, 2, 3, 4, 5]
// slice $list     -> list[0:5] = list[:]
// slice $list 0 3 -> list[0:3] = list[:3]
// slice $list 3 5 -> list[3:5]
// slice $list 3   -> list[3:5] = list[3:]
func slice(list interface{}, indices ...interface{}) interface{} {
	l, err := mustSlice(list, indices...)
	if err != nil {
		panic(err)
	}

	return l
}
|
||||||
|
|
||||||
|
// mustSlice slices a list by optional start/end indices (see the
// examples on slice). It returns nil for an empty list and an error
// for non-list input; out-of-range indices make the underlying
// reflect.Value.Slice call panic.
func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		if l == 0 {
			return nil, nil
		}

		// Defaults: start = 0, end = len(list).
		var start, end int
		if len(indices) > 0 {
			start = toInt(indices[0])
		}
		if len(indices) < 2 {
			end = l
		} else {
			end = toInt(indices[1])
		}

		return l2.Slice(start, end).Interface(), nil
	default:
		return nil, fmt.Errorf("list should be type of slice or array but %s", tp)
	}
}
|
||||||
|
|
||||||
|
// concat flattens any number of slices/arrays into a single
// []interface{}, panicking if an argument is not a list.
func concat(lists ...interface{}) interface{} {
	var merged []interface{}
	for _, list := range lists {
		kind := reflect.TypeOf(list).Kind()
		if kind != reflect.Slice && kind != reflect.Array {
			panic(fmt.Sprintf("Cannot concat type %s as list", kind))
		}

		lv := reflect.ValueOf(list)
		for i := 0; i < lv.Len(); i++ {
			merged = append(merged, lv.Index(i).Interface())
		}
	}
	return merged
}
|
12
vendor/github.com/go-task/slim-sprig/network.go
generated
vendored
Normal file
12
vendor/github.com/go-task/slim-sprig/network.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
package sprig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"net"
|
||||||
|
)
|
||||||
|
|
||||||
|
// getHostByName resolves name via DNS and returns one of its
// addresses chosen at random. It returns "" when the lookup fails or
// yields no addresses; previously the error was ignored and the
// empty result made rand.Intn(0) panic.
func getHostByName(name string) string {
	addrs, err := net.LookupHost(name)
	if err != nil || len(addrs) == 0 {
		return ""
	}
	return addrs[rand.Intn(len(addrs))]
}
|
228
vendor/github.com/go-task/slim-sprig/numeric.go
generated
vendored
Normal file
228
vendor/github.com/go-task/slim-sprig/numeric.go
generated
vendored
Normal file
@ -0,0 +1,228 @@
|
|||||||
|
package sprig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// toFloat64 coerces an arbitrary value to a float64. Strings are
// parsed as decimal floats, numeric kinds are converted, bools map
// to 1/0, and anything unconvertible yields 0.
func toFloat64(v interface{}) float64 {
	if s, ok := v.(string); ok {
		f, err := strconv.ParseFloat(s, 64)
		if err != nil {
			return 0
		}
		return f
	}

	rv := reflect.Indirect(reflect.ValueOf(v))
	switch rv.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return float64(rv.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return float64(rv.Uint())
	case reflect.Float32, reflect.Float64:
		return rv.Float()
	case reflect.Bool:
		if rv.Bool() {
			return 1
		}
		return 0
	default:
		return 0
	}
}
|
||||||
|
|
||||||
|
// toInt converts an arbitrary value to an int via toInt64.
func toInt(v interface{}) int {
	// Not optimal, but avoids duplicating the toInt64 coercion logic.
	return int(toInt64(v))
}
|
||||||
|
|
||||||
|
// toInt64 coerces an arbitrary value to an int64. Strings are parsed
// as base-10 integers, floats are truncated, bools map to 1/0,
// unsigned values above MaxInt64 are clamped, and anything
// unconvertible yields 0.
func toInt64(v interface{}) int64 {
	if s, ok := v.(string); ok {
		n, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			return 0
		}
		return n
	}

	rv := reflect.Indirect(reflect.ValueOf(v))
	switch rv.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return rv.Int()
	case reflect.Uint8, reflect.Uint16, reflect.Uint32:
		return int64(rv.Uint())
	case reflect.Uint, reflect.Uint64:
		u := rv.Uint()
		if u > math.MaxInt64 {
			// Clamp rather than overflow into a negative value.
			return math.MaxInt64
		}
		return int64(u)
	case reflect.Float32, reflect.Float64:
		return int64(rv.Float())
	case reflect.Bool:
		if rv.Bool() {
			return 1
		}
		return 0
	default:
		return 0
	}
}
|
||||||
|
|
||||||
|
// max returns the largest of its arguments after int64 coercion.
func max(a interface{}, i ...interface{}) int64 {
	aa := toInt64(a)
	for _, b := range i {
		bb := toInt64(b)
		if bb > aa {
			aa = bb
		}
	}
	return aa
}
|
||||||
|
|
||||||
|
// maxf returns the largest of its arguments after float64 coercion.
func maxf(a interface{}, i ...interface{}) float64 {
	aa := toFloat64(a)
	for _, b := range i {
		bb := toFloat64(b)
		aa = math.Max(aa, bb)
	}
	return aa
}
|
||||||
|
|
||||||
|
// min returns the smallest of its arguments after int64 coercion.
func min(a interface{}, i ...interface{}) int64 {
	aa := toInt64(a)
	for _, b := range i {
		bb := toInt64(b)
		if bb < aa {
			aa = bb
		}
	}
	return aa
}
|
||||||
|
|
||||||
|
// minf returns the smallest of its arguments after float64 coercion.
func minf(a interface{}, i ...interface{}) float64 {
	aa := toFloat64(a)
	for _, b := range i {
		bb := toFloat64(b)
		aa = math.Min(aa, bb)
	}
	return aa
}
|
||||||
|
|
||||||
|
// until generates [0, count), counting down when count is negative.
func until(count int) []int {
	step := 1
	if count < 0 {
		step = -1
	}
	return untilStep(0, count, step)
}
|
||||||
|
|
||||||
|
// untilStep generates the half-open range [start, stop) advancing by
// step, counting down when stop < start. A step whose sign cannot
// reach stop yields the empty slice.
func untilStep(start, stop, step int) []int {
	out := []int{}

	if stop < start {
		if step >= 0 {
			// A non-negative step would never reach stop.
			return out
		}
		for i := start; i > stop; i += step {
			out = append(out, i)
		}
		return out
	}

	if step <= 0 {
		// A non-positive step would never reach stop.
		return out
	}
	for i := start; i < stop; i += step {
		out = append(out, i)
	}
	return out
}
|
||||||
|
|
||||||
|
// floor returns the greatest integer value <= a (after float64 coercion).
func floor(a interface{}) float64 {
	aa := toFloat64(a)
	return math.Floor(aa)
}
|
||||||
|
|
||||||
|
// ceil returns the least integer value >= a (after float64 coercion).
func ceil(a interface{}) float64 {
	aa := toFloat64(a)
	return math.Ceil(aa)
}
|
||||||
|
|
||||||
|
// round rounds a to p decimal places; an optional rOpt overrides the
// default .5 fractional threshold at which values round up.
func round(a interface{}, p int, rOpt ...float64) float64 {
	roundOn := .5
	if len(rOpt) > 0 {
		roundOn = rOpt[0]
	}
	val := toFloat64(a)
	places := toFloat64(p)

	var round float64
	pow := math.Pow(10, places)
	// Shift the target digit left of the decimal point, round it,
	// then shift back.
	digit := pow * val
	_, div := math.Modf(digit)
	if div >= roundOn {
		round = math.Ceil(digit)
	} else {
		round = math.Floor(digit)
	}
	return round / pow
}
|
||||||
|
|
||||||
|
// toDecimal interprets v's string form as a unix octal number (e.g.
// a file mode) and returns its decimal value, or 0 when it does not
// parse as octal.
func toDecimal(v interface{}) int64 {
	n, err := strconv.ParseInt(fmt.Sprint(v), 8, 64)
	if err != nil {
		return 0
	}
	return n
}
|
||||||
|
|
||||||
|
// seq emulates the seq(1) utility, rendering an inclusive integer
// sequence as a space-separated string. One arg: 1..n. Two args:
// start..end (direction inferred). Three args: start step end.
func seq(params ...int) string {
	increment := 1
	switch len(params) {
	case 0:
		return ""
	case 1:
		start := 1
		end := params[0]
		if end < start {
			increment = -1
		}
		return intArrayToString(untilStep(start, end+increment, increment), " ")
	case 3:
		start := params[0]
		end := params[2]
		step := params[1]
		if end < start {
			increment = -1
			if step > 0 {
				// A descending range with a positive step is empty.
				return ""
			}
		}
		return intArrayToString(untilStep(start, end+increment, step), " ")
	case 2:
		start := params[0]
		end := params[1]
		step := 1
		if end < start {
			step = -1
		}
		return intArrayToString(untilStep(start, end+step, step), " ")
	default:
		return ""
	}
}
|
||||||
|
|
||||||
|
// intArrayToString renders an int slice as its elements joined by
// delimiter, e.g. []int{1, 2, 3} with " " -> "1 2 3".
func intArrayToString(slice []int, delimiter string) string {
	parts := make([]string, len(slice))
	for i, n := range slice {
		parts[i] = strconv.Itoa(n)
	}
	return strings.Join(parts, delimiter)
}
|
28
vendor/github.com/go-task/slim-sprig/reflect.go
generated
vendored
Normal file
28
vendor/github.com/go-task/slim-sprig/reflect.go
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
package sprig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// typeIs returns true if the src is the type named in target.
func typeIs(target string, src interface{}) bool {
	return target == typeOf(src)
}
|
||||||
|
|
||||||
|
// typeIsLike is typeIs but also accepts a pointer to the target type.
func typeIsLike(target string, src interface{}) bool {
	t := typeOf(src)
	return target == t || "*"+target == t
}
|
||||||
|
|
||||||
|
// typeOf returns src's Go type name as printed by fmt's %T verb.
func typeOf(src interface{}) string {
	return fmt.Sprintf("%T", src)
}
|
||||||
|
|
||||||
|
// kindIs returns true if src's reflect.Kind is named by target.
func kindIs(target string, src interface{}) bool {
	return target == kindOf(src)
}
|
||||||
|
|
||||||
|
// kindOf returns the string form of src's reflect.Kind (e.g. "slice").
func kindOf(src interface{}) string {
	return reflect.ValueOf(src).Kind().String()
}
|
83
vendor/github.com/go-task/slim-sprig/regex.go
generated
vendored
Normal file
83
vendor/github.com/go-task/slim-sprig/regex.go
generated
vendored
Normal file
@ -0,0 +1,83 @@
|
|||||||
|
package sprig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// regexMatch reports whether s contains a match of regex; an invalid
// pattern is silently treated as no match.
func regexMatch(regex string, s string) bool {
	match, _ := regexp.MatchString(regex, s)
	return match
}
|
||||||
|
|
||||||
|
// mustRegexMatch reports whether s contains a match of regex,
// returning the compile error for an invalid pattern.
func mustRegexMatch(regex string, s string) (bool, error) {
	return regexp.MatchString(regex, s)
}
|
||||||
|
|
||||||
|
// regexFindAll returns up to n matches of regex in s (n < 0 means
// all); panics on an invalid pattern.
func regexFindAll(regex string, s string, n int) []string {
	r := regexp.MustCompile(regex)
	return r.FindAllString(s, n)
}
|
||||||
|
|
||||||
|
// mustRegexFindAll returns up to n matches of regex in s (n < 0
// means all), or the compile error for an invalid pattern.
func mustRegexFindAll(regex string, s string, n int) ([]string, error) {
	re, err := regexp.Compile(regex)
	if err != nil {
		return []string{}, err
	}

	return re.FindAllString(s, n), nil
}
|
||||||
|
|
||||||
|
// regexFind returns the leftmost match of regex in s ("" if none);
// panics on an invalid pattern.
func regexFind(regex string, s string) string {
	r := regexp.MustCompile(regex)
	return r.FindString(s)
}
|
||||||
|
|
||||||
|
// mustRegexFind returns the leftmost match of regex in s ("" if
// none), or the compile error for an invalid pattern.
func mustRegexFind(regex string, s string) (string, error) {
	r, err := regexp.Compile(regex)
	if err != nil {
		return "", err
	}
	return r.FindString(s), nil
}
|
||||||
|
|
||||||
|
// regexReplaceAll substitutes repl (with $-group expansion) for
// every match of regex in s; panics on an invalid pattern.
func regexReplaceAll(regex string, s string, repl string) string {
	r := regexp.MustCompile(regex)
	return r.ReplaceAllString(s, repl)
}
|
||||||
|
|
||||||
|
// mustRegexReplaceAll substitutes repl (with $-group expansion) for
// every match of regex in s, or returns the compile error for an
// invalid pattern.
func mustRegexReplaceAll(regex string, s string, repl string) (string, error) {
	re, err := regexp.Compile(regex)
	if err != nil {
		return "", err
	}

	return re.ReplaceAllString(s, repl), nil
}
|
||||||
|
|
||||||
|
// regexReplaceAllLiteral substitutes repl verbatim (no $-expansion)
// for every match of regex in s; panics on an invalid pattern.
func regexReplaceAllLiteral(regex string, s string, repl string) string {
	r := regexp.MustCompile(regex)
	return r.ReplaceAllLiteralString(s, repl)
}
|
||||||
|
|
||||||
|
// mustRegexReplaceAllLiteral substitutes repl verbatim (no
// $-expansion) for every match of regex in s, or returns the compile
// error for an invalid pattern.
func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) {
	r, err := regexp.Compile(regex)
	if err != nil {
		return "", err
	}
	return r.ReplaceAllLiteralString(s, repl), nil
}
|
||||||
|
|
||||||
|
// regexSplit splits s around matches of regex into at most n pieces
// (n < 0 means all); panics on an invalid pattern.
func regexSplit(regex string, s string, n int) []string {
	r := regexp.MustCompile(regex)
	return r.Split(s, n)
}
|
||||||
|
|
||||||
|
// mustRegexSplit splits s around matches of regex into at most n
// pieces (n < 0 means all), or returns the compile error for an
// invalid pattern.
func mustRegexSplit(regex string, s string, n int) ([]string, error) {
	r, err := regexp.Compile(regex)
	if err != nil {
		return []string{}, err
	}
	return r.Split(s, n), nil
}
|
||||||
|
|
||||||
|
// regexQuoteMeta escapes all regex metacharacters in s.
func regexQuoteMeta(s string) string {
	return regexp.QuoteMeta(s)
}
|
189
vendor/github.com/go-task/slim-sprig/strings.go
generated
vendored
Normal file
189
vendor/github.com/go-task/slim-sprig/strings.go
generated
vendored
Normal file
@ -0,0 +1,189 @@
|
|||||||
|
package sprig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base32"
|
||||||
|
"encoding/base64"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// base64encode returns v encoded as standard (padded) base64.
func base64encode(v string) string {
	return base64.StdEncoding.EncodeToString([]byte(v))
}
|
||||||
|
|
||||||
|
// base64decode decodes standard base64; on invalid input it returns
// the error message text instead of failing.
func base64decode(v string) string {
	data, err := base64.StdEncoding.DecodeString(v)
	if err != nil {
		return err.Error()
	}
	return string(data)
}
|
||||||
|
|
||||||
|
// base32encode returns v encoded as standard (padded) base32.
func base32encode(v string) string {
	return base32.StdEncoding.EncodeToString([]byte(v))
}
|
||||||
|
|
||||||
|
// base32decode decodes standard base32; on invalid input it returns
// the error message text instead of failing.
func base32decode(v string) string {
	data, err := base32.StdEncoding.DecodeString(v)
	if err != nil {
		return err.Error()
	}
	return string(data)
}
|
||||||
|
|
||||||
|
// quote double-quotes each non-nil argument (via %q on its string
// form) and joins the results with spaces.
func quote(str ...interface{}) string {
	out := make([]string, 0, len(str))
	for _, s := range str {
		if s != nil {
			out = append(out, fmt.Sprintf("%q", strval(s)))
		}
	}
	return strings.Join(out, " ")
}
|
||||||
|
|
||||||
|
// squote wraps each non-nil argument in single quotes and joins the
// results with spaces.
func squote(str ...interface{}) string {
	quoted := make([]string, 0, len(str))
	for _, item := range str {
		if item == nil {
			continue
		}
		quoted = append(quoted, fmt.Sprintf("'%v'", item))
	}
	return strings.Join(quoted, " ")
}
|
||||||
|
|
||||||
|
// cat joins the non-nil arguments into one space-separated string.
func cat(v ...interface{}) string {
	v = removeNilElements(v)
	// Build a "%v %v ..." format with one verb per remaining value.
	r := strings.TrimSpace(strings.Repeat("%v ", len(v)))
	return fmt.Sprintf(r, v...)
}
|
||||||
|
|
||||||
|
// indent prefixes every line of v with the given number of spaces.
func indent(spaces int, v string) string {
	pad := strings.Repeat(" ", spaces)
	return pad + strings.ReplaceAll(v, "\n", "\n"+pad)
}
|
||||||
|
|
||||||
|
// nindent is indent with a leading newline prepended.
func nindent(spaces int, v string) string {
	return "\n" + indent(spaces, v)
}
|
||||||
|
|
||||||
|
// replace substitutes new for every occurrence of old in src.
func replace(old, new, src string) string {
	return strings.Replace(src, old, new, -1)
}
|
||||||
|
|
||||||
|
// plural returns one when count is exactly 1, otherwise many.
func plural(one, many string, count int) string {
	if count == 1 {
		return one
	}
	return many
}
|
||||||
|
|
||||||
|
// strslice coerces v to a []string: string slices pass through,
// other slices/arrays are converted element-wise via strval with nil
// entries dropped, and any scalar becomes a one-element slice.
func strslice(v interface{}) []string {
	switch v := v.(type) {
	case []string:
		return v
	case []interface{}:
		b := make([]string, 0, len(v))
		for _, s := range v {
			if s != nil {
				b = append(b, strval(s))
			}
		}
		return b
	default:
		// Fall back to reflection for typed slices such as []int.
		val := reflect.ValueOf(v)
		switch val.Kind() {
		case reflect.Array, reflect.Slice:
			l := val.Len()
			b := make([]string, 0, l)
			for i := 0; i < l; i++ {
				value := val.Index(i).Interface()
				if value != nil {
					b = append(b, strval(value))
				}
			}
			return b
		default:
			if v == nil {
				return []string{}
			}

			return []string{strval(v)}
		}
	}
}
|
||||||
|
|
||||||
|
// removeNilElements returns v with all nil entries dropped.
func removeNilElements(v []interface{}) []interface{} {
	filtered := make([]interface{}, 0, len(v))
	for _, item := range v {
		if item != nil {
			filtered = append(filtered, item)
		}
	}
	return filtered
}
|
||||||
|
|
||||||
|
// strval renders an arbitrary value as a string, preferring the
// native string form (string, []byte, error, fmt.Stringer) before
// falling back to fmt's default %v formatting.
func strval(v interface{}) string {
	switch s := v.(type) {
	case string:
		return s
	case []byte:
		return string(s)
	case error:
		return s.Error()
	case fmt.Stringer:
		return s.String()
	default:
		return fmt.Sprintf("%v", s)
	}
}
|
||||||
|
|
||||||
|
// trunc shortens s to c bytes: a positive c keeps the leading c
// bytes, a negative c keeps the trailing -c bytes; out-of-range
// counts return s unchanged.
func trunc(c int, s string) string {
	switch {
	case c < 0 && len(s)+c > 0:
		return s[len(s)+c:]
	case c >= 0 && len(s) > c:
		return s[:c]
	default:
		return s
	}
}
|
||||||
|
|
||||||
|
// join stringifies v's elements (via strslice) and joins them with sep.
func join(sep string, v interface{}) string {
	return strings.Join(strslice(v), sep)
}
|
||||||
|
|
||||||
|
// split breaks orig on sep and returns the pieces as a map keyed
// "_0", "_1", … (a template-friendly stand-in for a string slice).
func split(sep, orig string) map[string]string {
	pieces := strings.Split(orig, sep)
	out := make(map[string]string, len(pieces))
	for i, piece := range pieces {
		out["_"+strconv.Itoa(i)] = piece
	}
	return out
}
|
||||||
|
|
||||||
|
// splitn is split limited to at most n pieces (the last piece keeps
// the remainder), keyed "_0", "_1", ….
func splitn(sep string, n int, orig string) map[string]string {
	parts := strings.SplitN(orig, sep, n)
	res := make(map[string]string, len(parts))
	for i, v := range parts {
		res["_"+strconv.Itoa(i)] = v
	}
	return res
}
|
||||||
|
|
||||||
|
// substring creates a substring of the given string.
//
// If start is < 0, it is treated as 0 (substring from the beginning).
//
// If end is < 0 or bigger than s length, the substring runs to the
// end of s.
//
// Otherwise this returns s[start:end].
//
// Indices are clamped into range so the function never panics
// (previously e.g. a negative start combined with an out-of-range
// end indexed the string unchecked).
func substring(start, end int, s string) string {
	if start < 0 {
		start = 0
	}
	if end < 0 || end > len(s) {
		end = len(s)
	}
	if start > end {
		return ""
	}
	return s[start:end]
}
|
66
vendor/github.com/go-task/slim-sprig/url.go
generated
vendored
Normal file
66
vendor/github.com/go-task/slim-sprig/url.go
generated
vendored
Normal file
@ -0,0 +1,66 @@
|
|||||||
|
package sprig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// dictGetOrEmpty fetches key from dict as a string, returning ""
// for a missing key and panicking when the stored value is not a
// string.
func dictGetOrEmpty(dict map[string]interface{}, key string) string {
	value, ok := dict[key]
	if !ok {
		return ""
	}

	kind := reflect.TypeOf(value).Kind()
	if kind != reflect.String {
		panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, kind.String()))
	}
	return reflect.ValueOf(value).String()
}
|
||||||
|
|
||||||
|
// urlParse parses v into a dict with scheme, host, hostname, path,
// query, opaque, fragment and userinfo keys; panics when v is not a
// parsable URL.
func urlParse(v string) map[string]interface{} {
	dict := map[string]interface{}{}
	parsedURL, err := url.Parse(v)
	if err != nil {
		panic(fmt.Sprintf("unable to parse url: %s", err))
	}
	dict["scheme"] = parsedURL.Scheme
	dict["host"] = parsedURL.Host
	dict["hostname"] = parsedURL.Hostname()
	dict["path"] = parsedURL.Path
	dict["query"] = parsedURL.RawQuery
	dict["opaque"] = parsedURL.Opaque
	dict["fragment"] = parsedURL.Fragment
	if parsedURL.User != nil {
		dict["userinfo"] = parsedURL.User.String()
	} else {
		dict["userinfo"] = ""
	}

	return dict
}
|
||||||
|
|
||||||
|
// urlJoin reassembles a dict of the shape produced by urlParse back
// into a URL string; panics when the userinfo entry cannot be parsed.
func urlJoin(d map[string]interface{}) string {
	resURL := url.URL{
		Scheme:   dictGetOrEmpty(d, "scheme"),
		Host:     dictGetOrEmpty(d, "host"),
		Path:     dictGetOrEmpty(d, "path"),
		RawQuery: dictGetOrEmpty(d, "query"),
		Opaque:   dictGetOrEmpty(d, "opaque"),
		Fragment: dictGetOrEmpty(d, "fragment"),
	}
	userinfo := dictGetOrEmpty(d, "userinfo")
	var user *url.Userinfo
	if userinfo != "" {
		// Round-trip through a throwaway URL so net/url parses the
		// "user:pass" form for us.
		tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo))
		if err != nil {
			panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err))
		}
		user = tempURL.User
	}

	resURL.User = user
	return resURL.String()
}
|
7
vendor/github.com/google/pprof/AUTHORS
generated
vendored
Normal file
7
vendor/github.com/google/pprof/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
# This is the official list of pprof authors for copyright purposes.
|
||||||
|
# This file is distinct from the CONTRIBUTORS files.
|
||||||
|
# See the latter for an explanation.
|
||||||
|
# Names should be added to this file as:
|
||||||
|
# Name or Organization <email address>
|
||||||
|
# The email address is not required for organizations.
|
||||||
|
Google Inc.
|
16
vendor/github.com/google/pprof/CONTRIBUTORS
generated
vendored
Normal file
16
vendor/github.com/google/pprof/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
# People who have agreed to one of the CLAs and can contribute patches.
|
||||||
|
# The AUTHORS file lists the copyright holders; this file
|
||||||
|
# lists people. For example, Google employees are listed here
|
||||||
|
# but not in AUTHORS, because Google holds the copyright.
|
||||||
|
#
|
||||||
|
# https://developers.google.com/open-source/cla/individual
|
||||||
|
# https://developers.google.com/open-source/cla/corporate
|
||||||
|
#
|
||||||
|
# Names should be added to this file as:
|
||||||
|
# Name <email address>
|
||||||
|
Raul Silvera <rsilvera@google.com>
|
||||||
|
Tipp Moseley <tipp@google.com>
|
||||||
|
Hyoun Kyu Cho <netforce@google.com>
|
||||||
|
Martin Spier <spiermar@gmail.com>
|
||||||
|
Taco de Wolff <tacodewolff@gmail.com>
|
||||||
|
Andrew Hunter <andrewhhunter@gmail.com>
|
202
vendor/github.com/google/pprof/LICENSE
generated
vendored
Normal file
202
vendor/github.com/google/pprof/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
567
vendor/github.com/google/pprof/profile/encode.go
generated
vendored
Normal file
567
vendor/github.com/google/pprof/profile/encode.go
generated
vendored
Normal file
@ -0,0 +1,567 @@
|
|||||||
|
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package profile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (p *Profile) decoder() []decoder {
|
||||||
|
return profileDecoder
|
||||||
|
}
|
||||||
|
|
||||||
|
// preEncode populates the unexported fields to be used by encode
|
||||||
|
// (with suffix X) from the corresponding exported fields. The
|
||||||
|
// exported fields are cleared up to facilitate testing.
|
||||||
|
func (p *Profile) preEncode() {
|
||||||
|
strings := make(map[string]int)
|
||||||
|
addString(strings, "")
|
||||||
|
|
||||||
|
for _, st := range p.SampleType {
|
||||||
|
st.typeX = addString(strings, st.Type)
|
||||||
|
st.unitX = addString(strings, st.Unit)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, s := range p.Sample {
|
||||||
|
s.labelX = nil
|
||||||
|
var keys []string
|
||||||
|
for k := range s.Label {
|
||||||
|
keys = append(keys, k)
|
||||||
|
}
|
||||||
|
sort.Strings(keys)
|
||||||
|
for _, k := range keys {
|
||||||
|
vs := s.Label[k]
|
||||||
|
for _, v := range vs {
|
||||||
|
s.labelX = append(s.labelX,
|
||||||
|
label{
|
||||||
|
keyX: addString(strings, k),
|
||||||
|
strX: addString(strings, v),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var numKeys []string
|
||||||
|
for k := range s.NumLabel {
|
||||||
|
numKeys = append(numKeys, k)
|
||||||
|
}
|
||||||
|
sort.Strings(numKeys)
|
||||||
|
for _, k := range numKeys {
|
||||||
|
keyX := addString(strings, k)
|
||||||
|
vs := s.NumLabel[k]
|
||||||
|
units := s.NumUnit[k]
|
||||||
|
for i, v := range vs {
|
||||||
|
var unitX int64
|
||||||
|
if len(units) != 0 {
|
||||||
|
unitX = addString(strings, units[i])
|
||||||
|
}
|
||||||
|
s.labelX = append(s.labelX,
|
||||||
|
label{
|
||||||
|
keyX: keyX,
|
||||||
|
numX: v,
|
||||||
|
unitX: unitX,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.locationIDX = make([]uint64, len(s.Location))
|
||||||
|
for i, loc := range s.Location {
|
||||||
|
s.locationIDX[i] = loc.ID
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, m := range p.Mapping {
|
||||||
|
m.fileX = addString(strings, m.File)
|
||||||
|
m.buildIDX = addString(strings, m.BuildID)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, l := range p.Location {
|
||||||
|
for i, ln := range l.Line {
|
||||||
|
if ln.Function != nil {
|
||||||
|
l.Line[i].functionIDX = ln.Function.ID
|
||||||
|
} else {
|
||||||
|
l.Line[i].functionIDX = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if l.Mapping != nil {
|
||||||
|
l.mappingIDX = l.Mapping.ID
|
||||||
|
} else {
|
||||||
|
l.mappingIDX = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, f := range p.Function {
|
||||||
|
f.nameX = addString(strings, f.Name)
|
||||||
|
f.systemNameX = addString(strings, f.SystemName)
|
||||||
|
f.filenameX = addString(strings, f.Filename)
|
||||||
|
}
|
||||||
|
|
||||||
|
p.dropFramesX = addString(strings, p.DropFrames)
|
||||||
|
p.keepFramesX = addString(strings, p.KeepFrames)
|
||||||
|
|
||||||
|
if pt := p.PeriodType; pt != nil {
|
||||||
|
pt.typeX = addString(strings, pt.Type)
|
||||||
|
pt.unitX = addString(strings, pt.Unit)
|
||||||
|
}
|
||||||
|
|
||||||
|
p.commentX = nil
|
||||||
|
for _, c := range p.Comments {
|
||||||
|
p.commentX = append(p.commentX, addString(strings, c))
|
||||||
|
}
|
||||||
|
|
||||||
|
p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)
|
||||||
|
|
||||||
|
p.stringTable = make([]string, len(strings))
|
||||||
|
for s, i := range strings {
|
||||||
|
p.stringTable[i] = s
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Profile) encode(b *buffer) {
|
||||||
|
for _, x := range p.SampleType {
|
||||||
|
encodeMessage(b, 1, x)
|
||||||
|
}
|
||||||
|
for _, x := range p.Sample {
|
||||||
|
encodeMessage(b, 2, x)
|
||||||
|
}
|
||||||
|
for _, x := range p.Mapping {
|
||||||
|
encodeMessage(b, 3, x)
|
||||||
|
}
|
||||||
|
for _, x := range p.Location {
|
||||||
|
encodeMessage(b, 4, x)
|
||||||
|
}
|
||||||
|
for _, x := range p.Function {
|
||||||
|
encodeMessage(b, 5, x)
|
||||||
|
}
|
||||||
|
encodeStrings(b, 6, p.stringTable)
|
||||||
|
encodeInt64Opt(b, 7, p.dropFramesX)
|
||||||
|
encodeInt64Opt(b, 8, p.keepFramesX)
|
||||||
|
encodeInt64Opt(b, 9, p.TimeNanos)
|
||||||
|
encodeInt64Opt(b, 10, p.DurationNanos)
|
||||||
|
if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) {
|
||||||
|
encodeMessage(b, 11, p.PeriodType)
|
||||||
|
}
|
||||||
|
encodeInt64Opt(b, 12, p.Period)
|
||||||
|
encodeInt64s(b, 13, p.commentX)
|
||||||
|
encodeInt64(b, 14, p.defaultSampleTypeX)
|
||||||
|
}
|
||||||
|
|
||||||
|
var profileDecoder = []decoder{
|
||||||
|
nil, // 0
|
||||||
|
// repeated ValueType sample_type = 1
|
||||||
|
func(b *buffer, m message) error {
|
||||||
|
x := new(ValueType)
|
||||||
|
pp := m.(*Profile)
|
||||||
|
pp.SampleType = append(pp.SampleType, x)
|
||||||
|
return decodeMessage(b, x)
|
||||||
|
},
|
||||||
|
// repeated Sample sample = 2
|
||||||
|
func(b *buffer, m message) error {
|
||||||
|
x := new(Sample)
|
||||||
|
pp := m.(*Profile)
|
||||||
|
pp.Sample = append(pp.Sample, x)
|
||||||
|
return decodeMessage(b, x)
|
||||||
|
},
|
||||||
|
// repeated Mapping mapping = 3
|
||||||
|
func(b *buffer, m message) error {
|
||||||
|
x := new(Mapping)
|
||||||
|
pp := m.(*Profile)
|
||||||
|
pp.Mapping = append(pp.Mapping, x)
|
||||||
|
return decodeMessage(b, x)
|
||||||
|
},
|
||||||
|
// repeated Location location = 4
|
||||||
|
func(b *buffer, m message) error {
|
||||||
|
x := new(Location)
|
||||||
|
x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer
|
||||||
|
pp := m.(*Profile)
|
||||||
|
pp.Location = append(pp.Location, x)
|
||||||
|
err := decodeMessage(b, x)
|
||||||
|
var tmp []Line
|
||||||
|
x.Line = append(tmp, x.Line...) // Shrink to allocated size
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
// repeated Function function = 5
|
||||||
|
func(b *buffer, m message) error {
|
||||||
|
x := new(Function)
|
||||||
|
pp := m.(*Profile)
|
||||||
|
pp.Function = append(pp.Function, x)
|
||||||
|
return decodeMessage(b, x)
|
||||||
|
},
|
||||||
|
// repeated string string_table = 6
|
||||||
|
func(b *buffer, m message) error {
|
||||||
|
err := decodeStrings(b, &m.(*Profile).stringTable)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if m.(*Profile).stringTable[0] != "" {
|
||||||
|
return errors.New("string_table[0] must be ''")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
// int64 drop_frames = 7
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
|
||||||
|
// int64 keep_frames = 8
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
|
||||||
|
// int64 time_nanos = 9
|
||||||
|
func(b *buffer, m message) error {
|
||||||
|
if m.(*Profile).TimeNanos != 0 {
|
||||||
|
return errConcatProfile
|
||||||
|
}
|
||||||
|
return decodeInt64(b, &m.(*Profile).TimeNanos)
|
||||||
|
},
|
||||||
|
// int64 duration_nanos = 10
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
|
||||||
|
// ValueType period_type = 11
|
||||||
|
func(b *buffer, m message) error {
|
||||||
|
x := new(ValueType)
|
||||||
|
pp := m.(*Profile)
|
||||||
|
pp.PeriodType = x
|
||||||
|
return decodeMessage(b, x)
|
||||||
|
},
|
||||||
|
// int64 period = 12
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
|
||||||
|
// repeated int64 comment = 13
|
||||||
|
func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
|
||||||
|
// int64 defaultSampleType = 14
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
|
||||||
|
}
|
||||||
|
|
||||||
|
// postDecode takes the unexported fields populated by decode (with
|
||||||
|
// suffix X) and populates the corresponding exported fields.
|
||||||
|
// The unexported fields are cleared up to facilitate testing.
|
||||||
|
func (p *Profile) postDecode() error {
|
||||||
|
var err error
|
||||||
|
mappings := make(map[uint64]*Mapping, len(p.Mapping))
|
||||||
|
mappingIds := make([]*Mapping, len(p.Mapping)+1)
|
||||||
|
for _, m := range p.Mapping {
|
||||||
|
m.File, err = getString(p.stringTable, &m.fileX, err)
|
||||||
|
m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
|
||||||
|
if m.ID < uint64(len(mappingIds)) {
|
||||||
|
mappingIds[m.ID] = m
|
||||||
|
} else {
|
||||||
|
mappings[m.ID] = m
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
functions := make(map[uint64]*Function, len(p.Function))
|
||||||
|
functionIds := make([]*Function, len(p.Function)+1)
|
||||||
|
for _, f := range p.Function {
|
||||||
|
f.Name, err = getString(p.stringTable, &f.nameX, err)
|
||||||
|
f.SystemName, err = getString(p.stringTable, &f.systemNameX, err)
|
||||||
|
f.Filename, err = getString(p.stringTable, &f.filenameX, err)
|
||||||
|
if f.ID < uint64(len(functionIds)) {
|
||||||
|
functionIds[f.ID] = f
|
||||||
|
} else {
|
||||||
|
functions[f.ID] = f
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
locations := make(map[uint64]*Location, len(p.Location))
|
||||||
|
locationIds := make([]*Location, len(p.Location)+1)
|
||||||
|
for _, l := range p.Location {
|
||||||
|
if id := l.mappingIDX; id < uint64(len(mappingIds)) {
|
||||||
|
l.Mapping = mappingIds[id]
|
||||||
|
} else {
|
||||||
|
l.Mapping = mappings[id]
|
||||||
|
}
|
||||||
|
l.mappingIDX = 0
|
||||||
|
for i, ln := range l.Line {
|
||||||
|
if id := ln.functionIDX; id != 0 {
|
||||||
|
l.Line[i].functionIDX = 0
|
||||||
|
if id < uint64(len(functionIds)) {
|
||||||
|
l.Line[i].Function = functionIds[id]
|
||||||
|
} else {
|
||||||
|
l.Line[i].Function = functions[id]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if l.ID < uint64(len(locationIds)) {
|
||||||
|
locationIds[l.ID] = l
|
||||||
|
} else {
|
||||||
|
locations[l.ID] = l
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, st := range p.SampleType {
|
||||||
|
st.Type, err = getString(p.stringTable, &st.typeX, err)
|
||||||
|
st.Unit, err = getString(p.stringTable, &st.unitX, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, s := range p.Sample {
|
||||||
|
labels := make(map[string][]string, len(s.labelX))
|
||||||
|
numLabels := make(map[string][]int64, len(s.labelX))
|
||||||
|
numUnits := make(map[string][]string, len(s.labelX))
|
||||||
|
for _, l := range s.labelX {
|
||||||
|
var key, value string
|
||||||
|
key, err = getString(p.stringTable, &l.keyX, err)
|
||||||
|
if l.strX != 0 {
|
||||||
|
value, err = getString(p.stringTable, &l.strX, err)
|
||||||
|
labels[key] = append(labels[key], value)
|
||||||
|
} else if l.numX != 0 || l.unitX != 0 {
|
||||||
|
numValues := numLabels[key]
|
||||||
|
units := numUnits[key]
|
||||||
|
if l.unitX != 0 {
|
||||||
|
var unit string
|
||||||
|
unit, err = getString(p.stringTable, &l.unitX, err)
|
||||||
|
units = padStringArray(units, len(numValues))
|
||||||
|
numUnits[key] = append(units, unit)
|
||||||
|
}
|
||||||
|
numLabels[key] = append(numLabels[key], l.numX)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(labels) > 0 {
|
||||||
|
s.Label = labels
|
||||||
|
}
|
||||||
|
if len(numLabels) > 0 {
|
||||||
|
s.NumLabel = numLabels
|
||||||
|
for key, units := range numUnits {
|
||||||
|
if len(units) > 0 {
|
||||||
|
numUnits[key] = padStringArray(units, len(numLabels[key]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.NumUnit = numUnits
|
||||||
|
}
|
||||||
|
s.Location = make([]*Location, len(s.locationIDX))
|
||||||
|
for i, lid := range s.locationIDX {
|
||||||
|
if lid < uint64(len(locationIds)) {
|
||||||
|
s.Location[i] = locationIds[lid]
|
||||||
|
} else {
|
||||||
|
s.Location[i] = locations[lid]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.locationIDX = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err)
|
||||||
|
p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err)
|
||||||
|
|
||||||
|
if pt := p.PeriodType; pt == nil {
|
||||||
|
p.PeriodType = &ValueType{}
|
||||||
|
}
|
||||||
|
|
||||||
|
if pt := p.PeriodType; pt != nil {
|
||||||
|
pt.Type, err = getString(p.stringTable, &pt.typeX, err)
|
||||||
|
pt.Unit, err = getString(p.stringTable, &pt.unitX, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, i := range p.commentX {
|
||||||
|
var c string
|
||||||
|
c, err = getString(p.stringTable, &i, err)
|
||||||
|
p.Comments = append(p.Comments, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
p.commentX = nil
|
||||||
|
p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
|
||||||
|
p.stringTable = nil
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// padStringArray pads arr with enough empty strings to make arr
|
||||||
|
// length l when arr's length is less than l.
|
||||||
|
func padStringArray(arr []string, l int) []string {
|
||||||
|
if l <= len(arr) {
|
||||||
|
return arr
|
||||||
|
}
|
||||||
|
return append(arr, make([]string, l-len(arr))...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ValueType) decoder() []decoder {
|
||||||
|
return valueTypeDecoder
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ValueType) encode(b *buffer) {
|
||||||
|
encodeInt64Opt(b, 1, p.typeX)
|
||||||
|
encodeInt64Opt(b, 2, p.unitX)
|
||||||
|
}
|
||||||
|
|
||||||
|
var valueTypeDecoder = []decoder{
|
||||||
|
nil, // 0
|
||||||
|
// optional int64 type = 1
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
|
||||||
|
// optional int64 unit = 2
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Sample) decoder() []decoder {
|
||||||
|
return sampleDecoder
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Sample) encode(b *buffer) {
|
||||||
|
encodeUint64s(b, 1, p.locationIDX)
|
||||||
|
encodeInt64s(b, 2, p.Value)
|
||||||
|
for _, x := range p.labelX {
|
||||||
|
encodeMessage(b, 3, x)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var sampleDecoder = []decoder{
|
||||||
|
nil, // 0
|
||||||
|
// repeated uint64 location = 1
|
||||||
|
func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
|
||||||
|
// repeated int64 value = 2
|
||||||
|
func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
|
||||||
|
// repeated Label label = 3
|
||||||
|
func(b *buffer, m message) error {
|
||||||
|
s := m.(*Sample)
|
||||||
|
n := len(s.labelX)
|
||||||
|
s.labelX = append(s.labelX, label{})
|
||||||
|
return decodeMessage(b, &s.labelX[n])
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p label) decoder() []decoder {
|
||||||
|
return labelDecoder
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p label) encode(b *buffer) {
|
||||||
|
encodeInt64Opt(b, 1, p.keyX)
|
||||||
|
encodeInt64Opt(b, 2, p.strX)
|
||||||
|
encodeInt64Opt(b, 3, p.numX)
|
||||||
|
encodeInt64Opt(b, 4, p.unitX)
|
||||||
|
}
|
||||||
|
|
||||||
|
var labelDecoder = []decoder{
|
||||||
|
nil, // 0
|
||||||
|
// optional int64 key = 1
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) },
|
||||||
|
// optional int64 str = 2
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) },
|
||||||
|
// optional int64 num = 3
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) },
|
||||||
|
// optional int64 num = 4
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) },
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Mapping) decoder() []decoder {
|
||||||
|
return mappingDecoder
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Mapping) encode(b *buffer) {
|
||||||
|
encodeUint64Opt(b, 1, p.ID)
|
||||||
|
encodeUint64Opt(b, 2, p.Start)
|
||||||
|
encodeUint64Opt(b, 3, p.Limit)
|
||||||
|
encodeUint64Opt(b, 4, p.Offset)
|
||||||
|
encodeInt64Opt(b, 5, p.fileX)
|
||||||
|
encodeInt64Opt(b, 6, p.buildIDX)
|
||||||
|
encodeBoolOpt(b, 7, p.HasFunctions)
|
||||||
|
encodeBoolOpt(b, 8, p.HasFilenames)
|
||||||
|
encodeBoolOpt(b, 9, p.HasLineNumbers)
|
||||||
|
encodeBoolOpt(b, 10, p.HasInlineFrames)
|
||||||
|
}
|
||||||
|
|
||||||
|
var mappingDecoder = []decoder{
|
||||||
|
nil, // 0
|
||||||
|
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1
|
||||||
|
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_offset = 2
|
||||||
|
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3
|
||||||
|
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5
|
||||||
|
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6
|
||||||
|
func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7
|
||||||
|
func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) }, // optional bool has_filenames = 8
|
||||||
|
func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9
|
||||||
|
func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Location) decoder() []decoder {
|
||||||
|
return locationDecoder
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Location) encode(b *buffer) {
|
||||||
|
encodeUint64Opt(b, 1, p.ID)
|
||||||
|
encodeUint64Opt(b, 2, p.mappingIDX)
|
||||||
|
encodeUint64Opt(b, 3, p.Address)
|
||||||
|
for i := range p.Line {
|
||||||
|
encodeMessage(b, 4, &p.Line[i])
|
||||||
|
}
|
||||||
|
encodeBoolOpt(b, 5, p.IsFolded)
|
||||||
|
}
|
||||||
|
|
||||||
|
var locationDecoder = []decoder{
|
||||||
|
nil, // 0
|
||||||
|
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1;
|
||||||
|
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
|
||||||
|
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3;
|
||||||
|
func(b *buffer, m message) error { // repeated Line line = 4
|
||||||
|
pp := m.(*Location)
|
||||||
|
n := len(pp.Line)
|
||||||
|
pp.Line = append(pp.Line, Line{})
|
||||||
|
return decodeMessage(b, &pp.Line[n])
|
||||||
|
},
|
||||||
|
func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5;
|
||||||
|
}
|
||||||
|
|
||||||
|
// decoder returns the table of field-decoding functions for the Line
// message (see lineDecoder below).
func (p *Line) decoder() []decoder {
	return lineDecoder
}
|
||||||
|
|
||||||
|
// encode serializes p into b following the profile.proto Line message
// layout (function_id = 1, line = 2).
func (p *Line) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.functionIDX)
	encodeInt64Opt(b, 2, p.Line)
}
|
||||||
|
|
||||||
|
// lineDecoder maps Line message field numbers (slice indices) to their
// protobuf decoding functions.
var lineDecoder = []decoder{
	nil, // 0
	// optional uint64 function_id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
	// optional int64 line = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
}
|
||||||
|
|
||||||
|
// decoder returns the table of field-decoding functions for the
// Function message (see functionDecoder below).
func (p *Function) decoder() []decoder {
	return functionDecoder
}
|
||||||
|
|
||||||
|
// encode serializes p into b following the profile.proto Function
// message layout. The *X fields are int64 string-table indices
// (resolved via getString/addString elsewhere in this package).
func (p *Function) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeInt64Opt(b, 2, p.nameX)
	encodeInt64Opt(b, 3, p.systemNameX)
	encodeInt64Opt(b, 4, p.filenameX)
	encodeInt64Opt(b, 5, p.StartLine)
}
|
||||||
|
|
||||||
|
// functionDecoder maps Function message field numbers (slice indices)
// to their protobuf decoding functions.
var functionDecoder = []decoder{
	nil, // 0
	// optional uint64 id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
	// optional int64 function_name = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
	// optional int64 function_system_name = 3
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
	// optional int64 filename = 4 (scalar string-table index, not repeated)
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
	// optional int64 start_line = 5
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
}
|
||||||
|
|
||||||
|
// addString interns s in the string table, assigning the next
// sequential index on first sight, and returns the index as int64.
func addString(strings map[string]int, s string) int64 {
	if idx, seen := strings[s]; seen {
		return int64(idx)
	}
	idx := len(strings)
	strings[s] = idx
	return int64(idx)
}
|
||||||
|
|
||||||
|
func getString(strings []string, strng *int64, err error) (string, error) {
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
s := int(*strng)
|
||||||
|
if s < 0 || s >= len(strings) {
|
||||||
|
return "", errMalformed
|
||||||
|
}
|
||||||
|
*strng = 0
|
||||||
|
return strings[s], nil
|
||||||
|
}
|
270
vendor/github.com/google/pprof/profile/filter.go
generated
vendored
Normal file
270
vendor/github.com/google/pprof/profile/filter.go
generated
vendored
Normal file
@ -0,0 +1,270 @@
|
|||||||
|
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package profile
|
||||||
|
|
||||||
|
// Implements methods to filter samples from profiles.
|
||||||
|
|
||||||
|
import "regexp"
|
||||||
|
|
||||||
|
// FilterSamplesByName filters the samples in a profile and only keeps
// samples where at least one frame matches focus but none match ignore.
// Returns true if the corresponding regexp matched at least one sample.
// hide removes matching lines from locations; show keeps only matching
// lines. Locations left with no lines cause their samples to be dropped.
func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
	// focusOrIgnore[id] == true means focused, false means ignored;
	// absent means the location decides nothing for its samples.
	focusOrIgnore := make(map[uint64]bool)
	hidden := make(map[uint64]bool)
	for _, l := range p.Location {
		// ignore takes precedence over focus for the same location.
		if ignore != nil && l.matchesName(ignore) {
			im = true
			focusOrIgnore[l.ID] = false
		} else if focus == nil || l.matchesName(focus) {
			fm = true
			focusOrIgnore[l.ID] = true
		}

		if hide != nil && l.matchesName(hide) {
			hm = true
			l.Line = l.unmatchedLines(hide)
			if len(l.Line) == 0 {
				hidden[l.ID] = true
			}
		}
		if show != nil {
			l.Line = l.matchedLines(show)
			if len(l.Line) == 0 {
				hidden[l.ID] = true
			} else {
				hnm = true
			}
		}
	}

	s := make([]*Sample, 0, len(p.Sample))
	for _, sample := range p.Sample {
		if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
			if len(hidden) > 0 {
				// Strip fully-hidden locations from the sample's stack.
				var locs []*Location
				for _, loc := range sample.Location {
					if !hidden[loc.ID] {
						locs = append(locs, loc)
					}
				}
				if len(locs) == 0 {
					// Remove sample with no locations (by not adding it to s).
					continue
				}
				sample.Location = locs
			}
			s = append(s, sample)
		}
	}
	p.Sample = s

	return
}
|
||||||
|
|
||||||
|
// ShowFrom drops all stack frames above the highest matching frame and returns
// whether a match was found. If showFrom is nil it returns false and does not
// modify the profile.
//
// Example: consider a sample with frames [A, B, C, B], where A is the root.
// ShowFrom(nil) returns false and has frames [A, B, C, B].
// ShowFrom(A) returns true and has frames [A, B, C, B].
// ShowFrom(B) returns true and has frames [B, C, B].
// ShowFrom(C) returns true and has frames [C, B].
// ShowFrom(D) returns false and drops the sample because no frames remain.
func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) {
	if showFrom == nil {
		return false
	}
	// showFromLocs stores location IDs that matched ShowFrom.
	showFromLocs := make(map[uint64]bool)
	// Apply to locations.
	for _, loc := range p.Location {
		if filterShowFromLocation(loc, showFrom) {
			showFromLocs[loc.ID] = true
			matched = true
		}
	}
	// For all samples, strip locations after the highest matching one.
	s := make([]*Sample, 0, len(p.Sample))
	for _, sample := range p.Sample {
		// Scan from the last index toward the first; the first match in
		// this order determines how much of the stack is kept. Samples
		// with no matching location are dropped (never appended to s).
		for i := len(sample.Location) - 1; i >= 0; i-- {
			if showFromLocs[sample.Location[i].ID] {
				sample.Location = sample.Location[:i+1]
				s = append(s, sample)
				break
			}
		}
	}
	p.Sample = s
	return matched
}
|
||||||
|
|
||||||
|
// filterShowFromLocation tests a showFrom regex against a location, removes
|
||||||
|
// lines after the last match and returns whether a match was found. If the
|
||||||
|
// mapping is matched, then all lines are kept.
|
||||||
|
func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool {
|
||||||
|
if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if i := loc.lastMatchedLineIndex(showFrom); i >= 0 {
|
||||||
|
loc.Line = loc.Line[:i+1]
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// lastMatchedLineIndex returns the index of the last line that matches a regex,
|
||||||
|
// or -1 if no match is found.
|
||||||
|
func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int {
|
||||||
|
for i := len(loc.Line) - 1; i >= 0; i-- {
|
||||||
|
if fn := loc.Line[i].Function; fn != nil {
|
||||||
|
if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilterTagsByName filters the tags in a profile and only keeps
|
||||||
|
// tags that match show and not hide.
|
||||||
|
func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) {
|
||||||
|
matchRemove := func(name string) bool {
|
||||||
|
matchShow := show == nil || show.MatchString(name)
|
||||||
|
matchHide := hide != nil && hide.MatchString(name)
|
||||||
|
|
||||||
|
if matchShow {
|
||||||
|
sm = true
|
||||||
|
}
|
||||||
|
if matchHide {
|
||||||
|
hm = true
|
||||||
|
}
|
||||||
|
return !matchShow || matchHide
|
||||||
|
}
|
||||||
|
for _, s := range p.Sample {
|
||||||
|
for lab := range s.Label {
|
||||||
|
if matchRemove(lab) {
|
||||||
|
delete(s.Label, lab)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for lab := range s.NumLabel {
|
||||||
|
if matchRemove(lab) {
|
||||||
|
delete(s.NumLabel, lab)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// matchesName returns whether the location matches the regular
|
||||||
|
// expression. It checks any available function names, file names, and
|
||||||
|
// mapping object filename.
|
||||||
|
func (loc *Location) matchesName(re *regexp.Regexp) bool {
|
||||||
|
for _, ln := range loc.Line {
|
||||||
|
if fn := ln.Function; fn != nil {
|
||||||
|
if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if m := loc.Mapping; m != nil && re.MatchString(m.File) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// unmatchedLines returns the lines in the location that do not match
|
||||||
|
// the regular expression.
|
||||||
|
func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line {
|
||||||
|
if m := loc.Mapping; m != nil && re.MatchString(m.File) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var lines []Line
|
||||||
|
for _, ln := range loc.Line {
|
||||||
|
if fn := ln.Function; fn != nil {
|
||||||
|
if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lines = append(lines, ln)
|
||||||
|
}
|
||||||
|
return lines
|
||||||
|
}
|
||||||
|
|
||||||
|
// matchedLines returns the lines in the location that match
|
||||||
|
// the regular expression.
|
||||||
|
func (loc *Location) matchedLines(re *regexp.Regexp) []Line {
|
||||||
|
if m := loc.Mapping; m != nil && re.MatchString(m.File) {
|
||||||
|
return loc.Line
|
||||||
|
}
|
||||||
|
var lines []Line
|
||||||
|
for _, ln := range loc.Line {
|
||||||
|
if fn := ln.Function; fn != nil {
|
||||||
|
if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lines = append(lines, ln)
|
||||||
|
}
|
||||||
|
return lines
|
||||||
|
}
|
||||||
|
|
||||||
|
// focusedAndNotIgnored looks up a slice of ids against a map of
|
||||||
|
// focused/ignored locations. The map only contains locations that are
|
||||||
|
// explicitly focused or ignored. Returns whether there is at least
|
||||||
|
// one focused location but no ignored locations.
|
||||||
|
func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool {
|
||||||
|
var f bool
|
||||||
|
for _, loc := range locs {
|
||||||
|
if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore {
|
||||||
|
if focus {
|
||||||
|
// Found focused location. Must keep searching in case there
|
||||||
|
// is an ignored one as well.
|
||||||
|
f = true
|
||||||
|
} else {
|
||||||
|
// Found ignored location. Can return false right away.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
// TagMatch selects tags for filtering. Implementations report whether
// the given sample should be treated as a match.
type TagMatch func(s *Sample) bool
|
||||||
|
|
||||||
|
// FilterSamplesByTag removes all samples from the profile, except
|
||||||
|
// those that match focus and do not match the ignore regular
|
||||||
|
// expression.
|
||||||
|
func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) {
|
||||||
|
samples := make([]*Sample, 0, len(p.Sample))
|
||||||
|
for _, s := range p.Sample {
|
||||||
|
focused, ignored := true, false
|
||||||
|
if focus != nil {
|
||||||
|
focused = focus(s)
|
||||||
|
}
|
||||||
|
if ignore != nil {
|
||||||
|
ignored = ignore(s)
|
||||||
|
}
|
||||||
|
fm = fm || focused
|
||||||
|
im = im || ignored
|
||||||
|
if focused && !ignored {
|
||||||
|
samples = append(samples, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.Sample = samples
|
||||||
|
return
|
||||||
|
}
|
64
vendor/github.com/google/pprof/profile/index.go
generated
vendored
Normal file
64
vendor/github.com/google/pprof/profile/index.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
|||||||
|
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package profile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SampleIndexByName returns the appropriate index for a value of sample index.
// If numeric, it returns the number, otherwise it looks up the text in the
// profile sample types. An empty sampleIndex selects DefaultSampleType if
// set, else the last sample type.
func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) {
	if sampleIndex == "" {
		if dst := p.DefaultSampleType; dst != "" {
			for i, t := range sampleTypes(p) {
				if t == dst {
					return i, nil
				}
			}
		}
		// By default select the last sample value
		return len(p.SampleType) - 1, nil
	}
	// Numeric index: must be within range of the declared sample types.
	if i, err := strconv.Atoi(sampleIndex); err == nil {
		if i < 0 || i >= len(p.SampleType) {
			return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1)
		}
		return i, nil
	}

	// Remove the inuse_ prefix to support legacy pprof options
	// "inuse_space" and "inuse_objects" for profiles containing types
	// "space" and "objects".
	noInuse := strings.TrimPrefix(sampleIndex, "inuse_")
	for i, t := range p.SampleType {
		if t.Type == sampleIndex || t.Type == noInuse {
			return i, nil
		}
	}

	return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p))
}
|
||||||
|
|
||||||
|
func sampleTypes(p *Profile) []string {
|
||||||
|
types := make([]string, len(p.SampleType))
|
||||||
|
for i, t := range p.SampleType {
|
||||||
|
types[i] = t.Type
|
||||||
|
}
|
||||||
|
return types
|
||||||
|
}
|
315
vendor/github.com/google/pprof/profile/legacy_java_profile.go
generated
vendored
Normal file
315
vendor/github.com/google/pprof/profile/legacy_java_profile.go
generated
vendored
Normal file
@ -0,0 +1,315 @@
|
|||||||
|
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// This file implements parsers to convert java legacy profiles into
|
||||||
|
// the profile.proto format.
|
||||||
|
|
||||||
|
package profile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Regular expressions recognizing the pieces of a legacy java profile:
// "key=value" header attributes, sample lines ("count value @ addrs"),
// and location lines with their optional "(file:line)" or "(path)"
// suffixes.
var (
	attributeRx            = regexp.MustCompile(`([\w ]+)=([\w ]+)`)
	javaSampleRx           = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`)
	javaLocationRx         = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`)
	javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`)
	javaLocationPathRx     = regexp.MustCompile(`^(.*)\s+\((.*)\)$`)
)
|
||||||
|
|
||||||
|
// javaCPUProfile returns a new Profile from profilez data.
// b is the profile bytes after the header, period is the profiling
// period, and parse is a function to parse 8-byte chunks from the
// profile in its native endianness.
func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
	p := &Profile{
		// Period is given in microseconds; convert to nanoseconds.
		Period:     period * 1000,
		PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
		SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}},
	}
	var err error
	var locs map[uint64]*Location
	if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil {
		return nil, err
	}

	if err = parseJavaLocations(b, locs, p); err != nil {
		return nil, err
	}

	// Strip out addresses for better merge.
	if err = p.Aggregate(true, true, true, true, false); err != nil {
		return nil, err
	}

	return p, nil
}
|
||||||
|
|
||||||
|
// parseJavaProfile returns a new profile from heapz or contentionz
// data. b is the profile bytes after the header. The profile kind is
// chosen from the first line of the input.
func parseJavaProfile(b []byte) (*Profile, error) {
	// Split off the first line (the profile-type banner).
	h := bytes.SplitAfterN(b, []byte("\n"), 2)
	if len(h) < 2 {
		return nil, errUnrecognized
	}

	p := &Profile{
		PeriodType: &ValueType{},
	}
	header := string(bytes.TrimSpace(h[0]))

	var err error
	var pType string
	switch header {
	case "--- heapz 1 ---":
		pType = "heap"
	case "--- contentionz 1 ---":
		pType = "contention"
	default:
		return nil, errUnrecognized
	}

	// Header attributes, then samples, then location descriptions.
	if b, err = parseJavaHeader(pType, h[1], p); err != nil {
		return nil, err
	}
	var locs map[uint64]*Location
	if b, locs, err = parseJavaSamples(pType, b, p); err != nil {
		return nil, err
	}
	if err = parseJavaLocations(b, locs, p); err != nil {
		return nil, err
	}

	// Strip out addresses for better merge.
	if err = p.Aggregate(true, true, true, true, false); err != nil {
		return nil, err
	}

	return p, nil
}
|
||||||
|
|
||||||
|
// parseJavaHeader parses the attribute section on a java profile and
// populates a profile. Returns the remainder of the buffer after all
// attributes. Parsing stops at the first line that is not a valid
// "key=value" attribute.
func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) {
	nextNewLine := bytes.IndexByte(b, byte('\n'))
	for nextNewLine != -1 {
		line := string(bytes.TrimSpace(b[0:nextNewLine]))
		if line != "" {
			h := attributeRx.FindStringSubmatch(line)
			if h == nil {
				// Not a valid attribute, exit.
				return b, nil
			}

			attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2])
			var err error
			// Attributes are namespaced by profile type.
			switch pType + "/" + attribute {
			case "heap/format", "cpu/format", "contention/format":
				if value != "java" {
					return nil, errUnrecognized
				}
			case "heap/resolution":
				p.SampleType = []*ValueType{
					{Type: "inuse_objects", Unit: "count"},
					{Type: "inuse_space", Unit: value},
				}
			case "contention/resolution":
				p.SampleType = []*ValueType{
					{Type: "contentions", Unit: "count"},
					{Type: "delay", Unit: value},
				}
			case "contention/sampling period":
				p.PeriodType = &ValueType{
					Type: "contentions", Unit: "count",
				}
				if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil {
					return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
				}
			case "contention/ms since reset":
				millis, err := strconv.ParseInt(value, 0, 64)
				if err != nil {
					return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
				}
				p.DurationNanos = millis * 1000 * 1000
			default:
				return nil, errUnrecognized
			}
		}
		// Grab next line.
		b = b[nextNewLine+1:]
		nextNewLine = bytes.IndexByte(b, byte('\n'))
	}
	return b, nil
}
|
||||||
|
|
||||||
|
// parseJavaSamples parses the samples from a java profile and
|
||||||
|
// populates the Samples in a profile. Returns the remainder of the
|
||||||
|
// buffer after the samples.
|
||||||
|
func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) {
|
||||||
|
nextNewLine := bytes.IndexByte(b, byte('\n'))
|
||||||
|
locs := make(map[uint64]*Location)
|
||||||
|
for nextNewLine != -1 {
|
||||||
|
line := string(bytes.TrimSpace(b[0:nextNewLine]))
|
||||||
|
if line != "" {
|
||||||
|
sample := javaSampleRx.FindStringSubmatch(line)
|
||||||
|
if sample == nil {
|
||||||
|
// Not a valid sample, exit.
|
||||||
|
return b, locs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Java profiles have data/fields inverted compared to other
|
||||||
|
// profile types.
|
||||||
|
var err error
|
||||||
|
value1, value2, value3 := sample[2], sample[1], sample[3]
|
||||||
|
addrs, err := parseHexAddresses(value3)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var sloc []*Location
|
||||||
|
for _, addr := range addrs {
|
||||||
|
loc := locs[addr]
|
||||||
|
if locs[addr] == nil {
|
||||||
|
loc = &Location{
|
||||||
|
Address: addr,
|
||||||
|
}
|
||||||
|
p.Location = append(p.Location, loc)
|
||||||
|
locs[addr] = loc
|
||||||
|
}
|
||||||
|
sloc = append(sloc, loc)
|
||||||
|
}
|
||||||
|
s := &Sample{
|
||||||
|
Value: make([]int64, 2),
|
||||||
|
Location: sloc,
|
||||||
|
}
|
||||||
|
|
||||||
|
if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
|
||||||
|
}
|
||||||
|
if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch pType {
|
||||||
|
case "heap":
|
||||||
|
const javaHeapzSamplingRate = 524288 // 512K
|
||||||
|
if s.Value[0] == 0 {
|
||||||
|
return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line)
|
||||||
|
}
|
||||||
|
s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}}
|
||||||
|
s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate)
|
||||||
|
case "contention":
|
||||||
|
if period := p.Period; period != 0 {
|
||||||
|
s.Value[0] = s.Value[0] * p.Period
|
||||||
|
s.Value[1] = s.Value[1] * p.Period
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.Sample = append(p.Sample, s)
|
||||||
|
}
|
||||||
|
// Grab next line.
|
||||||
|
b = b[nextNewLine+1:]
|
||||||
|
nextNewLine = bytes.IndexByte(b, byte('\n'))
|
||||||
|
}
|
||||||
|
return b, locs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseJavaLocations parses the location information in a java
// profile and populates the Locations in a profile. It uses the
// location addresses from the profile as the key to find each
// location; functions are deduplicated by name.
func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error {
	r := bytes.NewBuffer(b)
	fns := make(map[string]*Function)
	for {
		line, err := r.ReadString('\n')
		if err != nil {
			if err != io.EOF {
				return err
			}
			// EOF with a non-empty final line: process it once more.
			if line == "" {
				break
			}
		}

		if line = strings.TrimSpace(line); line == "" {
			continue
		}

		jloc := javaLocationRx.FindStringSubmatch(line)
		if len(jloc) != 3 {
			continue
		}
		addr, err := strconv.ParseUint(jloc[1], 16, 64)
		if err != nil {
			return fmt.Errorf("parsing sample %s: %v", line, err)
		}
		loc := locs[addr]
		if loc == nil {
			// Unused/unseen
			continue
		}
		var lineFunc, lineFile string
		var lineNo int64

		if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 {
			// Found a line of the form: "function (file:line)"
			lineFunc, lineFile = fileLine[1], fileLine[2]
			if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 {
				lineNo = n
			}
		} else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 {
			// If there's not a file:line, it's a shared library path.
			// The path isn't interesting, so just give the .so.
			lineFunc, lineFile = filePath[1], filepath.Base(filePath[2])
		} else if strings.Contains(jloc[2], "generated stub/JIT") {
			lineFunc = "STUB"
		} else {
			// Treat whole line as the function name. This is used by the
			// java agent for internal states such as "GC" or "VM".
			lineFunc = jloc[2]
		}
		fn := fns[lineFunc]

		if fn == nil {
			fn = &Function{
				Name:       lineFunc,
				SystemName: lineFunc,
				Filename:   lineFile,
			}
			fns[lineFunc] = fn
			p.Function = append(p.Function, fn)
		}
		loc.Line = []Line{
			{
				Function: fn,
				Line:     lineNo,
			},
		}
		// Address has served its purpose as a lookup key; clear it so
		// merges do not distinguish by address.
		loc.Address = 0
	}

	// Reassign sequential IDs now that entities are final.
	p.remapLocationIDs()
	p.remapFunctionIDs()
	p.remapMappingIDs()

	return nil
}
|
1225
vendor/github.com/google/pprof/profile/legacy_profile.go
generated
vendored
Normal file
1225
vendor/github.com/google/pprof/profile/legacy_profile.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
481
vendor/github.com/google/pprof/profile/merge.go
generated
vendored
Normal file
481
vendor/github.com/google/pprof/profile/merge.go
generated
vendored
Normal file
@ -0,0 +1,481 @@
|
|||||||
|
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package profile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Compact performs garbage collection on a profile to remove any
|
||||||
|
// unreferenced fields. This is useful to reduce the size of a profile
|
||||||
|
// after samples or locations have been removed.
|
||||||
|
func (p *Profile) Compact() *Profile {
|
||||||
|
p, _ = Merge([]*Profile{p})
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge merges all the profiles in profs into a single Profile.
// Returns a new profile independent of the input profiles. The merged
// profile is compacted to eliminate unused samples, locations,
// functions and mappings. Profiles must have identical profile sample
// and period types or the merge will fail. profile.Period of the
// resulting profile will be the maximum of all profiles, and
// profile.TimeNanos will be the earliest nonzero one. Merges are
// associative with the caveat of the first profile having some
// specialization in how headers are combined. There may be other
// subtleties now or in the future regarding associativity.
func Merge(srcs []*Profile) (*Profile, error) {
	if len(srcs) == 0 {
		return nil, fmt.Errorf("no profiles to merge")
	}
	p, err := combineHeaders(srcs)
	if err != nil {
		return nil, err
	}

	// Sizing hints come from the first source profile.
	pm := &profileMerger{
		p:         p,
		samples:   make(map[sampleKey]*Sample, len(srcs[0].Sample)),
		locations: make(map[locationKey]*Location, len(srcs[0].Location)),
		functions: make(map[functionKey]*Function, len(srcs[0].Function)),
		mappings:  make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
	}

	for _, src := range srcs {
		// Clear the profile-specific hash tables
		pm.locationsByID = make(map[uint64]*Location, len(src.Location))
		pm.functionsByID = make(map[uint64]*Function, len(src.Function))
		pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))

		if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
			// The Mapping list has the property that the first mapping
			// represents the main binary. Take the first Mapping we see,
			// otherwise the operations below will add mappings in an
			// arbitrary order.
			pm.mapMapping(src.Mapping[0])
		}

		for _, s := range src.Sample {
			if !isZeroSample(s) {
				pm.mapSample(s)
			}
		}
	}

	for _, s := range p.Sample {
		if isZeroSample(s) {
			// If there are any zero samples, re-merge the profile to GC
			// them.
			return Merge([]*Profile{p})
		}
	}

	return p, nil
}
|
||||||
|
|
||||||
|
// Normalize normalizes the source profile by multiplying each value in profile by the
// ratio of the sum of the base profile's values of that sample type to the sum of the
// source profile's value of that sample type.
func (p *Profile) Normalize(pb *Profile) error {

	if err := p.compatible(pb); err != nil {
		return err
	}

	// Per-sample-type totals of the base profile.
	baseVals := make([]int64, len(p.SampleType))
	for _, s := range pb.Sample {
		for i, v := range s.Value {
			baseVals[i] += v
		}
	}

	// Per-sample-type totals of this (source) profile.
	srcVals := make([]int64, len(p.SampleType))
	for _, s := range p.Sample {
		for i, v := range s.Value {
			srcVals[i] += v
		}
	}

	normScale := make([]float64, len(baseVals))
	for i := range baseVals {
		if srcVals[i] == 0 {
			// Avoid division by zero; zero out the sample type instead.
			normScale[i] = 0.0
		} else {
			normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
		}
	}
	p.ScaleN(normScale)
	return nil
}
|
||||||
|
|
||||||
|
func isZeroSample(s *Sample) bool {
|
||||||
|
for _, v := range s.Value {
|
||||||
|
if v != 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// profileMerger carries the state needed to fold source profiles into the
// destination profile p while deduplicating samples, locations, functions
// and mappings.
type profileMerger struct {
	p *Profile

	// Memoization tables within a profile.
	// These are keyed by the source profile's own IDs and are reset for
	// each source profile being merged.
	locationsByID map[uint64]*Location
	functionsByID map[uint64]*Function
	mappingsByID  map[uint64]mapInfo

	// Memoization tables for profile entities.
	// These are keyed by content-derived keys and persist across all
	// source profiles, providing cross-profile deduplication.
	samples   map[sampleKey]*Sample
	locations map[locationKey]*Location
	functions map[functionKey]*Function
	mappings  map[mappingKey]*Mapping
}
|
||||||
|
|
||||||
|
// mapInfo pairs a merged Mapping with the address offset to apply when
// translating source addresses into the merged mapping's address range.
type mapInfo struct {
	m      *Mapping
	offset int64
}
|
||||||
|
|
||||||
|
// mapSample translates src into the merged profile, remapping its locations
// and deep-copying its label maps. If an equivalent sample (same remapped
// locations and labels) already exists, src's values are accumulated into it;
// otherwise the new sample is registered and appended to the merged profile.
func (pm *profileMerger) mapSample(src *Sample) *Sample {
	s := &Sample{
		Location: make([]*Location, len(src.Location)),
		Value:    make([]int64, len(src.Value)),
		Label:    make(map[string][]string, len(src.Label)),
		NumLabel: make(map[string][]int64, len(src.NumLabel)),
		NumUnit:  make(map[string][]string, len(src.NumLabel)),
	}
	for i, l := range src.Location {
		s.Location[i] = pm.mapLocation(l)
	}
	// Deep-copy label slices so the merged sample does not alias src.
	for k, v := range src.Label {
		vv := make([]string, len(v))
		copy(vv, v)
		s.Label[k] = vv
	}
	for k, v := range src.NumLabel {
		u := src.NumUnit[k]
		vv := make([]int64, len(v))
		uu := make([]string, len(u))
		copy(vv, v)
		copy(uu, u)
		s.NumLabel[k] = vv
		s.NumUnit[k] = uu
	}
	// Check memoization table. Must be done on the remapped location to
	// account for the remapped mapping. Add current values to the
	// existing sample.
	k := s.key()
	if ss, ok := pm.samples[k]; ok {
		for i, v := range src.Value {
			ss.Value[i] += v
		}
		return ss
	}
	// No match: keep s itself, copying in the values now that we know it
	// is not a duplicate.
	copy(s.Value, src.Value)
	pm.samples[k] = s
	pm.p.Sample = append(pm.p.Sample, s)
	return s
}
|
||||||
|
|
||||||
|
// key generates sampleKey to be used as a key for maps.
// Locations contribute their (already remapped) IDs in order; labels and
// numeric labels are serialized in sorted order so that key equality is
// independent of map iteration order.
func (sample *Sample) key() sampleKey {
	ids := make([]string, len(sample.Location))
	for i, l := range sample.Location {
		ids[i] = strconv.FormatUint(l.ID, 16)
	}

	labels := make([]string, 0, len(sample.Label))
	for k, v := range sample.Label {
		// %q%q quotes both key and values, avoiding ambiguity between
		// different key/value splits.
		labels = append(labels, fmt.Sprintf("%q%q", k, v))
	}
	sort.Strings(labels)

	numlabels := make([]string, 0, len(sample.NumLabel))
	for k, v := range sample.NumLabel {
		numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k]))
	}
	sort.Strings(numlabels)

	return sampleKey{
		strings.Join(ids, "|"),
		strings.Join(labels, ""),
		strings.Join(numlabels, ""),
	}
}
|
||||||
|
|
||||||
|
// sampleKey is the content-derived identity of a sample: its location ID
// chain plus its serialized string and numeric labels. Samples with equal
// keys are merged by accumulating their values.
type sampleKey struct {
	locations string
	labels    string
	numlabels string
}
|
||||||
|
|
||||||
|
// mapLocation translates src into the merged profile, remapping its mapping
// and lines and applying the mapping's address offset. Results are memoized
// both by source ID (per source profile) and by content key (across
// profiles). Returns nil for a nil src.
func (pm *profileMerger) mapLocation(src *Location) *Location {
	if src == nil {
		return nil
	}

	if l, ok := pm.locationsByID[src.ID]; ok {
		return l
	}

	mi := pm.mapMapping(src.Mapping)
	l := &Location{
		ID:      uint64(len(pm.p.Location) + 1),
		Mapping: mi.m,
		// Shift the address into the merged mapping's address range.
		Address:  uint64(int64(src.Address) + mi.offset),
		Line:     make([]Line, len(src.Line)),
		IsFolded: src.IsFolded,
	}
	for i, ln := range src.Line {
		l.Line[i] = pm.mapLine(ln)
	}
	// Check memoization table. Must be done on the remapped location to
	// account for the remapped mapping ID.
	k := l.key()
	if ll, ok := pm.locations[k]; ok {
		pm.locationsByID[src.ID] = ll
		return ll
	}
	pm.locationsByID[src.ID] = l
	pm.locations[k] = l
	pm.p.Location = append(pm.p.Location, l)
	return l
}
|
||||||
|
|
||||||
|
// key generates locationKey to be used as a key for maps.
// The address is normalized relative to the mapping start (when a mapping
// exists) so that equal code at randomized load addresses compares equal;
// line info is flattened into a single delimited string.
func (l *Location) key() locationKey {
	key := locationKey{
		addr:     l.Address,
		isFolded: l.IsFolded,
	}
	if l.Mapping != nil {
		// Normalizes address to handle address space randomization.
		key.addr -= l.Mapping.Start
		key.mappingID = l.Mapping.ID
	}
	// Two entries per line: function ID (empty if no function) and line number.
	lines := make([]string, len(l.Line)*2)
	for i, line := range l.Line {
		if line.Function != nil {
			lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
		}
		lines[i*2+1] = strconv.FormatInt(line.Line, 16)
	}
	key.lines = strings.Join(lines, "|")
	return key
}
|
||||||
|
|
||||||
|
// locationKey is the content-derived identity of a location: its
// mapping-relative address, owning mapping ID, serialized line info,
// and folded flag.
type locationKey struct {
	addr, mappingID uint64
	lines           string
	isFolded        bool
}
|
||||||
|
|
||||||
|
// mapMapping translates src into the merged profile. A mapping matching an
// already-merged one (by content key) is reused, recording the address
// offset between the two load addresses; otherwise a copy of src is added.
// Returns a zero mapInfo for a nil src.
func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
	if src == nil {
		return mapInfo{}
	}

	if mi, ok := pm.mappingsByID[src.ID]; ok {
		return mi
	}

	// Check memoization tables.
	mk := src.key()
	if m, ok := pm.mappings[mk]; ok {
		// Same binary loaded at a (possibly) different address: record the
		// delta so locations can be shifted into m's address range.
		mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
		pm.mappingsByID[src.ID] = mi
		return mi
	}
	m := &Mapping{
		ID:              uint64(len(pm.p.Mapping) + 1),
		Start:           src.Start,
		Limit:           src.Limit,
		Offset:          src.Offset,
		File:            src.File,
		BuildID:         src.BuildID,
		HasFunctions:    src.HasFunctions,
		HasFilenames:    src.HasFilenames,
		HasLineNumbers:  src.HasLineNumbers,
		HasInlineFrames: src.HasInlineFrames,
	}
	pm.p.Mapping = append(pm.p.Mapping, m)

	// Update memoization tables.
	pm.mappings[mk] = m
	mi := mapInfo{m, 0}
	pm.mappingsByID[src.ID] = mi
	return mi
}
|
||||||
|
|
||||||
|
// key generates encoded strings of Mapping to be used as a key for
|
||||||
|
// maps.
|
||||||
|
func (m *Mapping) key() mappingKey {
|
||||||
|
// Normalize addresses to handle address space randomization.
|
||||||
|
// Round up to next 4K boundary to avoid minor discrepancies.
|
||||||
|
const mapsizeRounding = 0x1000
|
||||||
|
|
||||||
|
size := m.Limit - m.Start
|
||||||
|
size = size + mapsizeRounding - 1
|
||||||
|
size = size - (size % mapsizeRounding)
|
||||||
|
key := mappingKey{
|
||||||
|
size: size,
|
||||||
|
offset: m.Offset,
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case m.BuildID != "":
|
||||||
|
key.buildIDOrFile = m.BuildID
|
||||||
|
case m.File != "":
|
||||||
|
key.buildIDOrFile = m.File
|
||||||
|
default:
|
||||||
|
// A mapping containing neither build ID nor file name is a fake mapping. A
|
||||||
|
// key with empty buildIDOrFile is used for fake mappings so that they are
|
||||||
|
// treated as the same mapping during merging.
|
||||||
|
}
|
||||||
|
return key
|
||||||
|
}
|
||||||
|
|
||||||
|
// mappingKey is the content-derived identity of a mapping: its rounded
// size, file offset, and build ID (preferred) or file name. An empty
// buildIDOrFile denotes a fake mapping.
type mappingKey struct {
	size, offset  uint64
	buildIDOrFile string
}
|
||||||
|
|
||||||
|
func (pm *profileMerger) mapLine(src Line) Line {
|
||||||
|
ln := Line{
|
||||||
|
Function: pm.mapFunction(src.Function),
|
||||||
|
Line: src.Line,
|
||||||
|
}
|
||||||
|
return ln
|
||||||
|
}
|
||||||
|
|
||||||
|
// mapFunction translates src into the merged profile, reusing an existing
// function with the same content key when one exists; otherwise a copy of
// src with a fresh ID is registered. Returns nil for a nil src.
func (pm *profileMerger) mapFunction(src *Function) *Function {
	if src == nil {
		return nil
	}
	// Fast path: this source profile's function was already mapped.
	if f, ok := pm.functionsByID[src.ID]; ok {
		return f
	}
	k := src.key()
	if f, ok := pm.functions[k]; ok {
		pm.functionsByID[src.ID] = f
		return f
	}
	f := &Function{
		ID:         uint64(len(pm.p.Function) + 1),
		Name:       src.Name,
		SystemName: src.SystemName,
		Filename:   src.Filename,
		StartLine:  src.StartLine,
	}
	pm.functions[k] = f
	pm.functionsByID[src.ID] = f
	pm.p.Function = append(pm.p.Function, f)
	return f
}
|
||||||
|
|
||||||
|
// key generates a struct to be used as a key for maps.
|
||||||
|
func (f *Function) key() functionKey {
|
||||||
|
return functionKey{
|
||||||
|
f.StartLine,
|
||||||
|
f.Name,
|
||||||
|
f.SystemName,
|
||||||
|
f.Filename,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// functionKey is the content-derived identity of a function: its start
// line plus its name, mangled system name, and source file name.
type functionKey struct {
	startLine                  int64
	name, systemName, fileName string
}
|
||||||
|
|
||||||
|
// combineHeaders checks that all profiles can be merged and returns
// their combined profile.
// Header fields are combined as follows: earliest TimeNanos, summed
// DurationNanos, largest Period, deduplicated Comments in first-seen
// order, and the first non-empty DefaultSampleType. Frame-filter
// expressions and sample/period types are taken from the first profile.
func combineHeaders(srcs []*Profile) (*Profile, error) {
	for _, s := range srcs[1:] {
		if err := srcs[0].compatible(s); err != nil {
			return nil, err
		}
	}

	var timeNanos, durationNanos, period int64
	var comments []string
	seenComments := map[string]bool{}
	var defaultSampleType string
	for _, s := range srcs {
		if timeNanos == 0 || s.TimeNanos < timeNanos {
			timeNanos = s.TimeNanos
		}
		durationNanos += s.DurationNanos
		if period == 0 || period < s.Period {
			period = s.Period
		}
		for _, c := range s.Comments {
			if seen := seenComments[c]; !seen {
				comments = append(comments, c)
				seenComments[c] = true
			}
		}
		if defaultSampleType == "" {
			defaultSampleType = s.DefaultSampleType
		}
	}

	p := &Profile{
		SampleType: make([]*ValueType, len(srcs[0].SampleType)),

		DropFrames: srcs[0].DropFrames,
		KeepFrames: srcs[0].KeepFrames,

		TimeNanos:     timeNanos,
		DurationNanos: durationNanos,
		PeriodType:    srcs[0].PeriodType,
		Period:        period,

		Comments:          comments,
		DefaultSampleType: defaultSampleType,
	}
	copy(p.SampleType, srcs[0].SampleType)
	return p, nil
}
|
||||||
|
|
||||||
|
// compatible determines if two profiles can be compared/merged.
|
||||||
|
// returns nil if the profiles are compatible; otherwise an error with
|
||||||
|
// details on the incompatibility.
|
||||||
|
func (p *Profile) compatible(pb *Profile) error {
|
||||||
|
if !equalValueType(p.PeriodType, pb.PeriodType) {
|
||||||
|
return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(p.SampleType) != len(pb.SampleType) {
|
||||||
|
return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range p.SampleType {
|
||||||
|
if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
|
||||||
|
return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// equalValueType returns true if the two value types are semantically
|
||||||
|
// equal. It ignores the internal fields used during encode/decode.
|
||||||
|
func equalValueType(st1, st2 *ValueType) bool {
|
||||||
|
return st1.Type == st2.Type && st1.Unit == st2.Unit
|
||||||
|
}
|
805
vendor/github.com/google/pprof/profile/profile.go
generated
vendored
Normal file
805
vendor/github.com/google/pprof/profile/profile.go
generated
vendored
Normal file
@ -0,0 +1,805 @@
|
|||||||
|
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package profile provides a representation of profile.proto and
|
||||||
|
// methods to encode/decode profiles in this format.
|
||||||
|
package profile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"compress/gzip"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Profile is an in-memory representation of profile.proto.
type Profile struct {
	SampleType        []*ValueType
	DefaultSampleType string
	Sample            []*Sample
	Mapping           []*Mapping
	Location          []*Location
	Function          []*Function
	Comments          []string

	DropFrames string
	KeepFrames string

	TimeNanos     int64
	DurationNanos int64
	PeriodType    *ValueType
	Period        int64

	// The following fields are modified during encoding and copying,
	// so are protected by a Mutex.
	encodeMu sync.Mutex

	// Unexported *X fields hold string-table indices used only while
	// encoding/decoding the protobuf representation.
	commentX           []int64
	dropFramesX        int64
	keepFramesX        int64
	stringTable        []string
	defaultSampleTypeX int64
}

// ValueType corresponds to Profile.ValueType
type ValueType struct {
	Type string // cpu, wall, inuse_space, etc
	Unit string // seconds, nanoseconds, bytes, etc

	// String-table indices used during encode/decode.
	typeX int64
	unitX int64
}

// Sample corresponds to Profile.Sample
type Sample struct {
	Location []*Location
	Value    []int64
	Label    map[string][]string
	NumLabel map[string][]int64
	NumUnit  map[string][]string

	// Encode/decode scratch: raw location IDs and labels before resolution.
	locationIDX []uint64
	labelX      []label
}

// label corresponds to Profile.Label
type label struct {
	keyX int64
	// Exactly one of the two following values must be set
	strX int64
	numX int64 // Integer value for this label
	// can be set if numX has value
	unitX int64
}

// Mapping corresponds to Profile.Mapping
type Mapping struct {
	ID              uint64
	Start           uint64
	Limit           uint64
	Offset          uint64
	File            string
	BuildID         string
	HasFunctions    bool
	HasFilenames    bool
	HasLineNumbers  bool
	HasInlineFrames bool

	// String-table indices used during encode/decode.
	fileX    int64
	buildIDX int64
}

// Location corresponds to Profile.Location
type Location struct {
	ID       uint64
	Mapping  *Mapping
	Address  uint64
	Line     []Line
	IsFolded bool

	// Raw mapping ID before resolution during decode.
	mappingIDX uint64
}

// Line corresponds to Profile.Line
type Line struct {
	Function *Function
	Line     int64

	// Raw function ID before resolution during decode.
	functionIDX uint64
}

// Function corresponds to Profile.Function
type Function struct {
	ID         uint64
	Name       string
	SystemName string
	Filename   string
	StartLine  int64

	// String-table indices used during encode/decode.
	nameX       int64
	systemNameX int64
	filenameX   int64
}
|
||||||
|
|
||||||
|
// Parse parses a profile and checks for its validity. The input
|
||||||
|
// may be a gzip-compressed encoded protobuf or one of many legacy
|
||||||
|
// profile formats which may be unsupported in the future.
|
||||||
|
func Parse(r io.Reader) (*Profile, error) {
|
||||||
|
data, err := ioutil.ReadAll(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ParseData(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseData parses a profile from a buffer and checks for its
|
||||||
|
// validity.
|
||||||
|
func ParseData(data []byte) (*Profile, error) {
|
||||||
|
var p *Profile
|
||||||
|
var err error
|
||||||
|
if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
|
||||||
|
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
||||||
|
if err == nil {
|
||||||
|
data, err = ioutil.ReadAll(gz)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("decompressing profile: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile {
|
||||||
|
p, err = parseLegacy(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("parsing profile: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := p.CheckValid(); err != nil {
|
||||||
|
return nil, fmt.Errorf("malformed profile: %v", err)
|
||||||
|
}
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sentinel errors distinguishing the ways profile parsing can fail;
// ParseData uses errNoData and errConcatProfile to decide whether to
// retry input as a legacy format.
var errUnrecognized = fmt.Errorf("unrecognized profile format")
var errMalformed = fmt.Errorf("malformed profile format")
var errNoData = fmt.Errorf("empty input file")
var errConcatProfile = fmt.Errorf("concatenated profiles detected")
|
||||||
|
|
||||||
|
// parseLegacy tries each known legacy (pre-protobuf) profile format in
// turn, returning the first successful parse annotated with legacy frame
// info. A parser returning errUnrecognized means "not my format, try the
// next"; any other error aborts immediately.
func parseLegacy(data []byte) (*Profile, error) {
	parsers := []func([]byte) (*Profile, error){
		parseCPU,
		parseHeap,
		parseGoCount, // goroutine, threadcreate
		parseThread,
		parseContention,
		parseJavaProfile,
	}

	for _, parser := range parsers {
		p, err := parser(data)
		if err == nil {
			p.addLegacyFrameInfo()
			return p, nil
		}
		if err != errUnrecognized {
			return nil, err
		}
	}
	return nil, errUnrecognized
}
|
||||||
|
|
||||||
|
// ParseUncompressed parses an uncompressed protobuf into a profile.
|
||||||
|
func ParseUncompressed(data []byte) (*Profile, error) {
|
||||||
|
if len(data) == 0 {
|
||||||
|
return nil, errNoData
|
||||||
|
}
|
||||||
|
p := &Profile{}
|
||||||
|
if err := unmarshal(data, p); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := p.postDecode(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// libRx matches shared-library file names (".so" or ".so.<version>" /
// ".so_<version>"); massageMappings uses it to skip libraries when
// guessing which mapping is the main binary.
var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`)
|
||||||
|
|
||||||
|
// massageMappings applies heuristic-based changes to the profile
// mappings to account for quirks of some environments.
// It merges adjacent split mappings, moves the best guess for the main
// binary to the front of the mapping list, and renumbers mapping IDs
// sequentially.
func (p *Profile) massageMappings() {
	// Merge adjacent regions with matching names, checking that the offsets match
	if len(p.Mapping) > 1 {
		mappings := []*Mapping{p.Mapping[0]}
		for _, m := range p.Mapping[1:] {
			lm := mappings[len(mappings)-1]
			if adjacent(lm, m) {
				// Extend the previous mapping and fold m into it,
				// preferring m's file/build ID when present.
				lm.Limit = m.Limit
				if m.File != "" {
					lm.File = m.File
				}
				if m.BuildID != "" {
					lm.BuildID = m.BuildID
				}
				p.updateLocationMapping(m, lm)
				continue
			}
			mappings = append(mappings, m)
		}
		p.Mapping = mappings
	}

	// Use heuristics to identify main binary and move it to the top of the list of mappings
	for i, m := range p.Mapping {
		file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1))
		if len(file) == 0 {
			// Skip mappings with no file name.
			continue
		}
		if len(libRx.FindStringSubmatch(file)) > 0 {
			// Skip shared libraries.
			continue
		}
		if file[0] == '[' {
			// Skip pseudo-mappings such as "[vdso]" or "[heap]".
			continue
		}
		// Swap what we guess is main to position 0.
		p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0]
		break
	}

	// Keep the mapping IDs neatly sorted
	for i, m := range p.Mapping {
		m.ID = uint64(i + 1)
	}
}
|
||||||
|
|
||||||
|
// adjacent returns whether two mapping entries represent the same
|
||||||
|
// mapping that has been split into two. Check that their addresses are adjacent,
|
||||||
|
// and if the offsets match, if they are available.
|
||||||
|
func adjacent(m1, m2 *Mapping) bool {
|
||||||
|
if m1.File != "" && m2.File != "" {
|
||||||
|
if m1.File != m2.File {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if m1.BuildID != "" && m2.BuildID != "" {
|
||||||
|
if m1.BuildID != m2.BuildID {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if m1.Limit != m2.Start {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if m1.Offset != 0 && m2.Offset != 0 {
|
||||||
|
offset := m1.Offset + (m1.Limit - m1.Start)
|
||||||
|
if offset != m2.Offset {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Profile) updateLocationMapping(from, to *Mapping) {
|
||||||
|
for _, l := range p.Location {
|
||||||
|
if l.Mapping == from {
|
||||||
|
l.Mapping = to
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// serialize marshals p to its protobuf wire format. preEncode mutates the
// profile's internal string-table fields, so the whole sequence runs under
// encodeMu to keep concurrent serializations safe.
func serialize(p *Profile) []byte {
	p.encodeMu.Lock()
	p.preEncode()
	b := marshal(p)
	p.encodeMu.Unlock()
	return b
}
|
||||||
|
|
||||||
|
// Write writes the profile as a gzip-compressed marshaled protobuf.
|
||||||
|
func (p *Profile) Write(w io.Writer) error {
|
||||||
|
zw := gzip.NewWriter(w)
|
||||||
|
defer zw.Close()
|
||||||
|
_, err := zw.Write(serialize(p))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteUncompressed writes the profile as a marshaled protobuf.
// Unlike Write, the output is not gzip-compressed.
func (p *Profile) WriteUncompressed(w io.Writer) error {
	_, err := w.Write(serialize(p))
	return err
}
|
||||||
|
|
||||||
|
// CheckValid tests whether the profile is valid. Checks include, but are
// not limited to:
//   - len(Profile.Sample[n].value) == len(Profile.value_unit)
//   - Sample.id has a corresponding Profile.Location
func (p *Profile) CheckValid() error {
	// Check that sample values are consistent
	sampleLen := len(p.SampleType)
	if sampleLen == 0 && len(p.Sample) != 0 {
		return fmt.Errorf("missing sample type information")
	}
	for _, s := range p.Sample {
		if s == nil {
			return fmt.Errorf("profile has nil sample")
		}
		if len(s.Value) != sampleLen {
			return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType))
		}
		for _, l := range s.Location {
			if l == nil {
				return fmt.Errorf("sample has nil location")
			}
		}
	}

	// Check that all mappings/locations/functions are in the tables
	// Check that there are no duplicate ids
	mappings := make(map[uint64]*Mapping, len(p.Mapping))
	for _, m := range p.Mapping {
		if m == nil {
			return fmt.Errorf("profile has nil mapping")
		}
		if m.ID == 0 {
			return fmt.Errorf("found mapping with reserved ID=0")
		}
		if mappings[m.ID] != nil {
			return fmt.Errorf("multiple mappings with same id: %d", m.ID)
		}
		mappings[m.ID] = m
	}
	functions := make(map[uint64]*Function, len(p.Function))
	for _, f := range p.Function {
		if f == nil {
			return fmt.Errorf("profile has nil function")
		}
		if f.ID == 0 {
			return fmt.Errorf("found function with reserved ID=0")
		}
		if functions[f.ID] != nil {
			return fmt.Errorf("multiple functions with same id: %d", f.ID)
		}
		functions[f.ID] = f
	}
	locations := make(map[uint64]*Location, len(p.Location))
	for _, l := range p.Location {
		if l == nil {
			return fmt.Errorf("profile has nil location")
		}
		if l.ID == 0 {
			return fmt.Errorf("found location with reserved id=0")
		}
		if locations[l.ID] != nil {
			return fmt.Errorf("multiple locations with same id: %d", l.ID)
		}
		locations[l.ID] = l
		// A location's mapping/function pointers must be the same objects
		// registered in the profile's tables, not merely equal copies.
		if m := l.Mapping; m != nil {
			if m.ID == 0 || mappings[m.ID] != m {
				return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID)
			}
		}
		for _, ln := range l.Line {
			f := ln.Function
			if f == nil {
				return fmt.Errorf("location id: %d has a line with nil function", l.ID)
			}
			if f.ID == 0 || functions[f.ID] != f {
				return fmt.Errorf("inconsistent function %p: %d", f, f.ID)
			}
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// Aggregate merges the locations in the profile into equivalence
// classes preserving the request attributes. It also updates the
// samples to point to the merged locations.
// Each boolean selects an attribute to KEEP: attributes whose flag is
// false are erased from functions and locations (and the mappings' Has*
// flags lowered accordingly). The result is re-validated with CheckValid.
func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error {
	for _, m := range p.Mapping {
		m.HasInlineFrames = m.HasInlineFrames && inlineFrame
		m.HasFunctions = m.HasFunctions && function
		m.HasFilenames = m.HasFilenames && filename
		m.HasLineNumbers = m.HasLineNumbers && linenumber
	}

	// Aggregate functions
	if !function || !filename {
		for _, f := range p.Function {
			if !function {
				f.Name = ""
				f.SystemName = ""
			}
			if !filename {
				f.Filename = ""
			}
		}
	}

	// Aggregate locations
	if !inlineFrame || !address || !linenumber {
		for _, l := range p.Location {
			if !inlineFrame && len(l.Line) > 1 {
				// Keep only the outermost (last) frame when discarding
				// inline frames.
				l.Line = l.Line[len(l.Line)-1:]
			}
			if !linenumber {
				for i := range l.Line {
					l.Line[i].Line = 0
				}
			}
			if !address {
				l.Address = 0
			}
		}
	}

	return p.CheckValid()
}
|
||||||
|
|
||||||
|
// NumLabelUnits returns a map of numeric label keys to the units
// associated with those keys and a map of those keys to any units
// that were encountered but not used.
// Unit for a given key is the first encountered unit for that key. If multiple
// units are encountered for values paired with a particular key, then the first
// unit encountered is used and all other units are returned in sorted order
// in map of ignored units.
// If no units are encountered for a particular key, the unit is then inferred
// based on the key.
func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) {
	numLabelUnits := map[string]string{}
	ignoredUnits := map[string]map[string]bool{}
	encounteredKeys := map[string]bool{}

	// Determine units based on numeric tags for each sample.
	for _, s := range p.Sample {
		for k := range s.NumLabel {
			encounteredKeys[k] = true
			for _, unit := range s.NumUnit[k] {
				if unit == "" {
					continue
				}
				// First non-empty unit wins; conflicting units are recorded
				// as ignored.
				if wantUnit, ok := numLabelUnits[k]; !ok {
					numLabelUnits[k] = unit
				} else if wantUnit != unit {
					if v, ok := ignoredUnits[k]; ok {
						v[unit] = true
					} else {
						ignoredUnits[k] = map[string]bool{unit: true}
					}
				}
			}
		}
	}
	// Infer units for keys without any units associated with
	// numeric tag values.
	for key := range encounteredKeys {
		unit := numLabelUnits[key]
		if unit == "" {
			switch key {
			case "alignment", "request":
				numLabelUnits[key] = "bytes"
			default:
				// Fall back to using the key itself as the unit name.
				numLabelUnits[key] = key
			}
		}
	}

	// Copy ignored units into more readable format
	unitsIgnored := make(map[string][]string, len(ignoredUnits))
	for key, values := range ignoredUnits {
		units := make([]string, len(values))
		i := 0
		for unit := range values {
			units[i] = unit
			i++
		}
		// Sorted for deterministic output.
		sort.Strings(units)
		unitsIgnored[key] = units
	}

	return numLabelUnits, unitsIgnored
}
|
||||||
|
|
||||||
|
// String dumps a text representation of a profile. Intended mainly
// for debugging purposes.
// The output lists comments, period/time/duration headers, then samples,
// locations, and mappings, one entry per line.
func (p *Profile) String() string {
	ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location))
	for _, c := range p.Comments {
		ss = append(ss, "Comment: "+c)
	}
	if pt := p.PeriodType; pt != nil {
		ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
	}
	ss = append(ss, fmt.Sprintf("Period: %d", p.Period))
	if p.TimeNanos != 0 {
		ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos)))
	}
	if p.DurationNanos != 0 {
		ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos)))
	}

	ss = append(ss, "Samples:")
	// Build the sample-type header, tagging the default sample type.
	var sh1 string
	for _, s := range p.SampleType {
		dflt := ""
		if s.Type == p.DefaultSampleType {
			dflt = "[dflt]"
		}
		sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt)
	}
	ss = append(ss, strings.TrimSpace(sh1))
	for _, s := range p.Sample {
		ss = append(ss, s.string())
	}

	ss = append(ss, "Locations")
	for _, l := range p.Location {
		ss = append(ss, l.string())
	}

	ss = append(ss, "Mappings")
	for _, m := range p.Mapping {
		ss = append(ss, m.string())
	}

	return strings.Join(ss, "\n") + "\n"
}
|
||||||
|
|
||||||
|
// string dumps a text representation of a mapping. Intended mainly
// for debugging purposes.
func (m *Mapping) string() string {
	// Flags summarizing which kinds of symbol information the mapping has:
	// [FN] functions, [FL] filenames, [LN] line numbers, [IN] inline frames.
	bits := ""
	if m.HasFunctions {
		bits = bits + "[FN]"
	}
	if m.HasFilenames {
		bits = bits + "[FL]"
	}
	if m.HasLineNumbers {
		bits = bits + "[LN]"
	}
	if m.HasInlineFrames {
		bits = bits + "[IN]"
	}
	// Layout: "ID: start/limit/offset file build-id flags".
	return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s",
		m.ID,
		m.Start, m.Limit, m.Offset,
		m.File,
		m.BuildID,
		bits)
}
|
||||||
|
|
||||||
|
// string dumps a text representation of a location. Intended mainly
// for debugging purposes.
func (l *Location) string() string {
	ss := []string{}
	// Prefix with the location ID and address; the mapping ID and the
	// folded marker are appended only when present.
	locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address)
	if m := l.Mapping; m != nil {
		locStr = locStr + fmt.Sprintf("M=%d ", m.ID)
	}
	if l.IsFolded {
		locStr = locStr + "[F] "
	}
	if len(l.Line) == 0 {
		// No line info: emit the bare location prefix on its own.
		ss = append(ss, locStr)
	}
	for li := range l.Line {
		lnStr := "??"
		if fn := l.Line[li].Function; fn != nil {
			lnStr = fmt.Sprintf("%s %s:%d s=%d",
				fn.Name,
				fn.Filename,
				l.Line[li].Line,
				fn.StartLine)
			if fn.Name != fn.SystemName {
				// Show the mangled/system name when it differs.
				lnStr = lnStr + "(" + fn.SystemName + ")"
			}
		}
		ss = append(ss, locStr+lnStr)
		// Do not print location details past the first line
		locStr = "             "
	}
	return strings.Join(ss, "\n")
}
|
||||||
|
|
||||||
|
// string dumps a text representation of a sample. Intended mainly
// for debugging purposes.
func (s *Sample) string() string {
	ss := []string{}
	// First line: the sample values followed by the IDs of the
	// locations in the sample's stack.
	var sv string
	for _, v := range s.Value {
		sv = fmt.Sprintf("%s %10d", sv, v)
	}
	sv = sv + ": "
	for _, l := range s.Location {
		sv = sv + fmt.Sprintf("%d ", l.ID)
	}
	ss = append(ss, sv)
	// Labels and numeric labels go on their own indented lines.
	const labelHeader = "                "
	if len(s.Label) > 0 {
		ss = append(ss, labelHeader+labelsToString(s.Label))
	}
	if len(s.NumLabel) > 0 {
		ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit))
	}
	return strings.Join(ss, "\n")
}
|
||||||
|
|
||||||
|
// labelsToString renders a label map as a single space-separated string.
// Each entry is formatted as "key:[v1 v2 ...]" and entries are sorted so
// the output is deterministic despite random map iteration order.
func labelsToString(labels map[string][]string) string {
	entries := make([]string, 0, len(labels))
	for key, values := range labels {
		entries = append(entries, fmt.Sprintf("%s:%v", key, values))
	}
	sort.Strings(entries)
	return strings.Join(entries, " ")
}
|
||||||
|
|
||||||
|
// numLabelsToString renders numeric labels as a space-separated string of
// sorted "key:[...]" entries. When a key has exactly one unit per value,
// the values are printed with their units (e.g. "1 bytes"); otherwise the
// raw int64 values are printed.
func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string {
	entries := make([]string, 0, len(numLabels))
	for key, nums := range numLabels {
		units := numUnits[key]
		var entry string
		if len(units) != len(nums) {
			// Missing or mismatched units: print the bare numbers.
			entry = fmt.Sprintf("%s:%v", key, nums)
		} else {
			withUnits := make([]string, len(nums))
			for i, n := range nums {
				withUnits[i] = fmt.Sprintf("%d %s", n, units[i])
			}
			entry = fmt.Sprintf("%s:%v", key, withUnits)
		}
		entries = append(entries, entry)
	}
	sort.Strings(entries)
	return strings.Join(entries, " ")
}
|
||||||
|
|
||||||
|
// SetLabel sets the specified key to the specified value for all samples in the
// profile.
func (p *Profile) SetLabel(key string, value []string) {
	for _, sample := range p.Sample {
		if sample.Label == nil {
			// Label maps are created lazily; initialize on first use.
			sample.Label = map[string][]string{key: value}
		} else {
			sample.Label[key] = value
		}
	}
}

// RemoveLabel removes all labels associated with the specified key for all
// samples in the profile.
func (p *Profile) RemoveLabel(key string) {
	for _, sample := range p.Sample {
		// delete on a nil map is a no-op, so no nil check is needed.
		delete(sample.Label, key)
	}
}

// HasLabel returns true if a sample has a label with indicated key and value.
func (s *Sample) HasLabel(key, value string) bool {
	// Indexing a nil/missing key yields a nil slice, so this is safe
	// and simply returns false when the key is absent.
	for _, v := range s.Label[key] {
		if v == value {
			return true
		}
	}
	return false
}

// DiffBaseSample returns true if a sample belongs to the diff base and false
// otherwise.
func (s *Sample) DiffBaseSample() bool {
	// Diff-base membership is recorded via the reserved "pprof::base" label.
	return s.HasLabel("pprof::base", "true")
}
|
||||||
|
|
||||||
|
// Scale multiplies all sample values in a profile by a constant and keeps
// only samples that have at least one non-zero value.
func (p *Profile) Scale(ratio float64) {
	if ratio == 1 {
		// Scaling by 1 is a no-op.
		return
	}
	// Apply the same ratio to every sample type.
	ratios := make([]float64, len(p.SampleType))
	for i := range p.SampleType {
		ratios[i] = ratio
	}
	// ScaleN only fails on a ratio-count mismatch, which cannot happen
	// here since ratios was sized from p.SampleType; the error is ignored.
	p.ScaleN(ratios)
}

// ScaleN multiplies each sample values in a sample by a different amount
// and keeps only samples that have at least one non-zero value.
func (p *Profile) ScaleN(ratios []float64) error {
	if len(p.SampleType) != len(ratios) {
		return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
	}
	// Fast path: all ratios are 1, nothing to do.
	allOnes := true
	for _, r := range ratios {
		if r != 1 {
			allOnes = false
			break
		}
	}
	if allOnes {
		return nil
	}
	// Scale in place, compacting the sample slice so that only samples
	// with at least one non-zero scaled value are retained.
	fillIdx := 0
	for _, s := range p.Sample {
		keepSample := false
		for i, v := range s.Value {
			if ratios[i] != 1 {
				// Round to nearest to avoid systematic truncation bias.
				val := int64(math.Round(float64(v) * ratios[i]))
				s.Value[i] = val
				// NOTE(review): only values at indices with ratio != 1
				// influence keepSample; a sample whose only non-zero
				// values sit at ratio==1 indices is dropped. This matches
				// the code as written — confirm against upstream intent.
				keepSample = keepSample || val != 0
			}
		}
		if keepSample {
			p.Sample[fillIdx] = s
			fillIdx++
		}
	}
	p.Sample = p.Sample[:fillIdx]
	return nil
}
|
||||||
|
|
||||||
|
// HasFunctions determines if all locations in this profile have
// symbolized function information.
func (p *Profile) HasFunctions() bool {
	for _, l := range p.Location {
		// A location without a mapping is not counted against the profile.
		if l.Mapping != nil && !l.Mapping.HasFunctions {
			return false
		}
	}
	return true
}

// HasFileLines determines if all locations in this profile have
// symbolized file and line number information.
func (p *Profile) HasFileLines() bool {
	for _, l := range p.Location {
		// Both filenames and line numbers must be present on the mapping.
		if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) {
			return false
		}
	}
	return true
}

// Unsymbolizable returns true if a mapping points to a binary for which
// locations can't be symbolized in principle, at least now. Examples are
// "[vdso]", [vsyscall]" and some others, see the code.
func (m *Mapping) Unsymbolizable() bool {
	name := filepath.Base(m.File)
	// Pseudo-mappings like "[vdso]"/"[vsyscall]", the linux-vdso entry,
	// and GPU device files under /dev/dri cannot be symbolized.
	return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
}
|
||||||
|
|
||||||
|
// Copy makes a fully independent copy of a profile.
func (p *Profile) Copy() *Profile {
	pp := &Profile{}
	// Deep-copy by round-tripping through the profile's own wire encoding.
	// A serialize/unmarshal failure here indicates an internal invariant
	// violation, so it is treated as a programmer error and panics.
	if err := unmarshal(serialize(p), pp); err != nil {
		panic(err)
	}
	// Rebuild the in-memory cross-references (IDs to pointers, etc.).
	if err := pp.postDecode(); err != nil {
		panic(err)
	}

	return pp
}
|
370
vendor/github.com/google/pprof/profile/proto.go
generated
vendored
Normal file
370
vendor/github.com/google/pprof/profile/proto.go
generated
vendored
Normal file
@ -0,0 +1,370 @@
|
|||||||
|
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// This file is a simple protocol buffer encoder and decoder.
|
||||||
|
// The format is described at
|
||||||
|
// https://developers.google.com/protocol-buffers/docs/encoding
|
||||||
|
//
|
||||||
|
// A protocol message must implement the message interface:
|
||||||
|
// decoder() []decoder
|
||||||
|
// encode(*buffer)
|
||||||
|
//
|
||||||
|
// The decode method returns a slice indexed by field number that gives the
|
||||||
|
// function to decode that field.
|
||||||
|
// The encode method encodes its receiver into the given buffer.
|
||||||
|
//
|
||||||
|
// The two methods are simple enough to be implemented by hand rather than
|
||||||
|
// by using a protocol compiler.
|
||||||
|
//
|
||||||
|
// See profile.go for examples of messages implementing this interface.
|
||||||
|
//
|
||||||
|
// There is no support for groups, message sets, or "has" bits.
|
||||||
|
|
||||||
|
package profile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// buffer holds both encoder output and per-field decoder state.
type buffer struct {
	field int // field tag
	typ   int // proto wire type code for field
	u64   uint64
	data  []byte
	tmp   [16]byte // scratch space used when rotating length-delimited headers
}

// decoder decodes one field of a message from the buffer's current state.
type decoder func(*buffer, message) error

// message is implemented by every protocol-buffer message in this package:
// decoder() returns a slice indexed by field number giving the decode
// function for that field, and encode appends the wire form to a buffer.
type message interface {
	decoder() []decoder
	encode(*buffer)
}
|
||||||
|
|
||||||
|
// marshal returns the wire encoding of m.
func marshal(m message) []byte {
	var b buffer
	m.encode(&b)
	return b.data
}

// encodeVarint appends x to b.data in base-128 varint encoding,
// least-significant 7 bits first, with the high bit marking continuation.
func encodeVarint(b *buffer, x uint64) {
	for x >= 128 {
		b.data = append(b.data, byte(x)|0x80)
		x >>= 7
	}
	b.data = append(b.data, byte(x))
}

// encodeLength appends the header of a length-delimited (wire type 2)
// field: the tag/type key followed by the payload length.
func encodeLength(b *buffer, tag int, len int) {
	encodeVarint(b, uint64(tag)<<3|2)
	encodeVarint(b, uint64(len))
}
|
||||||
|
|
||||||
|
// encodeUint64 appends x as a wire-type-0 (varint) field with the given tag.
func encodeUint64(b *buffer, tag int, x uint64) {
	// append varint to b.data
	encodeVarint(b, uint64(tag)<<3)
	encodeVarint(b, x)
}

// encodeUint64s appends a repeated uint64 field. Slices with more than two
// elements use the packed representation (one length-delimited field);
// shorter slices are emitted as individual varint fields.
func encodeUint64s(b *buffer, tag int, x []uint64) {
	if len(x) > 2 {
		// Use packed encoding
		n1 := len(b.data)
		for _, u := range x {
			encodeVarint(b, u)
		}
		n2 := len(b.data)
		encodeLength(b, tag, n2-n1)
		n3 := len(b.data)
		// The header (tag + length) was appended after the payload;
		// rotate it to the front via the tmp scratch buffer. Assumes
		// the header fits in the 16-byte tmp array.
		copy(b.tmp[:], b.data[n2:n3])
		copy(b.data[n1+(n3-n2):], b.data[n1:n2])
		copy(b.data[n1:], b.tmp[:n3-n2])
		return
	}
	for _, u := range x {
		encodeUint64(b, tag, u)
	}
}
|
||||||
|
|
||||||
|
// encodeUint64Opt appends x as an optional field, omitting the zero value.
func encodeUint64Opt(b *buffer, tag int, x uint64) {
	if x == 0 {
		return
	}
	encodeUint64(b, tag, x)
}

// encodeInt64 appends x as a varint field (two's-complement, not zigzag).
func encodeInt64(b *buffer, tag int, x int64) {
	u := uint64(x)
	encodeUint64(b, tag, u)
}

// encodeInt64s appends a repeated int64 field. Slices with more than two
// elements use the packed representation; shorter slices are emitted as
// individual varint fields.
func encodeInt64s(b *buffer, tag int, x []int64) {
	if len(x) > 2 {
		// Use packed encoding
		n1 := len(b.data)
		for _, u := range x {
			encodeVarint(b, uint64(u))
		}
		n2 := len(b.data)
		encodeLength(b, tag, n2-n1)
		n3 := len(b.data)
		// Rotate the trailing header (tag + length) to the front via tmp,
		// same technique as encodeUint64s.
		copy(b.tmp[:], b.data[n2:n3])
		copy(b.data[n1+(n3-n2):], b.data[n1:n2])
		copy(b.data[n1:], b.tmp[:n3-n2])
		return
	}
	for _, u := range x {
		encodeInt64(b, tag, u)
	}
}

// encodeInt64Opt appends x as an optional field, omitting the zero value.
func encodeInt64Opt(b *buffer, tag int, x int64) {
	if x == 0 {
		return
	}
	encodeInt64(b, tag, x)
}

// encodeString appends x as a length-delimited (wire type 2) field.
func encodeString(b *buffer, tag int, x string) {
	encodeLength(b, tag, len(x))
	b.data = append(b.data, x...)
}
|
||||||
|
|
||||||
|
// encodeStrings appends a repeated string field, one length-delimited
// entry per element (strings have no packed form).
func encodeStrings(b *buffer, tag int, x []string) {
	for _, s := range x {
		encodeString(b, tag, s)
	}
}

// encodeBool appends x as a varint field encoded as 0 or 1.
func encodeBool(b *buffer, tag int, x bool) {
	if x {
		encodeUint64(b, tag, 1)
	} else {
		encodeUint64(b, tag, 0)
	}
}

// encodeBoolOpt appends x as an optional field, omitting false.
func encodeBoolOpt(b *buffer, tag int, x bool) {
	if x {
		encodeBool(b, tag, x)
	}
}

// encodeMessage appends m as an embedded (length-delimited) message field.
func encodeMessage(b *buffer, tag int, m message) {
	// Encode the payload first so its length is known, then rotate the
	// trailing header (tag + length) to the front via the tmp scratch
	// buffer, as in encodeUint64s.
	n1 := len(b.data)
	m.encode(b)
	n2 := len(b.data)
	encodeLength(b, tag, n2-n1)
	n3 := len(b.data)
	copy(b.tmp[:], b.data[n2:n3])
	copy(b.data[n1+(n3-n2):], b.data[n1:n2])
	copy(b.data[n1:], b.tmp[:n3-n2])
}
|
||||||
|
|
||||||
|
// unmarshal decodes the wire-format bytes in data into m.
func unmarshal(data []byte, m message) (err error) {
	// Seed the buffer with wire type 2 so decodeMessage's type check passes
	// for the implicit top-level length-delimited message.
	b := buffer{data: data, typ: 2}
	return decodeMessage(&b, m)
}
|
||||||
|
|
||||||
|
// le64 decodes an 8-byte little-endian unsigned integer from p.
// p must have at least 8 bytes; callers check the length first.
func le64(p []byte) uint64 {
	var v uint64
	for i := 7; i >= 0; i-- {
		v = v<<8 | uint64(p[i])
	}
	return v
}
|
||||||
|
|
||||||
|
// le32 decodes a 4-byte little-endian unsigned integer from p.
// p must have at least 4 bytes; callers check the length first.
func le32(p []byte) uint32 {
	var v uint32
	for i := 3; i >= 0; i-- {
		v = v<<8 | uint32(p[i])
	}
	return v
}
|
||||||
|
|
||||||
|
// decodeVarint reads one base-128 varint from the front of data and
// returns its value together with the remaining bytes. It fails if the
// input is truncated or the varint runs past 10 bytes.
func decodeVarint(data []byte) (uint64, []byte, error) {
	var result uint64
	for shift, i := uint(0), 0; ; shift, i = shift+7, i+1 {
		if i >= 10 || i >= len(data) {
			return 0, nil, errors.New("bad varint")
		}
		c := data[i]
		result |= uint64(c&0x7F) << shift
		if c&0x80 == 0 {
			// High bit clear: this was the final byte.
			return result, data[i+1:], nil
		}
	}
}
|
||||||
|
|
||||||
|
// decodeField reads one key/value pair from data into b (setting b.field,
// b.typ, and either b.u64 or b.data depending on the wire type) and
// returns the remaining bytes.
func decodeField(b *buffer, data []byte) ([]byte, error) {
	// The key varint packs the field number (upper bits) and wire type
	// (low 3 bits).
	x, data, err := decodeVarint(data)
	if err != nil {
		return nil, err
	}
	b.field = int(x >> 3)
	b.typ = int(x & 7)
	b.data = nil
	b.u64 = 0
	switch b.typ {
	case 0:
		// Varint.
		b.u64, data, err = decodeVarint(data)
		if err != nil {
			return nil, err
		}
	case 1:
		// Fixed 64-bit, little-endian.
		if len(data) < 8 {
			return nil, errors.New("not enough data")
		}
		b.u64 = le64(data[:8])
		data = data[8:]
	case 2:
		// Length-delimited: a length varint followed by that many bytes.
		var n uint64
		n, data, err = decodeVarint(data)
		if err != nil {
			return nil, err
		}
		if n > uint64(len(data)) {
			return nil, errors.New("too much data")
		}
		b.data = data[:n]
		data = data[n:]
	case 5:
		// Fixed 32-bit, little-endian.
		if len(data) < 4 {
			return nil, errors.New("not enough data")
		}
		b.u64 = uint64(le32(data[:4]))
		data = data[4:]
	default:
		// Groups (types 3/4) and anything else are unsupported.
		return nil, fmt.Errorf("unknown wire type: %d", b.typ)
	}

	return data, nil
}
|
||||||
|
|
||||||
|
// checkType verifies that the field currently held in b has the expected
// wire type.
func checkType(b *buffer, typ int) error {
	if b.typ != typ {
		return errors.New("type mismatch")
	}
	return nil
}

// decodeMessage decodes the length-delimited payload in b into m by
// dispatching each field to m's per-field decoder table.
func decodeMessage(b *buffer, m message) error {
	if err := checkType(b, 2); err != nil {
		return err
	}
	dec := m.decoder()
	data := b.data
	for len(data) > 0 {
		// pull varint field# + type
		var err error
		data, err = decodeField(b, data)
		if err != nil {
			return err
		}
		// Silently skip fields the message does not know about.
		if b.field >= len(dec) || dec[b.field] == nil {
			continue
		}
		if err := dec[b.field](b, m); err != nil {
			return err
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// decodeInt64 reads the current varint field into *x.
func decodeInt64(b *buffer, x *int64) error {
	if err := checkType(b, 0); err != nil {
		return err
	}
	*x = int64(b.u64)
	return nil
}

// decodeInt64s appends the current field's value(s) to *x, accepting
// either a packed (wire type 2) run of varints or a single varint.
func decodeInt64s(b *buffer, x *[]int64) error {
	if b.typ == 2 {
		// Packed encoding
		data := b.data
		tmp := make([]int64, 0, len(data)) // Maximally sized
		for len(data) > 0 {
			var u uint64
			var err error

			if u, data, err = decodeVarint(data); err != nil {
				return err
			}
			tmp = append(tmp, int64(u))
		}
		*x = append(*x, tmp...)
		return nil
	}
	// Unpacked: a single repeated element.
	var i int64
	if err := decodeInt64(b, &i); err != nil {
		return err
	}
	*x = append(*x, i)
	return nil
}
|
||||||
|
|
||||||
|
// decodeUint64 reads the current varint field into *x.
func decodeUint64(b *buffer, x *uint64) error {
	if err := checkType(b, 0); err != nil {
		return err
	}
	*x = b.u64
	return nil
}

// decodeUint64s appends the current field's value(s) to *x, accepting
// either a packed (wire type 2) run of varints or a single varint.
func decodeUint64s(b *buffer, x *[]uint64) error {
	if b.typ == 2 {
		data := b.data
		// Packed encoding
		tmp := make([]uint64, 0, len(data)) // Maximally sized
		for len(data) > 0 {
			var u uint64
			var err error

			if u, data, err = decodeVarint(data); err != nil {
				return err
			}
			tmp = append(tmp, u)
		}
		*x = append(*x, tmp...)
		return nil
	}
	// Unpacked: a single repeated element.
	var u uint64
	if err := decodeUint64(b, &u); err != nil {
		return err
	}
	*x = append(*x, u)
	return nil
}
|
||||||
|
|
||||||
|
// decodeString reads the current length-delimited field into *x.
// The conversion copies, so *x does not alias the input buffer.
func decodeString(b *buffer, x *string) error {
	if err := checkType(b, 2); err != nil {
		return err
	}
	*x = string(b.data)
	return nil
}

// decodeStrings appends the current string field to *x (repeated strings
// arrive one field at a time; there is no packed form).
func decodeStrings(b *buffer, x *[]string) error {
	var s string
	if err := decodeString(b, &s); err != nil {
		return err
	}
	*x = append(*x, s)
	return nil
}

// decodeBool reads the current varint field into *x, treating any
// non-zero value as true.
func decodeBool(b *buffer, x *bool) error {
	if err := checkType(b, 0); err != nil {
		return err
	}
	if int64(b.u64) == 0 {
		*x = false
	} else {
		*x = true
	}
	return nil
}
|
178
vendor/github.com/google/pprof/profile/prune.go
generated
vendored
Normal file
178
vendor/github.com/google/pprof/profile/prune.go
generated
vendored
Normal file
@ -0,0 +1,178 @@
|
|||||||
|
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Implements methods to remove frames from profiles.
|
||||||
|
|
||||||
|
package profile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// reservedNames are substrings of C++ function names that contain
	// parentheses but must not be treated as the start of an argument list.
	reservedNames = []string{"(anonymous namespace)", "operator()"}
	// bracketRx matches either one of the reserved names or a bare '(',
	// built once at package init.
	bracketRx = func() *regexp.Regexp {
		var quotedNames []string
		for _, name := range append(reservedNames, "(") {
			quotedNames = append(quotedNames, regexp.QuoteMeta(name))
		}
		return regexp.MustCompile(strings.Join(quotedNames, "|"))
	}()
)

// simplifyFunc does some primitive simplification of function names.
func simplifyFunc(f string) string {
	// Account for leading '.' on the PPC ELF v1 ABI.
	funcName := strings.TrimPrefix(f, ".")
	// Account for unsimplified names -- try to remove the argument list by trimming
	// starting from the first '(', but skipping reserved names that have '('.
	for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) {
		// ind[0]:ind[1] spans the match; check whether it is one of the
		// reserved names rather than a real argument-list '('.
		foundReserved := false
		for _, res := range reservedNames {
			if funcName[ind[0]:ind[1]] == res {
				foundReserved = true
				break
			}
		}
		if !foundReserved {
			// First non-reserved '(': cut the name there and stop.
			funcName = funcName[:ind[0]]
			break
		}
	}
	return funcName
}
|
||||||
|
|
||||||
|
// Prune removes all nodes beneath a node matching dropRx, and not
// matching keepRx. If the root node of a Sample matches, the sample
// will have an empty stack.
func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
	// prune marks locations whose whole stack entry should go away;
	// pruneBeneath marks locations below which frames are dropped.
	prune := make(map[uint64]bool)
	pruneBeneath := make(map[uint64]bool)

	for _, loc := range p.Location {
		// Scan a location's inlined lines from outermost (last) to
		// innermost, looking for a frame that matches dropRx but not keepRx.
		var i int
		for i = len(loc.Line) - 1; i >= 0; i-- {
			if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
				funcName := simplifyFunc(fn.Name)
				if dropRx.MatchString(funcName) {
					if keepRx == nil || !keepRx.MatchString(funcName) {
						break
					}
				}
			}
		}

		if i >= 0 {
			// Found matching entry to prune.
			pruneBeneath[loc.ID] = true

			// Remove the matching location.
			if i == len(loc.Line)-1 {
				// Matched the top entry: prune the whole location.
				prune[loc.ID] = true
			} else {
				// Keep only the lines above the match.
				loc.Line = loc.Line[i+1:]
			}
		}
	}

	// Prune locs from each Sample
	for _, sample := range p.Sample {
		// Scan from the root to the leaves to find the prune location.
		// Do not prune frames before the first user frame, to avoid
		// pruning everything.
		foundUser := false
		for i := len(sample.Location) - 1; i >= 0; i-- {
			id := sample.Location[i].ID
			if !prune[id] && !pruneBeneath[id] {
				foundUser = true
				continue
			}
			if !foundUser {
				continue
			}
			if prune[id] {
				// Drop this frame and everything beneath it.
				sample.Location = sample.Location[i+1:]
				break
			}
			if pruneBeneath[id] {
				// Keep this frame, drop everything beneath it.
				sample.Location = sample.Location[i:]
				break
			}
		}
	}
}
|
||||||
|
|
||||||
|
// RemoveUninteresting prunes and elides profiles using built-in
// tables of uninteresting function names.
func (p *Profile) RemoveUninteresting() error {
	var keep, drop *regexp.Regexp
	var err error

	// KeepFrames is only consulted when DropFrames is set: with nothing
	// to drop there is nothing to keep from, so this nesting is intentional.
	if p.DropFrames != "" {
		if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil {
			return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err)
		}
		if p.KeepFrames != "" {
			if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil {
				return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err)
			}
		}
		p.Prune(drop, keep)
	}
	return nil
}
|
||||||
|
|
||||||
|
// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself.
//
// Please see the example below to understand this method as well as
// the difference from Prune method.
//
// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline.
//
// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A.
// Prune(A, nil) returns [B,C,B,D] by removing A itself.
//
// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom.
// Prune(B, nil) returns [D] because a matching node is found by scanning from the root.
func (p *Profile) PruneFrom(dropRx *regexp.Regexp) {
	pruneBeneath := make(map[uint64]bool)

	for _, loc := range p.Location {
		// Scan inlined lines from innermost (index 0) outward; the first
		// match marks this location and trims the lines beneath it.
		for i := 0; i < len(loc.Line); i++ {
			if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
				funcName := simplifyFunc(fn.Name)
				if dropRx.MatchString(funcName) {
					// Found matching entry to prune.
					pruneBeneath[loc.ID] = true
					loc.Line = loc.Line[i:]
					break
				}
			}
		}
	}

	// Prune locs from each Sample
	for _, sample := range p.Sample {
		// Scan from the bottom leaf to the root to find the prune location.
		for i, loc := range sample.Location {
			if pruneBeneath[loc.ID] {
				// Keep the matching frame itself; drop frames beneath it.
				sample.Location = sample.Location[i:]
				break
			}
		}
	}
}
|
2
vendor/github.com/onsi/ginkgo/v2/.gitignore
generated
vendored
2
vendor/github.com/onsi/ginkgo/v2/.gitignore
generated
vendored
@ -1,5 +1,5 @@
|
|||||||
.DS_Store
|
.DS_Store
|
||||||
TODO.md
|
TODO
|
||||||
tmp/**/*
|
tmp/**/*
|
||||||
*.coverprofile
|
*.coverprofile
|
||||||
.vscode
|
.vscode
|
||||||
|
256
vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
generated
vendored
256
vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
generated
vendored
@ -1,3 +1,257 @@
|
|||||||
|
## 2.9.5
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- ensure the correct deterministic sort order is produced when ordered specs are generated by a helper function [7fa0b6b]
|
||||||
|
|
||||||
|
### Maintenance
|
||||||
|
- fix generators link (#1200) [9f9d8b9]
|
||||||
|
- Bump golang.org/x/tools from 0.8.0 to 0.9.1 (#1196) [150e3f2]
|
||||||
|
- fix spelling err in docs (#1199) [0013b1a]
|
||||||
|
- Bump golang.org/x/sys from 0.7.0 to 0.8.0 (#1193) [9e9e3e5]
|
||||||
|
|
||||||
|
## 2.9.4
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- fix hang with ginkgo -p (#1192) [15d4bdc] - this addresses a _long_ standing issue related to Ginkgo hanging when a child process spawned by the test does not exit.
|
||||||
|
|
||||||
|
- fix: fail fast may cause Serial spec or cleanup Node interrupted (#1178) [8dea88b] - prior to this there was a small gap in which specs on other processes might start even if one process has tried to abort the suite.
|
||||||
|
|
||||||
|
|
||||||
|
### Maintenance
|
||||||
|
- Document run order when multiple setup nodes are at the same nesting level [903be81]
|
||||||
|
|
||||||
|
## 2.9.3
|
||||||
|
|
||||||
|
### Features
|
||||||
|
- Add RenderTimeline to GinkgoT() [c0c77b6]
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- update Measure deprecation message. fixes #1176 [227c662]
|
||||||
|
- add newlines to GinkgoLogr (#1170) (#1171) [0de0e7c]
|
||||||
|
|
||||||
|
### Maintenance
|
||||||
|
- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#1183) [8b925ab]
|
||||||
|
- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#1184) [e3795a4]
|
||||||
|
- Bump golang.org/x/tools from 0.7.0 to 0.8.0 (#1182) [b453793]
|
||||||
|
- Bump actions/setup-go from 3 to 4 (#1164) [73ed75b]
|
||||||
|
- Bump github.com/onsi/gomega from 1.27.4 to 1.27.6 (#1173) [0a2bc64]
|
||||||
|
- Bump github.com/go-logr/logr from 1.2.3 to 1.2.4 (#1174) [f41c557]
|
||||||
|
- Bump golang.org/x/sys from 0.6.0 to 0.7.0 (#1179) [8e423e5]
|
||||||
|
|
||||||
|
## 2.9.2
|
||||||
|
|
||||||
|
### Maintenance
|
||||||
|
- Bump github.com/go-task/slim-sprig (#1167) [3fcc5bf]
|
||||||
|
- Bump github.com/onsi/gomega from 1.27.3 to 1.27.4 (#1163) [6143ffe]
|
||||||
|
|
||||||
|
## 2.9.1
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
This release fixes a longstanding issue where `ginkgo -coverpkg=./...` would not work. This is now resolved and fixes [#1161](https://github.com/onsi/ginkgo/issues/1161) and [#995](https://github.com/onsi/ginkgo/issues/995)
|
||||||
|
- Support -coverpkg=./... [26ca1b5]
|
||||||
|
- document coverpkg a bit more clearly [fc44c3b]
|
||||||
|
|
||||||
|
### Maintenance
|
||||||
|
- bump various dependencies
|
||||||
|
- Improve Documentation and fix typo (#1158) [93de676]
|
||||||
|
|
||||||
|
## 2.9.0
|
||||||
|
|
||||||
|
### Features
|
||||||
|
- AttachProgressReporter is an experimental feature that allows users to provide arbitrary information when a ProgressReport is requested [28801fe]
|
||||||
|
|
||||||
|
- GinkgoT() has been expanded to include several Ginkgo-specific methods [2bd5a3b]
|
||||||
|
|
||||||
|
The intent is to enable the development of third-party libraries that integrate deeply with Ginkgo using `GinkgoT()` to access Ginkgo's functionality.
|
||||||
|
|
||||||
|
## 2.8.4
|
||||||
|
|
||||||
|
### Features
|
||||||
|
- Add OmitSuiteSetupNodes to JunitReportConfig (#1147) [979fbc2]
|
||||||
|
- Add a reference to ginkgolinter in docs.index.md (#1143) [8432589]
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- rename tools hack to see if it fixes things for downstream users [a8bb39a]
|
||||||
|
|
||||||
|
### Maintenance
|
||||||
|
- Bump golang.org/x/text (#1144) [41b2a8a]
|
||||||
|
- Bump github.com/onsi/gomega from 1.27.0 to 1.27.1 (#1142) [7c4f583]
|
||||||
|
|
||||||
|
## 2.8.3
|
||||||
|
|
||||||
|
Released to fix security issue in golang.org/x/net dependency
|
||||||
|
|
||||||
|
### Maintenance
|
||||||
|
|
||||||
|
- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#1141) [fc1a02e]
|
||||||
|
- remove tools.go hack from documentation [0718693]
|
||||||
|
|
||||||
|
## 2.8.2
|
||||||
|
|
||||||
|
Ginkgo now includes a `tools.go` file in the root directory of the `ginkgo` package. This should allow modules that simply `go get github.com/onsi/ginkgo/v2` to also pull in the CLI dependencies. This obviates the need for consumers of Ginkgo to have their own `tools.go` file and makes it simpler to ensure that the version of the `ginkgo` CLI being used matches the version of the library. You can simply run `go run github.com/onsi/ginkgo/v2/ginkgo` to run the version of the cli associated with your package go.mod.
|
||||||
|
|
||||||
|
### Maintenance
|
||||||
|
|
||||||
|
- Bump github.com/onsi/gomega from 1.26.0 to 1.27.0 (#1139) [5767b0a]
|
||||||
|
- Fix minor typos (#1138) [e1e9723]
|
||||||
|
- Fix link in V2 Migration Guide (#1137) [a588f60]
|
||||||
|
|
||||||
|
## 2.8.1
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- lock around default report output to avoid triggering the race detector when calling By from goroutines [2d5075a]
|
||||||
|
- don't run ReportEntries through sprintf [febbe38]
|
||||||
|
|
||||||
|
### Maintenance
|
||||||
|
- Bump golang.org/x/tools from 0.5.0 to 0.6.0 (#1135) [11a4860]
|
||||||
|
- test: update matrix for Go 1.20 (#1130) [4890a62]
|
||||||
|
- Bump golang.org/x/sys from 0.4.0 to 0.5.0 (#1133) [a774638]
|
||||||
|
- Bump github.com/onsi/gomega from 1.25.0 to 1.26.0 (#1120) [3f233bd]
|
||||||
|
- Bump github-pages from 227 to 228 in /docs (#1131) [f9b8649]
|
||||||
|
- Bump activesupport from 6.0.6 to 6.0.6.1 in /docs (#1127) [6f8c042]
|
||||||
|
- Update index.md with instructions on how to upgrade Ginkgo [833a75e]
|
||||||
|
|
||||||
|
## 2.8.0
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- Introduce GinkgoHelper() to track and exclude helper functions from potential CodeLocations [e19f556]
|
||||||
|
|
||||||
|
Modeled after `testing.T.Helper()`. Now, rather than write code like:
|
||||||
|
|
||||||
|
```go
|
||||||
|
func helper(model Model) {
|
||||||
|
Expect(model).WithOffset(1).To(BeValid())
|
||||||
|
Expect(model.SerialNumber).WithOffset(1).To(MatchRegexp(/[a-f0-9]*/))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
you can stop tracking offsets (which makes nesting composing helpers nearly impossible) and simply write:
|
||||||
|
|
||||||
|
```go
|
||||||
|
func helper(model Model) {
|
||||||
|
GinkgoHelper()
|
||||||
|
Expect(model).To(BeValid())
|
||||||
|
Expect(model.SerialNumber).To(MatchRegexp(/[a-f0-9]*/))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- Introduce GinkgoLabelFilter() and Label().MatchesLabelFilter() to make it possible to programmatically match filters (fixes #1119) [2f6597c]
|
||||||
|
|
||||||
|
You can now write code like this:
|
||||||
|
|
||||||
|
```go
|
||||||
|
BeforeSuite(func() {
|
||||||
|
if Label("slow").MatchesLabelFilter(GinkgoLabelFilter()) {
|
||||||
|
// do slow setup
|
||||||
|
}
|
||||||
|
|
||||||
|
if Label("fast").MatchesLabelFilter(GinkgoLabelFilter()) {
|
||||||
|
// do fast setup
|
||||||
|
}
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
to programmatically check whether a given set of labels will match the configured `--label-filter`.
|
||||||
|
|
||||||
|
### Maintenance
|
||||||
|
|
||||||
|
- Bump webrick from 1.7.0 to 1.8.1 in /docs (#1125) [ea4966e]
|
||||||
|
- cdeql: add ruby language (#1124) [9dd275b]
|
||||||
|
- dependabot: add bundler package-ecosystem for docs (#1123) [14e7bdd]
|
||||||
|
|
||||||
|
## 2.7.1
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- Bring back SuiteConfig.EmitSpecProgress to avoid compilation issue for consumers that set it manually [d2a1cb0]
|
||||||
|
|
||||||
|
### Maintenance
|
||||||
|
- Bump github.com/onsi/gomega from 1.24.2 to 1.25.0 (#1118) [cafece6]
|
||||||
|
- Bump golang.org/x/tools from 0.4.0 to 0.5.0 (#1111) [eda66c2]
|
||||||
|
- Bump golang.org/x/sys from 0.3.0 to 0.4.0 (#1112) [ac5ccaa]
|
||||||
|
- Bump github.com/onsi/gomega from 1.24.1 to 1.24.2 (#1097) [eee6480]
|
||||||
|
|
||||||
|
## 2.7.0
|
||||||
|
|
||||||
|
### Features
|
||||||
|
- Introduce ContinueOnFailure for Ordered containers [e0123ca] - Ordered containers that are also decorated with ContinueOnFailure will not stop running specs after the first spec fails.
|
||||||
|
- Support for bootstrap commands to use custom data for templates (#1110) [7a2b242]
|
||||||
|
- Support for labels and pending decorator in ginkgo outline output (#1113) [e6e3b98]
|
||||||
|
- Color aliases for custom color support (#1101) [49fab7a]
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- correctly ensure deterministic spec order, even if specs are generated by iterating over a map [89dda20]
|
||||||
|
- Fix a bug where timedout specs were not correctly treated as failures when determining whether or not to run AfterAlls in an Ordered container.
|
||||||
|
- Ensure go test coverprofile outputs to the expected location (#1105) [b0bd77b]
|
||||||
|
|
||||||
|
## 2.6.1
|
||||||
|
|
||||||
|
### Features
|
||||||
|
- Override formatter colors from envvars - this is a new feature but an alternative approach involving config files might be taken in the future (#1095) [60240d1]
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- GinkgoRecover now supports ignoring panics that match a specific, hidden, interface [301f3e2]
|
||||||
|
|
||||||
|
### Maintenance
|
||||||
|
- Bump github.com/onsi/gomega from 1.24.0 to 1.24.1 (#1077) [3643823]
|
||||||
|
- Bump golang.org/x/tools from 0.2.0 to 0.4.0 (#1090) [f9f856e]
|
||||||
|
- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#1091) [0d7087e]
|
||||||
|
|
||||||
|
## 2.6.0
|
||||||
|
|
||||||
|
### Features
|
||||||
|
- `ReportBeforeSuite` provides access to the suite report before the suite begins.
|
||||||
|
- Add junit config option for omitting leafnodetype (#1088) [956e6d2]
|
||||||
|
- Add support to customize junit report config to omit spec labels (#1087) [de44005]
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- Fix stack trace pruning so that it has a chance of working on windows [2165648]
|
||||||
|
|
||||||
|
## 2.5.1
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- skipped tests only show as 'S' when running with -v [3ab38ae]
|
||||||
|
- Fix typo in docs/index.md (#1082) [55fc58d]
|
||||||
|
- Fix typo in docs/index.md (#1081) [8a14f1f]
|
||||||
|
- Fix link notation in docs/index.md (#1080) [2669612]
|
||||||
|
- Fix typo in `--progress` deprecation message (#1076) [b4b7edc]
|
||||||
|
|
||||||
|
### Maintenance
|
||||||
|
- chore: Included githubactions in the dependabot config (#976) [baea341]
|
||||||
|
- Bump golang.org/x/sys from 0.1.0 to 0.2.0 (#1075) [9646297]
|
||||||
|
|
||||||
|
## 2.5.0
|
||||||
|
|
||||||
|
### Ginkgo output now includes a timeline-view of the spec
|
||||||
|
|
||||||
|
This commit changes Ginkgo's default output. Spec details are now
|
||||||
|
presented as a **timeline** that includes events that occur during the spec
|
||||||
|
lifecycle interleaved with any GinkgoWriter content. This makes is much easier
|
||||||
|
to understand the flow of a spec and where a given failure occurs.
|
||||||
|
|
||||||
|
The --progress, --slow-spec-threshold, --always-emit-ginkgo-writer flags
|
||||||
|
and the SuppressProgressReporting decorator have all been deprecated. Instead
|
||||||
|
the existing -v and -vv flags better capture the level of verbosity to display. However,
|
||||||
|
a new --show-node-events flag is added to include node `> Enter` and `< Exit` events
|
||||||
|
in the spec timeline.
|
||||||
|
|
||||||
|
In addition, JUnit reports now include the timeline (rendered with -vv) and custom JUnit
|
||||||
|
reports can be configured and generated using
|
||||||
|
`GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig)`
|
||||||
|
|
||||||
|
Code should continue to work unchanged with this version of Ginkgo - however if you have tooling that
|
||||||
|
was relying on the specific output format of Ginkgo you _may_ run into issues. Ginkgo's console output is not guaranteed to be stable for tooling and automation purposes. You should, instead, use Ginkgo's JSON format
|
||||||
|
to build tooling on top of as it has stronger guarantees to be stable from version to version.
|
||||||
|
|
||||||
|
### Features
|
||||||
|
- Provide details about which timeout expired [0f2fa27]
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- Add Support Policy to docs [c70867a]
|
||||||
|
|
||||||
|
### Maintenance
|
||||||
|
- Bump github.com/onsi/gomega from 1.22.1 to 1.23.0 (#1070) [bb3b4e2]
|
||||||
|
|
||||||
## 2.4.0
|
## 2.4.0
|
||||||
|
|
||||||
### Features
|
### Features
|
||||||
@ -8,7 +262,7 @@
|
|||||||
|
|
||||||
### Fixes
|
### Fixes
|
||||||
- correcting some typos (#1064) [1403d3c]
|
- correcting some typos (#1064) [1403d3c]
|
||||||
- fix flaky internal_integration interupt specs [2105ba3]
|
- fix flaky internal_integration interrupt specs [2105ba3]
|
||||||
- Correct busted link in README [be6b5b9]
|
- Correct busted link in README [be6b5b9]
|
||||||
|
|
||||||
### Maintenance
|
### Maintenance
|
||||||
|
56
vendor/github.com/onsi/ginkgo/v2/README.md
generated
vendored
56
vendor/github.com/onsi/ginkgo/v2/README.md
generated
vendored
@ -4,11 +4,7 @@
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
# Ginkgo 2.0 is now Generally Available!
|
# Ginkgo
|
||||||
|
|
||||||
You can learn more about 2.0 in the [Migration Guide](https://onsi.github.io/ginkgo/MIGRATING_TO_V2)!
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
Ginkgo is a mature testing framework for Go designed to help you write expressive specs. Ginkgo builds on top of Go's `testing` foundation and is complemented by the [Gomega](https://github.com/onsi/gomega) matcher library. Together, Ginkgo and Gomega let you express the intent behind your specs clearly:
|
Ginkgo is a mature testing framework for Go designed to help you write expressive specs. Ginkgo builds on top of Go's `testing` foundation and is complemented by the [Gomega](https://github.com/onsi/gomega) matcher library. Together, Ginkgo and Gomega let you express the intent behind your specs clearly:
|
||||||
|
|
||||||
@ -33,53 +29,53 @@ Describe("Checking books out of the library", Label("library"), func() {
|
|||||||
})
|
})
|
||||||
|
|
||||||
When("the library has the book in question", func() {
|
When("the library has the book in question", func() {
|
||||||
BeforeEach(func() {
|
BeforeEach(func(ctx SpecContext) {
|
||||||
Expect(library.Store(book)).To(Succeed())
|
Expect(library.Store(ctx, book)).To(Succeed())
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("and the book is available", func() {
|
Context("and the book is available", func() {
|
||||||
It("lends it to the reader", func() {
|
It("lends it to the reader", func(ctx SpecContext) {
|
||||||
Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed())
|
Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
|
||||||
Expect(valjean.Books()).To(ContainElement(book))
|
Expect(valjean.Books()).To(ContainElement(book))
|
||||||
Expect(library.UserWithBook(book)).To(Equal(valjean))
|
Expect(library.UserWithBook(ctx, book)).To(Equal(valjean))
|
||||||
})
|
}, SpecTimeout(time.Second * 5))
|
||||||
})
|
})
|
||||||
|
|
||||||
Context("but the book has already been checked out", func() {
|
Context("but the book has already been checked out", func() {
|
||||||
var javert *users.User
|
var javert *users.User
|
||||||
BeforeEach(func() {
|
BeforeEach(func(ctx SpecContext) {
|
||||||
javert = users.NewUser("Javert")
|
javert = users.NewUser("Javert")
|
||||||
Expect(javert.Checkout(library, "Les Miserables")).To(Succeed())
|
Expect(javert.Checkout(ctx, library, "Les Miserables")).To(Succeed())
|
||||||
})
|
})
|
||||||
|
|
||||||
It("tells the user", func() {
|
It("tells the user", func(ctx SpecContext) {
|
||||||
err := valjean.Checkout(library, "Les Miserables")
|
err := valjean.Checkout(ctx, library, "Les Miserables")
|
||||||
Expect(error).To(MatchError("Les Miserables is currently checked out"))
|
Expect(error).To(MatchError("Les Miserables is currently checked out"))
|
||||||
})
|
}, SpecTimeout(time.Second * 5))
|
||||||
|
|
||||||
It("lets the user place a hold and get notified later", func() {
|
It("lets the user place a hold and get notified later", func(ctx SpecContext) {
|
||||||
Expect(valjean.Hold(library, "Les Miserables")).To(Succeed())
|
Expect(valjean.Hold(ctx, library, "Les Miserables")).To(Succeed())
|
||||||
Expect(valjean.Holds()).To(ContainElement(book))
|
Expect(valjean.Holds(ctx)).To(ContainElement(book))
|
||||||
|
|
||||||
By("when Javert returns the book")
|
By("when Javert returns the book")
|
||||||
Expect(javert.Return(library, book)).To(Succeed())
|
Expect(javert.Return(ctx, library, book)).To(Succeed())
|
||||||
|
|
||||||
By("it eventually informs Valjean")
|
By("it eventually informs Valjean")
|
||||||
notification := "Les Miserables is ready for pick up"
|
notification := "Les Miserables is ready for pick up"
|
||||||
Eventually(valjean.Notifications).Should(ContainElement(notification))
|
Eventually(ctx, valjean.Notifications).Should(ContainElement(notification))
|
||||||
|
|
||||||
Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed())
|
Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
|
||||||
Expect(valjean.Books()).To(ContainElement(book))
|
Expect(valjean.Books(ctx)).To(ContainElement(book))
|
||||||
Expect(valjean.Holds()).To(BeEmpty())
|
Expect(valjean.Holds(ctx)).To(BeEmpty())
|
||||||
})
|
}, SpecTimeout(time.Second * 10))
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
When("the library does not have the book in question", func() {
|
When("the library does not have the book in question", func() {
|
||||||
It("tells the reader the book is unavailable", func() {
|
It("tells the reader the book is unavailable", func(ctx SpecContext) {
|
||||||
err := valjean.Checkout(library, "Les Miserables")
|
err := valjean.Checkout(ctx, library, "Les Miserables")
|
||||||
Expect(error).To(MatchError("Les Miserables is not in the library catalog"))
|
Expect(error).To(MatchError("Les Miserables is not in the library catalog"))
|
||||||
})
|
}, SpecTimeout(time.Second * 5))
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
```
|
```
|
||||||
@ -92,7 +88,7 @@ If you have a question, comment, bug report, feature request, etc. please open a
|
|||||||
|
|
||||||
Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://lunarmodules.github.io/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing.
|
Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://lunarmodules.github.io/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing.
|
||||||
|
|
||||||
With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs)
|
With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs).
|
||||||
|
|
||||||
At runtime, Ginkgo can run your specs in reproducibly [random order](https://onsi.github.io/ginkgo/#spec-randomization) and has sophisticated support for [spec parallelization](https://onsi.github.io/ginkgo/#spec-parallelization). In fact, running specs in parallel is as easy as
|
At runtime, Ginkgo can run your specs in reproducibly [random order](https://onsi.github.io/ginkgo/#spec-randomization) and has sophisticated support for [spec parallelization](https://onsi.github.io/ginkgo/#spec-parallelization). In fact, running specs in parallel is as easy as
|
||||||
|
|
||||||
@ -100,7 +96,7 @@ At runtime, Ginkgo can run your specs in reproducibly [random order](https://ons
|
|||||||
ginkgo -p
|
ginkgo -p
|
||||||
```
|
```
|
||||||
|
|
||||||
By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly.
|
By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. And you don't have to worry about your spec suite hanging or leaving a mess behind - Ginkgo provides a per-node `context.Context` and the capability to interrupt the spec after a set period of time - and then clean up.
|
||||||
|
|
||||||
As your suites grow Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programmatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically).
|
As your suites grow Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programmatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically).
|
||||||
|
|
||||||
|
90
vendor/github.com/onsi/ginkgo/v2/core_dsl.go
generated
vendored
90
vendor/github.com/onsi/ginkgo/v2/core_dsl.go
generated
vendored
@ -21,7 +21,6 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
"github.com/onsi/ginkgo/v2/formatter"
|
"github.com/onsi/ginkgo/v2/formatter"
|
||||||
@ -93,11 +92,11 @@ type GinkgoWriterInterface interface {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
SpecContext is the context object passed into nodes that are subject to a timeout or need to be notified of an interrupt. It implements the standard context.Context interface but also contains additional helpers to provide an extensibility point for Ginkgo. (As an example, Gomega's Eventually can use the methods defined on SpecContext to provide deeper integratoin with Ginkgo).
|
SpecContext is the context object passed into nodes that are subject to a timeout or need to be notified of an interrupt. It implements the standard context.Context interface but also contains additional helpers to provide an extensibility point for Ginkgo. (As an example, Gomega's Eventually can use the methods defined on SpecContext to provide deeper integration with Ginkgo).
|
||||||
|
|
||||||
You can do anything with SpecContext that you do with a typical context.Context including wrapping it with any of the context.With* methods.
|
You can do anything with SpecContext that you do with a typical context.Context including wrapping it with any of the context.With* methods.
|
||||||
|
|
||||||
Ginkgo will cancel the SpecContext when a node is interrupted (e.g. by the user sending an interupt signal) or when a node has exceeded it's allowed run-time. Note, however, that even in cases where a node has a deadline, SpecContext will not return a deadline via .Deadline(). This is because Ginkgo does not use a WithDeadline() context to model node deadlines as Ginkgo needs control over the precise timing of the context cancellation to ensure it can provide an accurate progress report at the moment of cancellation.
|
Ginkgo will cancel the SpecContext when a node is interrupted (e.g. by the user sending an interrupt signal) or when a node has exceeded its allowed run-time. Note, however, that even in cases where a node has a deadline, SpecContext will not return a deadline via .Deadline(). This is because Ginkgo does not use a WithDeadline() context to model node deadlines as Ginkgo needs control over the precise timing of the context cancellation to ensure it can provide an accurate progress report at the moment of cancellation.
|
||||||
*/
|
*/
|
||||||
type SpecContext = internal.SpecContext
|
type SpecContext = internal.SpecContext
|
||||||
|
|
||||||
@ -164,6 +163,29 @@ func GinkgoParallelProcess() int {
|
|||||||
return suiteConfig.ParallelProcess
|
return suiteConfig.ParallelProcess
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
GinkgoHelper marks the function it's called in as a test helper. When a failure occurs inside a helper function, Ginkgo will skip the helper when analyzing the stack trace to identify where the failure occurred.
|
||||||
|
|
||||||
|
This is an alternative, simpler, mechanism to passing in a skip offset when calling Fail or using Gomega.
|
||||||
|
*/
|
||||||
|
func GinkgoHelper() {
|
||||||
|
types.MarkAsHelper(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
GinkgoLabelFilter() returns the label filter configured for this suite via `--label-filter`.
|
||||||
|
|
||||||
|
You can use this to manually check if a set of labels would satisfy the filter via:
|
||||||
|
|
||||||
|
if (Label("cat", "dog").MatchesLabelFilter(GinkgoLabelFilter())) {
|
||||||
|
//...
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
func GinkgoLabelFilter() string {
|
||||||
|
suiteConfig, _ := GinkgoConfiguration()
|
||||||
|
return suiteConfig.LabelFilter
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant
|
PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant
|
||||||
when running in parallel and output to stdout/stderr is being intercepted. You generally
|
when running in parallel and output to stdout/stderr is being intercepted. You generally
|
||||||
@ -276,7 +298,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
writer := GinkgoWriter.(*internal.Writer)
|
writer := GinkgoWriter.(*internal.Writer)
|
||||||
if reporterConfig.Verbose && suiteConfig.ParallelTotal == 1 {
|
if reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) && suiteConfig.ParallelTotal == 1 {
|
||||||
writer.SetMode(internal.WriterModeStreamAndBuffer)
|
writer.SetMode(internal.WriterModeStreamAndBuffer)
|
||||||
} else {
|
} else {
|
||||||
writer.SetMode(internal.WriterModeBufferOnly)
|
writer.SetMode(internal.WriterModeBufferOnly)
|
||||||
@ -370,6 +392,12 @@ func AbortSuite(message string, callerSkip ...int) {
|
|||||||
panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
|
panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
ignorablePanic is used by Gomega to signal to GinkgoRecover that Goemga is handling
|
||||||
|
the error associated with this panic. It i used when Eventually/Consistently are passed a func(g Gomega) and the resulting function launches a goroutines that makes a failed assertion. That failed assertion is registered by Gomega and then panics. Ordinarily the panic is captured by Gomega. In the case of a goroutine Gomega can't capture the panic - so we piggy back on GinkgoRecover so users have a single defer GinkgoRecover() pattern to follow. To do that we need to tell Ginkgo to ignore this panic and not register it as a panic on the global Failer.
|
||||||
|
*/
|
||||||
|
type ignorablePanic interface{ GinkgoRecoverShouldIgnoreThisPanic() }
|
||||||
|
|
||||||
/*
|
/*
|
||||||
GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
|
GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
|
||||||
Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
|
Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
|
||||||
@ -385,6 +413,9 @@ You can learn more about how Ginkgo manages failures here: https://onsi.github.i
|
|||||||
func GinkgoRecover() {
|
func GinkgoRecover() {
|
||||||
e := recover()
|
e := recover()
|
||||||
if e != nil {
|
if e != nil {
|
||||||
|
if _, ok := e.(ignorablePanic); ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
global.Failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
|
global.Failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -509,35 +540,11 @@ and will simply log the passed in text to the GinkgoWriter. If By is handed a f
|
|||||||
|
|
||||||
By will also generate and attach a ReportEntry to the spec. This will ensure that By annotations appear in Ginkgo's machine-readable reports.
|
By will also generate and attach a ReportEntry to the spec. This will ensure that By annotations appear in Ginkgo's machine-readable reports.
|
||||||
|
|
||||||
Note that By does not generate a new Ginkgo node - rather it is simply synctactic sugar around GinkgoWriter and AddReportEntry
|
Note that By does not generate a new Ginkgo node - rather it is simply syntactic sugar around GinkgoWriter and AddReportEntry
|
||||||
You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by
|
You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by
|
||||||
*/
|
*/
|
||||||
func By(text string, callback ...func()) {
|
func By(text string, callback ...func()) {
|
||||||
if !global.Suite.InRunPhase() {
|
exitIfErr(global.Suite.By(text, callback...))
|
||||||
exitIfErr(types.GinkgoErrors.ByNotDuringRunPhase(types.NewCodeLocation(1)))
|
|
||||||
}
|
|
||||||
value := struct {
|
|
||||||
Text string
|
|
||||||
Duration time.Duration
|
|
||||||
}{
|
|
||||||
Text: text,
|
|
||||||
}
|
|
||||||
t := time.Now()
|
|
||||||
global.Suite.SetProgressStepCursor(internal.ProgressStepCursor{
|
|
||||||
Text: text,
|
|
||||||
CodeLocation: types.NewCodeLocation(1),
|
|
||||||
StartTime: t,
|
|
||||||
})
|
|
||||||
AddReportEntry("By Step", ReportEntryVisibilityNever, Offset(1), &value, t)
|
|
||||||
formatter := formatter.NewWithNoColorBool(reporterConfig.NoColor)
|
|
||||||
GinkgoWriter.Println(formatter.F("{{bold}}STEP:{{/}} %s {{gray}}%s{{/}}", text, t.Format(types.GINKGO_TIME_FORMAT)))
|
|
||||||
if len(callback) == 1 {
|
|
||||||
callback[0]()
|
|
||||||
value.Duration = time.Since(t)
|
|
||||||
}
|
|
||||||
if len(callback) > 1 {
|
|
||||||
panic("just one callback per By, please")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -736,7 +743,7 @@ For example:
|
|||||||
os.SetEnv("FOO", "BAR")
|
os.SetEnv("FOO", "BAR")
|
||||||
})
|
})
|
||||||
|
|
||||||
will register a cleanup handler that will set the environment variable "FOO" to it's current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec.
|
will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec.
|
||||||
|
|
||||||
Similarly:
|
Similarly:
|
||||||
|
|
||||||
@ -764,3 +771,24 @@ func DeferCleanup(args ...interface{}) {
|
|||||||
}
|
}
|
||||||
pushNode(internal.NewCleanupNode(deprecationTracker, fail, args...))
|
pushNode(internal.NewCleanupNode(deprecationTracker, fail, args...))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
AttachProgressReporter allows you to register a function that will be called whenever Ginkgo generates a Progress Report. The contents returned by the function will be included in the report.
|
||||||
|
|
||||||
|
**This is an experimental feature and the public-facing interface may change in a future minor version of Ginkgo**
|
||||||
|
|
||||||
|
Progress Reports are generated:
|
||||||
|
- whenever the user explicitly requests one (via `SIGINFO` or `SIGUSR1`)
|
||||||
|
- on nodes decorated with PollProgressAfter
|
||||||
|
- on suites run with --poll-progress-after
|
||||||
|
- whenever a test times out
|
||||||
|
|
||||||
|
Ginkgo uses Progress Reports to convey the current state of the test suite, including any running goroutines. By attaching a progress reporter you are able to supplement these reports with additional information.
|
||||||
|
|
||||||
|
# AttachProgressReporter returns a function that can be called to detach the progress reporter
|
||||||
|
|
||||||
|
You can learn more about AttachProgressReporter here: https://onsi.github.io/ginkgo/#attaching-additional-information-to-progress-reports
|
||||||
|
*/
|
||||||
|
func AttachProgressReporter(reporter func() string) func() {
|
||||||
|
return global.Suite.AttachProgressReporter(reporter)
|
||||||
|
}
|
||||||
|
14
vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
generated
vendored
14
vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
generated
vendored
@ -46,7 +46,7 @@ const Pending = internal.Pending
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
Serial is a decorator that allows you to mark a spec or container as serial. These specs will never run in parallel with other specs.
|
Serial is a decorator that allows you to mark a spec or container as serial. These specs will never run in parallel with other specs.
|
||||||
Tests in ordered containers cannot be marked as serial - mark the ordered container instead.
|
Specs in ordered containers cannot be marked as serial - mark the ordered container instead.
|
||||||
|
|
||||||
You can learn more here: https://onsi.github.io/ginkgo/#serial-specs
|
You can learn more here: https://onsi.github.io/ginkgo/#serial-specs
|
||||||
You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
|
You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
|
||||||
@ -54,7 +54,7 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat
|
|||||||
const Serial = internal.Serial
|
const Serial = internal.Serial
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Ordered is a decorator that allows you to mark a container as ordered. Tests in the container will always run in the order they appear.
|
Ordered is a decorator that allows you to mark a container as ordered. Specs in the container will always run in the order they appear.
|
||||||
They will never be randomized and they will never run in parallel with one another, though they may run in parallel with other specs.
|
They will never be randomized and they will never run in parallel with one another, though they may run in parallel with other specs.
|
||||||
|
|
||||||
You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
|
You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
|
||||||
@ -62,6 +62,16 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat
|
|||||||
*/
|
*/
|
||||||
const Ordered = internal.Ordered
|
const Ordered = internal.Ordered
|
||||||
|
|
||||||
|
/*
|
||||||
|
ContinueOnFailure is a decorator that allows you to mark an Ordered container to continue running specs even if failures occur. Ordinarily an ordered container will stop running specs after the first failure occurs. Note that if a BeforeAll or a BeforeEach/JustBeforeEach annotated with OncePerOrdered fails then no specs will run as the precondition for the Ordered container will consider to be failed.
|
||||||
|
|
||||||
|
ContinueOnFailure only applies to the outermost Ordered container. Attempting to place ContinueOnFailure in a nested container will result in an error.
|
||||||
|
|
||||||
|
You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
|
||||||
|
You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
|
||||||
|
*/
|
||||||
|
const ContinueOnFailure = internal.ContinueOnFailure
|
||||||
|
|
||||||
/*
|
/*
|
||||||
OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEach, JustBeforeEach, and JustAfterEach setup nodes to run once
|
OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEach, JustBeforeEach, and JustAfterEach setup nodes to run once
|
||||||
per ordered context. Normally these setup nodes run around each individual spec, with OncePerOrdered they will run once around the set of specs in an ordered container.
|
per ordered context. Normally these setup nodes run around each individual spec, with OncePerOrdered they will run once around the set of specs in an ordered container.
|
||||||
|
61
vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
generated
vendored
61
vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
generated
vendored
@ -4,6 +4,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -50,6 +51,37 @@ func NewWithNoColorBool(noColor bool) Formatter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func New(colorMode ColorMode) Formatter {
|
func New(colorMode ColorMode) Formatter {
|
||||||
|
colorAliases := map[string]int{
|
||||||
|
"black": 0,
|
||||||
|
"red": 1,
|
||||||
|
"green": 2,
|
||||||
|
"yellow": 3,
|
||||||
|
"blue": 4,
|
||||||
|
"magenta": 5,
|
||||||
|
"cyan": 6,
|
||||||
|
"white": 7,
|
||||||
|
}
|
||||||
|
for colorAlias, n := range colorAliases {
|
||||||
|
colorAliases[fmt.Sprintf("bright-%s", colorAlias)] = n + 8
|
||||||
|
}
|
||||||
|
|
||||||
|
getColor := func(color, defaultEscapeCode string) string {
|
||||||
|
color = strings.ToUpper(strings.ReplaceAll(color, "-", "_"))
|
||||||
|
envVar := fmt.Sprintf("GINKGO_CLI_COLOR_%s", color)
|
||||||
|
envVarColor := os.Getenv(envVar)
|
||||||
|
if envVarColor == "" {
|
||||||
|
return defaultEscapeCode
|
||||||
|
}
|
||||||
|
if colorCode, ok := colorAliases[envVarColor]; ok {
|
||||||
|
return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
|
||||||
|
}
|
||||||
|
colorCode, err := strconv.Atoi(envVarColor)
|
||||||
|
if err != nil || colorCode < 0 || colorCode > 255 {
|
||||||
|
return defaultEscapeCode
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
|
||||||
|
}
|
||||||
|
|
||||||
f := Formatter{
|
f := Formatter{
|
||||||
ColorMode: colorMode,
|
ColorMode: colorMode,
|
||||||
colors: map[string]string{
|
colors: map[string]string{
|
||||||
@ -57,18 +89,18 @@ func New(colorMode ColorMode) Formatter {
|
|||||||
"bold": "\x1b[1m",
|
"bold": "\x1b[1m",
|
||||||
"underline": "\x1b[4m",
|
"underline": "\x1b[4m",
|
||||||
|
|
||||||
"red": "\x1b[38;5;9m",
|
"red": getColor("red", "\x1b[38;5;9m"),
|
||||||
"orange": "\x1b[38;5;214m",
|
"orange": getColor("orange", "\x1b[38;5;214m"),
|
||||||
"coral": "\x1b[38;5;204m",
|
"coral": getColor("coral", "\x1b[38;5;204m"),
|
||||||
"magenta": "\x1b[38;5;13m",
|
"magenta": getColor("magenta", "\x1b[38;5;13m"),
|
||||||
"green": "\x1b[38;5;10m",
|
"green": getColor("green", "\x1b[38;5;10m"),
|
||||||
"dark-green": "\x1b[38;5;28m",
|
"dark-green": getColor("dark-green", "\x1b[38;5;28m"),
|
||||||
"yellow": "\x1b[38;5;11m",
|
"yellow": getColor("yellow", "\x1b[38;5;11m"),
|
||||||
"light-yellow": "\x1b[38;5;228m",
|
"light-yellow": getColor("light-yellow", "\x1b[38;5;228m"),
|
||||||
"cyan": "\x1b[38;5;14m",
|
"cyan": getColor("cyan", "\x1b[38;5;14m"),
|
||||||
"gray": "\x1b[38;5;243m",
|
"gray": getColor("gray", "\x1b[38;5;243m"),
|
||||||
"light-gray": "\x1b[38;5;246m",
|
"light-gray": getColor("light-gray", "\x1b[38;5;246m"),
|
||||||
"blue": "\x1b[38;5;12m",
|
"blue": getColor("blue", "\x1b[38;5;12m"),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
colors := []string{}
|
colors := []string{}
|
||||||
@ -88,7 +120,10 @@ func (f Formatter) Fi(indentation uint, format string, args ...interface{}) stri
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
|
func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
|
||||||
out := fmt.Sprintf(f.style(format), args...)
|
out := f.style(format)
|
||||||
|
if len(args) > 0 {
|
||||||
|
out = fmt.Sprintf(out, args...)
|
||||||
|
}
|
||||||
|
|
||||||
if indentation == 0 && maxWidth == 0 {
|
if indentation == 0 && maxWidth == 0 {
|
||||||
return out
|
return out
|
||||||
|
63
vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
generated
vendored
Normal file
63
vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
package build
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BuildBuildCommand() command.Command {
|
||||||
|
var cliConfig = types.NewDefaultCLIConfig()
|
||||||
|
var goFlagsConfig = types.NewDefaultGoFlagsConfig()
|
||||||
|
|
||||||
|
flags, err := types.BuildBuildCommandFlagSet(&cliConfig, &goFlagsConfig)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return command.Command{
|
||||||
|
Name: "build",
|
||||||
|
Flags: flags,
|
||||||
|
Usage: "ginkgo build <FLAGS> <PACKAGES>",
|
||||||
|
ShortDoc: "Build the passed in <PACKAGES> (or the package in the current directory if left blank).",
|
||||||
|
DocLink: "precompiling-suites",
|
||||||
|
Command: func(args []string, _ []string) {
|
||||||
|
var errors []error
|
||||||
|
cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
|
||||||
|
command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
|
||||||
|
|
||||||
|
buildSpecs(args, cliConfig, goFlagsConfig)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) {
|
||||||
|
suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
|
||||||
|
if len(suites) == 0 {
|
||||||
|
command.AbortWith("Found no test suites")
|
||||||
|
}
|
||||||
|
|
||||||
|
internal.VerifyCLIAndFrameworkVersion(suites)
|
||||||
|
|
||||||
|
opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers())
|
||||||
|
opc.StartCompiling(suites, goFlagsConfig)
|
||||||
|
|
||||||
|
for {
|
||||||
|
suiteIdx, suite := opc.Next()
|
||||||
|
if suiteIdx >= len(suites) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
suites[suiteIdx] = suite
|
||||||
|
if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
|
||||||
|
fmt.Println(suite.CompilationError.Error())
|
||||||
|
} else {
|
||||||
|
fmt.Printf("Compiled %s.test\n", suite.PackageName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if suites.CountWithState(internal.TestSuiteStateFailedToCompile) > 0 {
|
||||||
|
command.AbortWith("Failed to compile all tests")
|
||||||
|
}
|
||||||
|
}
|
61
vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
generated
vendored
Normal file
61
vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
package command
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
type AbortDetails struct {
|
||||||
|
ExitCode int
|
||||||
|
Error error
|
||||||
|
EmitUsage bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func Abort(details AbortDetails) {
|
||||||
|
panic(details)
|
||||||
|
}
|
||||||
|
|
||||||
|
func AbortGracefullyWith(format string, args ...interface{}) {
|
||||||
|
Abort(AbortDetails{
|
||||||
|
ExitCode: 0,
|
||||||
|
Error: fmt.Errorf(format, args...),
|
||||||
|
EmitUsage: false,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func AbortWith(format string, args ...interface{}) {
|
||||||
|
Abort(AbortDetails{
|
||||||
|
ExitCode: 1,
|
||||||
|
Error: fmt.Errorf(format, args...),
|
||||||
|
EmitUsage: false,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func AbortWithUsage(format string, args ...interface{}) {
|
||||||
|
Abort(AbortDetails{
|
||||||
|
ExitCode: 1,
|
||||||
|
Error: fmt.Errorf(format, args...),
|
||||||
|
EmitUsage: true,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func AbortIfError(preamble string, err error) {
|
||||||
|
if err != nil {
|
||||||
|
Abort(AbortDetails{
|
||||||
|
ExitCode: 1,
|
||||||
|
Error: fmt.Errorf("%s\n%s", preamble, err.Error()),
|
||||||
|
EmitUsage: false,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func AbortIfErrors(preamble string, errors []error) {
|
||||||
|
if len(errors) > 0 {
|
||||||
|
out := ""
|
||||||
|
for _, err := range errors {
|
||||||
|
out += err.Error()
|
||||||
|
}
|
||||||
|
Abort(AbortDetails{
|
||||||
|
ExitCode: 1,
|
||||||
|
Error: fmt.Errorf("%s\n%s", preamble, out),
|
||||||
|
EmitUsage: false,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
50
vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
generated
vendored
Normal file
50
vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
generated
vendored
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
package command
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/formatter"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Command struct {
|
||||||
|
Name string
|
||||||
|
Flags types.GinkgoFlagSet
|
||||||
|
Usage string
|
||||||
|
ShortDoc string
|
||||||
|
Documentation string
|
||||||
|
DocLink string
|
||||||
|
Command func(args []string, additionalArgs []string)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c Command) Run(args []string, additionalArgs []string) {
|
||||||
|
args, err := c.Flags.Parse(args)
|
||||||
|
if err != nil {
|
||||||
|
AbortWithUsage(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Command(args, additionalArgs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c Command) EmitUsage(writer io.Writer) {
|
||||||
|
fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}"))
|
||||||
|
fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage))))
|
||||||
|
if c.ShortDoc != "" {
|
||||||
|
fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc))
|
||||||
|
fmt.Fprintln(writer, "")
|
||||||
|
}
|
||||||
|
if c.Documentation != "" {
|
||||||
|
fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation))
|
||||||
|
fmt.Fprintln(writer, "")
|
||||||
|
}
|
||||||
|
if c.DocLink != "" {
|
||||||
|
fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink))
|
||||||
|
fmt.Fprintln(writer, "")
|
||||||
|
}
|
||||||
|
flagUsage := c.Flags.Usage()
|
||||||
|
if flagUsage != "" {
|
||||||
|
fmt.Fprintf(writer, formatter.F(flagUsage))
|
||||||
|
}
|
||||||
|
}
|
182
vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
generated
vendored
Normal file
182
vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
generated
vendored
Normal file
@ -0,0 +1,182 @@
|
|||||||
|
package command
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/formatter"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Program struct {
|
||||||
|
Name string
|
||||||
|
Heading string
|
||||||
|
Commands []Command
|
||||||
|
DefaultCommand Command
|
||||||
|
DeprecatedCommands []DeprecatedCommand
|
||||||
|
|
||||||
|
//For testing - leave as nil in production
|
||||||
|
OutWriter io.Writer
|
||||||
|
ErrWriter io.Writer
|
||||||
|
Exiter func(code int)
|
||||||
|
}
|
||||||
|
|
||||||
|
type DeprecatedCommand struct {
|
||||||
|
Name string
|
||||||
|
Deprecation types.Deprecation
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p Program) RunAndExit(osArgs []string) {
|
||||||
|
var command Command
|
||||||
|
deprecationTracker := types.NewDeprecationTracker()
|
||||||
|
if p.Exiter == nil {
|
||||||
|
p.Exiter = os.Exit
|
||||||
|
}
|
||||||
|
if p.OutWriter == nil {
|
||||||
|
p.OutWriter = formatter.ColorableStdOut
|
||||||
|
}
|
||||||
|
if p.ErrWriter == nil {
|
||||||
|
p.ErrWriter = formatter.ColorableStdErr
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
exitCode := 0
|
||||||
|
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
details, ok := r.(AbortDetails)
|
||||||
|
if !ok {
|
||||||
|
panic(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
if details.Error != nil {
|
||||||
|
fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name))
|
||||||
|
fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error()))
|
||||||
|
}
|
||||||
|
if details.EmitUsage {
|
||||||
|
if details.Error != nil {
|
||||||
|
fmt.Fprintln(p.ErrWriter, "")
|
||||||
|
}
|
||||||
|
command.EmitUsage(p.ErrWriter)
|
||||||
|
}
|
||||||
|
exitCode = details.ExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
command.Flags.ValidateDeprecations(deprecationTracker)
|
||||||
|
if deprecationTracker.DidTrackDeprecations() {
|
||||||
|
fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport())
|
||||||
|
}
|
||||||
|
p.Exiter(exitCode)
|
||||||
|
return
|
||||||
|
}()
|
||||||
|
|
||||||
|
args, additionalArgs := []string{}, []string{}
|
||||||
|
|
||||||
|
foundDelimiter := false
|
||||||
|
for _, arg := range osArgs[1:] {
|
||||||
|
if !foundDelimiter {
|
||||||
|
if arg == "--" {
|
||||||
|
foundDelimiter = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if foundDelimiter {
|
||||||
|
additionalArgs = append(additionalArgs, arg)
|
||||||
|
} else {
|
||||||
|
args = append(args, arg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
command = p.DefaultCommand
|
||||||
|
if len(args) > 0 {
|
||||||
|
p.handleHelpRequestsAndExit(p.OutWriter, args)
|
||||||
|
if command.Name == args[0] {
|
||||||
|
args = args[1:]
|
||||||
|
} else {
|
||||||
|
for _, deprecatedCommand := range p.DeprecatedCommands {
|
||||||
|
if deprecatedCommand.Name == args[0] {
|
||||||
|
deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, tryCommand := range p.Commands {
|
||||||
|
if tryCommand.Name == args[0] {
|
||||||
|
command, args = tryCommand, args[1:]
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
command.Run(args, additionalArgs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) {
|
||||||
|
if len(args) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
matchesHelpFlag := func(args ...string) bool {
|
||||||
|
for _, arg := range args {
|
||||||
|
if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if len(args) == 1 {
|
||||||
|
if args[0] == "help" || matchesHelpFlag(args[0]) {
|
||||||
|
p.EmitUsage(writer)
|
||||||
|
Abort(AbortDetails{})
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
var name string
|
||||||
|
if args[0] == "help" || matchesHelpFlag(args[0]) {
|
||||||
|
name = args[1]
|
||||||
|
} else if matchesHelpFlag(args[1:]...) {
|
||||||
|
name = args[0]
|
||||||
|
} else {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.DefaultCommand.Name == name || p.Name == name {
|
||||||
|
p.DefaultCommand.EmitUsage(writer)
|
||||||
|
Abort(AbortDetails{})
|
||||||
|
}
|
||||||
|
for _, command := range p.Commands {
|
||||||
|
if command.Name == name {
|
||||||
|
command.EmitUsage(writer)
|
||||||
|
Abort(AbortDetails{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name))
|
||||||
|
fmt.Fprintln(writer, "")
|
||||||
|
p.EmitUsage(writer)
|
||||||
|
Abort(AbortDetails{ExitCode: 1})
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p Program) EmitUsage(writer io.Writer) {
|
||||||
|
fmt.Fprintln(writer, formatter.F(p.Heading))
|
||||||
|
fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading))))
|
||||||
|
fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name))
|
||||||
|
fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name))
|
||||||
|
fmt.Fprintln(writer, "")
|
||||||
|
fmt.Fprintln(writer, formatter.F("The following commands are available:"))
|
||||||
|
|
||||||
|
fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage))
|
||||||
|
if p.DefaultCommand.ShortDoc != "" {
|
||||||
|
fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, command := range p.Commands {
|
||||||
|
fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage))
|
||||||
|
if command.ShortDoc != "" {
|
||||||
|
fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
48
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go
generated
vendored
Normal file
48
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
package generators
|
||||||
|
|
||||||
|
var bootstrapText = `package {{.Package}}
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
{{.GinkgoImport}}
|
||||||
|
{{.GomegaImport}}
|
||||||
|
)
|
||||||
|
|
||||||
|
func Test{{.FormattedName}}(t *testing.T) {
|
||||||
|
{{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail)
|
||||||
|
{{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite")
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
var agoutiBootstrapText = `package {{.Package}}
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
{{.GinkgoImport}}
|
||||||
|
{{.GomegaImport}}
|
||||||
|
"github.com/sclevine/agouti"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Test{{.FormattedName}}(t *testing.T) {
|
||||||
|
{{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail)
|
||||||
|
{{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite")
|
||||||
|
}
|
||||||
|
|
||||||
|
var agoutiDriver *agouti.WebDriver
|
||||||
|
|
||||||
|
var _ = {{.GinkgoPackage}}BeforeSuite(func() {
|
||||||
|
// Choose a WebDriver:
|
||||||
|
|
||||||
|
agoutiDriver = agouti.PhantomJS()
|
||||||
|
// agoutiDriver = agouti.Selenium()
|
||||||
|
// agoutiDriver = agouti.ChromeDriver()
|
||||||
|
|
||||||
|
{{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed())
|
||||||
|
})
|
||||||
|
|
||||||
|
var _ = {{.GinkgoPackage}}AfterSuite(func() {
|
||||||
|
{{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed())
|
||||||
|
})
|
||||||
|
`
|
133
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
generated
vendored
Normal file
133
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
generated
vendored
Normal file
@ -0,0 +1,133 @@
|
|||||||
|
package generators
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"text/template"
|
||||||
|
|
||||||
|
sprig "github.com/go-task/slim-sprig"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BuildBootstrapCommand() command.Command {
|
||||||
|
conf := GeneratorsConfig{}
|
||||||
|
flags, err := types.NewGinkgoFlagSet(
|
||||||
|
types.GinkgoFlags{
|
||||||
|
{Name: "agouti", KeyPath: "Agouti",
|
||||||
|
Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"},
|
||||||
|
{Name: "nodot", KeyPath: "NoDot",
|
||||||
|
Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"},
|
||||||
|
{Name: "internal", KeyPath: "Internal",
|
||||||
|
Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"},
|
||||||
|
{Name: "template", KeyPath: "CustomTemplate",
|
||||||
|
UsageArgument: "template-file",
|
||||||
|
Usage: "If specified, generate will use the contents of the file passed as the bootstrap template"},
|
||||||
|
{Name: "template-data", KeyPath: "CustomTemplateData",
|
||||||
|
UsageArgument: "template-data-file",
|
||||||
|
Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the bootstrap template"},
|
||||||
|
},
|
||||||
|
&conf,
|
||||||
|
types.GinkgoFlagSections{},
|
||||||
|
)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return command.Command{
|
||||||
|
Name: "bootstrap",
|
||||||
|
Usage: "ginkgo bootstrap",
|
||||||
|
ShortDoc: "Bootstrap a test suite for the current package",
|
||||||
|
Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure.
|
||||||
|
|
||||||
|
{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`,
|
||||||
|
DocLink: "generators",
|
||||||
|
Flags: flags,
|
||||||
|
Command: func(_ []string, _ []string) {
|
||||||
|
generateBootstrap(conf)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type bootstrapData struct {
|
||||||
|
Package string
|
||||||
|
FormattedName string
|
||||||
|
|
||||||
|
GinkgoImport string
|
||||||
|
GomegaImport string
|
||||||
|
GinkgoPackage string
|
||||||
|
GomegaPackage string
|
||||||
|
CustomData map[string]any
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateBootstrap(conf GeneratorsConfig) {
|
||||||
|
packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
|
||||||
|
|
||||||
|
data := bootstrapData{
|
||||||
|
Package: determinePackageName(packageName, conf.Internal),
|
||||||
|
FormattedName: formattedName,
|
||||||
|
|
||||||
|
GinkgoImport: `. "github.com/onsi/ginkgo/v2"`,
|
||||||
|
GomegaImport: `. "github.com/onsi/gomega"`,
|
||||||
|
GinkgoPackage: "",
|
||||||
|
GomegaPackage: "",
|
||||||
|
}
|
||||||
|
|
||||||
|
if conf.NoDot {
|
||||||
|
data.GinkgoImport = `"github.com/onsi/ginkgo/v2"`
|
||||||
|
data.GomegaImport = `"github.com/onsi/gomega"`
|
||||||
|
data.GinkgoPackage = `ginkgo.`
|
||||||
|
data.GomegaPackage = `gomega.`
|
||||||
|
}
|
||||||
|
|
||||||
|
targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
|
||||||
|
if internal.FileExists(targetFile) {
|
||||||
|
command.AbortWith("{{bold}}%s{{/}} already exists", targetFile)
|
||||||
|
} else {
|
||||||
|
fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.Create(targetFile)
|
||||||
|
command.AbortIfError("Failed to create file:", err)
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
var templateText string
|
||||||
|
if conf.CustomTemplate != "" {
|
||||||
|
tpl, err := os.ReadFile(conf.CustomTemplate)
|
||||||
|
command.AbortIfError("Failed to read custom bootstrap file:", err)
|
||||||
|
templateText = string(tpl)
|
||||||
|
if conf.CustomTemplateData != "" {
|
||||||
|
var tplCustomDataMap map[string]any
|
||||||
|
tplCustomData, err := os.ReadFile(conf.CustomTemplateData)
|
||||||
|
command.AbortIfError("Failed to read custom boostrap data file:", err)
|
||||||
|
if !json.Valid([]byte(tplCustomData)) {
|
||||||
|
command.AbortWith("Invalid JSON object in custom data file.")
|
||||||
|
}
|
||||||
|
//create map from the custom template data
|
||||||
|
json.Unmarshal(tplCustomData, &tplCustomDataMap)
|
||||||
|
data.CustomData = tplCustomDataMap
|
||||||
|
}
|
||||||
|
} else if conf.Agouti {
|
||||||
|
templateText = agoutiBootstrapText
|
||||||
|
} else {
|
||||||
|
templateText = bootstrapText
|
||||||
|
}
|
||||||
|
|
||||||
|
//Setting the option to explicitly fail if template is rendered trying to access missing key
|
||||||
|
bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText)
|
||||||
|
command.AbortIfError("Failed to parse bootstrap template:", err)
|
||||||
|
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
//Being explicit about failing sooner during template rendering
|
||||||
|
//when accessing custom data rather than during the go fmt command
|
||||||
|
err = bootstrapTemplate.Execute(buf, data)
|
||||||
|
command.AbortIfError("Failed to render bootstrap template:", err)
|
||||||
|
|
||||||
|
buf.WriteTo(f)
|
||||||
|
|
||||||
|
internal.GoFmt(targetFile)
|
||||||
|
}
|
259
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
generated
vendored
Normal file
259
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
generated
vendored
Normal file
@ -0,0 +1,259 @@
|
|||||||
|
package generators
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"text/template"
|
||||||
|
|
||||||
|
sprig "github.com/go-task/slim-sprig"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BuildGenerateCommand() command.Command {
|
||||||
|
conf := GeneratorsConfig{}
|
||||||
|
flags, err := types.NewGinkgoFlagSet(
|
||||||
|
types.GinkgoFlags{
|
||||||
|
{Name: "agouti", KeyPath: "Agouti",
|
||||||
|
Usage: "If set, generate will create a test file for writing Agouti tests"},
|
||||||
|
{Name: "nodot", KeyPath: "NoDot",
|
||||||
|
Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"},
|
||||||
|
{Name: "internal", KeyPath: "Internal",
|
||||||
|
Usage: "If set, generate will create a test file that uses the regular package name (i.e. `package X`, not `package X_test`)"},
|
||||||
|
{Name: "template", KeyPath: "CustomTemplate",
|
||||||
|
UsageArgument: "template-file",
|
||||||
|
Usage: "If specified, generate will use the contents of the file passed as the test file template"},
|
||||||
|
{Name: "template-data", KeyPath: "CustomTemplateData",
|
||||||
|
UsageArgument: "template-data-file",
|
||||||
|
Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"},
|
||||||
|
},
|
||||||
|
&conf,
|
||||||
|
types.GinkgoFlagSections{},
|
||||||
|
)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return command.Command{
|
||||||
|
Name: "generate",
|
||||||
|
Usage: "ginkgo generate <filename(s)>",
|
||||||
|
ShortDoc: "Generate a test file named <filename>_test.go",
|
||||||
|
Documentation: `If the optional <filename> argument is omitted, a file named after the package in the current directory will be created.
|
||||||
|
|
||||||
|
You can pass multiple <filename(s)> to generate multiple files simultaneously. The resulting files are named <filename>_test.go.
|
||||||
|
|
||||||
|
You can also pass a <filename> of the form "file.go" and generate will emit "file_test.go".`,
|
||||||
|
DocLink: "generators",
|
||||||
|
Flags: flags,
|
||||||
|
Command: func(args []string, _ []string) {
|
||||||
|
generateTestFiles(conf, args)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type specData struct {
|
||||||
|
Package string
|
||||||
|
Subject string
|
||||||
|
PackageImportPath string
|
||||||
|
ImportPackage bool
|
||||||
|
|
||||||
|
GinkgoImport string
|
||||||
|
GomegaImport string
|
||||||
|
GinkgoPackage string
|
||||||
|
GomegaPackage string
|
||||||
|
CustomData map[string]any
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateTestFiles(conf GeneratorsConfig, args []string) {
|
||||||
|
subjects := args
|
||||||
|
if len(subjects) == 0 {
|
||||||
|
subjects = []string{""}
|
||||||
|
}
|
||||||
|
for _, subject := range subjects {
|
||||||
|
generateTestFileForSubject(subject, conf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateTestFileForSubject(subject string, conf GeneratorsConfig) {
|
||||||
|
packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
|
||||||
|
if subject != "" {
|
||||||
|
specFilePrefix = formatSubject(subject)
|
||||||
|
formattedName = prettifyName(specFilePrefix)
|
||||||
|
}
|
||||||
|
|
||||||
|
if conf.Internal {
|
||||||
|
specFilePrefix = specFilePrefix + "_internal"
|
||||||
|
}
|
||||||
|
|
||||||
|
data := specData{
|
||||||
|
Package: determinePackageName(packageName, conf.Internal),
|
||||||
|
Subject: formattedName,
|
||||||
|
PackageImportPath: getPackageImportPath(),
|
||||||
|
ImportPackage: !conf.Internal,
|
||||||
|
|
||||||
|
GinkgoImport: `. "github.com/onsi/ginkgo/v2"`,
|
||||||
|
GomegaImport: `. "github.com/onsi/gomega"`,
|
||||||
|
GinkgoPackage: "",
|
||||||
|
GomegaPackage: "",
|
||||||
|
}
|
||||||
|
|
||||||
|
if conf.NoDot {
|
||||||
|
data.GinkgoImport = `"github.com/onsi/ginkgo/v2"`
|
||||||
|
data.GomegaImport = `"github.com/onsi/gomega"`
|
||||||
|
data.GinkgoPackage = `ginkgo.`
|
||||||
|
data.GomegaPackage = `gomega.`
|
||||||
|
}
|
||||||
|
|
||||||
|
targetFile := fmt.Sprintf("%s_test.go", specFilePrefix)
|
||||||
|
if internal.FileExists(targetFile) {
|
||||||
|
command.AbortWith("{{bold}}%s{{/}} already exists", targetFile)
|
||||||
|
} else {
|
||||||
|
fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.Create(targetFile)
|
||||||
|
command.AbortIfError("Failed to create test file:", err)
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
var templateText string
|
||||||
|
if conf.CustomTemplate != "" {
|
||||||
|
tpl, err := os.ReadFile(conf.CustomTemplate)
|
||||||
|
command.AbortIfError("Failed to read custom template file:", err)
|
||||||
|
templateText = string(tpl)
|
||||||
|
if conf.CustomTemplateData != "" {
|
||||||
|
var tplCustomDataMap map[string]any
|
||||||
|
tplCustomData, err := os.ReadFile(conf.CustomTemplateData)
|
||||||
|
command.AbortIfError("Failed to read custom template data file:", err)
|
||||||
|
if !json.Valid([]byte(tplCustomData)) {
|
||||||
|
command.AbortWith("Invalid JSON object in custom data file.")
|
||||||
|
}
|
||||||
|
//create map from the custom template data
|
||||||
|
json.Unmarshal(tplCustomData, &tplCustomDataMap)
|
||||||
|
data.CustomData = tplCustomDataMap
|
||||||
|
}
|
||||||
|
} else if conf.Agouti {
|
||||||
|
templateText = agoutiSpecText
|
||||||
|
} else {
|
||||||
|
templateText = specText
|
||||||
|
}
|
||||||
|
|
||||||
|
//Setting the option to explicitly fail if template is rendered trying to access missing key
|
||||||
|
specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText)
|
||||||
|
command.AbortIfError("Failed to read parse test template:", err)
|
||||||
|
|
||||||
|
//Being explicit about failing sooner during template rendering
|
||||||
|
//when accessing custom data rather than during the go fmt command
|
||||||
|
err = specTemplate.Execute(f, data)
|
||||||
|
command.AbortIfError("Failed to render bootstrap template:", err)
|
||||||
|
internal.GoFmt(targetFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
func formatSubject(name string) string {
|
||||||
|
name = strings.ReplaceAll(name, "-", "_")
|
||||||
|
name = strings.ReplaceAll(name, " ", "_")
|
||||||
|
name = strings.Split(name, ".go")[0]
|
||||||
|
name = strings.Split(name, "_test")[0]
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
|
||||||
|
// moduleName returns the module path declared in go.mod under the given
// module root directory. It returns "" when go.mod is missing, unreadable,
// or does not contain a parseable `module` directive. Only the first 128
// bytes of the file are inspected; the module directive is expected near
// the top of go.mod.
func moduleName(modRoot string) string {
	modFile, err := os.Open(filepath.Join(modRoot, "go.mod"))
	if err != nil {
		return ""
	}
	// Fix: the file handle was previously never closed (fd leak).
	defer modFile.Close()

	mod := make([]byte, 128)
	_, err = modFile.Read(mod)
	if err != nil {
		return ""
	}

	slashSlash := []byte("//")
	moduleStr := []byte("module")

	// Scan line by line for a `module <path>` directive, stripping
	// trailing // comments.
	for len(mod) > 0 {
		line := mod
		mod = nil
		if i := bytes.IndexByte(line, '\n'); i >= 0 {
			line, mod = line[:i], line[i+1:]
		}
		if i := bytes.Index(line, slashSlash); i >= 0 {
			line = line[:i]
		}
		line = bytes.TrimSpace(line)
		if !bytes.HasPrefix(line, moduleStr) {
			continue
		}
		line = line[len(moduleStr):]
		n := len(line)
		line = bytes.TrimSpace(line)
		// Require whitespace after "module" and a non-empty path
		// (rejects e.g. "modulefoo").
		if len(line) == n || len(line) == 0 {
			continue
		}

		if line[0] == '"' || line[0] == '`' {
			p, err := strconv.Unquote(string(line))
			if err != nil {
				return "" // malformed quoted string or multiline module path
			}
			return p
		}

		return string(line)
	}

	return "" // missing module path
}
|
||||||
|
|
||||||
|
// findModuleRoot walks upward from dir looking for the directory that
// contains a go.mod file and returns it; it returns "" when the
// filesystem root is reached without finding one.
func findModuleRoot(dir string) (root string) {
	current := filepath.Clean(dir)
	for {
		info, statErr := os.Stat(filepath.Join(current, "go.mod"))
		if statErr == nil && !info.IsDir() {
			return current
		}
		parent := filepath.Dir(current)
		if parent == current {
			// Reached the top of the tree without finding go.mod.
			return ""
		}
		current = parent
	}
}
|
||||||
|
|
||||||
|
// getPackageImportPath determines the import path of the package in the
// current working directory. It first looks for an enclosing Go module
// (go.mod) and derives the path from the module name plus the directory's
// offset inside the module; failing that it falls back to the legacy
// $GOPATH/src layout. If neither applies it prints a warning and returns
// the placeholder "UNKNOWN_PACKAGE_PATH".
func getPackageImportPath() string {
	workingDir, err := os.Getwd()
	if err != nil {
		panic(err.Error())
	}

	sep := string(filepath.Separator)

	// Try go.mod file first
	modRoot := findModuleRoot(workingDir)
	if modRoot != "" {
		modName := moduleName(modRoot)
		if modName != "" {
			// Path of the cwd relative to the module root, with OS
			// separators normalized to "/" for an import path.
			cd := strings.ReplaceAll(workingDir, modRoot, "")
			cd = strings.ReplaceAll(cd, sep, "/")
			return modName + cd
		}
	}

	// Fallback to GOPATH structure: everything after the last
	// "/src/" element is treated as the import path.
	paths := strings.Split(workingDir, sep+"src"+sep)
	if len(paths) == 1 {
		fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n")
		return "UNKNOWN_PACKAGE_PATH"
	}
	return filepath.ToSlash(paths[len(paths)-1])
}
|
41
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
generated
vendored
Normal file
41
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
package generators

// specText is the default text/template used to produce a new spec file;
// it is parameterized by the generator's template data (package name,
// ginkgo/gomega imports, subject, and optional package import).
// NOTE(review): interior indentation of these literals could not be
// recovered from this rendering — confirm against upstream before relying
// on exact whitespace.
var specText = `package {{.Package}}

import (
	{{.GinkgoImport}}
	{{.GomegaImport}}

	{{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
)

var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {

})
`

// agoutiSpecText is the variant of specText used when the Agouti option is
// set (see the generator's conf.Agouti branch); it wires an agouti.Page up
// in BeforeEach/AfterEach blocks.
var agoutiSpecText = `package {{.Package}}

import (
	{{.GinkgoImport}}
	{{.GomegaImport}}
	"github.com/sclevine/agouti"
	. "github.com/sclevine/agouti/matchers"

	{{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
)

var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {
	var page *agouti.Page

	{{.GinkgoPackage}}BeforeEach(func() {
		var err error
		page, err = agoutiDriver.NewPage()
		{{.GomegaPackage}}Expect(err).NotTo({{.GomegaPackage}}HaveOccurred())
	})

	{{.GinkgoPackage}}AfterEach(func() {
		{{.GomegaPackage}}Expect(page.Destroy()).To({{.GomegaPackage}}Succeed())
	})
})
`
|
64
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
generated
vendored
Normal file
64
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
|||||||
|
package generators
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go/build"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GeneratorsConfig holds the CLI options consumed by the bootstrap/generate
// generators.
type GeneratorsConfig struct {
	// Agouti selects the agouti spec template; Internal generates the
	// test in the same package (no "_test" suffix). NoDot — presumably
	// suppresses dot-imports in generated files; its use is not visible
	// in this file, confirm against the callers.
	Agouti, NoDot, Internal bool
	// CustomTemplate is a path to a user-supplied template file read in
	// place of the built-in templates.
	CustomTemplate string
	// CustomTemplateData is a path to a JSON file whose contents are
	// exposed to the custom template as CustomData.
	CustomTemplateData string
}
|
||||||
|
|
||||||
|
// getPackageAndFormattedName inspects the current working directory and
// returns (packageName, dirName, formattedName): the Go package name of the
// directory (falling back to a legalized directory name when the directory
// does not build), the sanitized directory name, and a prettified
// CamelCase display name. Aborts the CLI if the cwd cannot be determined.
func getPackageAndFormattedName() (string, string, string) {
	path, err := os.Getwd()
	command.AbortIfError("Could not get current working directory:", err)

	dirName := strings.ReplaceAll(filepath.Base(path), "-", "_")
	dirName = strings.ReplaceAll(dirName, " ", "_")

	// Prefer the package name Go itself reports for the directory;
	// fall back to a name derived from the directory when it has no
	// buildable Go files.
	pkg, err := build.ImportDir(path, 0)
	packageName := pkg.Name
	if err != nil {
		packageName = ensureLegalPackageName(dirName)
	}

	formattedName := prettifyName(filepath.Base(path))
	return packageName, dirName, formattedName
}
|
||||||
|
|
||||||
|
// ensureLegalPackageName maps names that cannot be Go package identifiers
// to legal ones: "_" becomes "underscore", "" becomes "empty", and a
// leading ASCII digit is spelled out ("3d" -> "threed").
func ensureLegalPackageName(name string) string {
	switch name {
	case "_":
		return "underscore"
	case "":
		return "empty"
	}
	if c := name[0]; c >= '0' && c <= '9' {
		digitWords := []string{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}
		return digitWords[c-'0'] + name[1:]
	}
	return name
}
|
||||||
|
|
||||||
|
// prettifyName turns a file/directory style name into a CamelCase display
// name: separators become spaces, each word is title-cased, then the
// spaces are removed ("foo-bar_baz" -> "FooBarBaz").
func prettifyName(name string) string {
	spaced := strings.NewReplacer("-", " ", "_", " ").Replace(name)
	titled := strings.Title(spaced) //nolint:staticcheck // deliberately kept to preserve the original behavior
	return strings.ReplaceAll(titled, " ", "")
}
|
||||||
|
|
||||||
|
// determinePackageName returns the package the generated test should live
// in: the package itself for internal tests, otherwise the conventional
// external "_test" package.
func determinePackageName(name string, internal bool) string {
	if !internal {
		return name + "_test"
	}
	return name
}
|
161
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
generated
vendored
Normal file
161
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
generated
vendored
Normal file
@ -0,0 +1,161 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CompileSuite compiles the test binary for suite via `go test -c`
// (arguments built by types.GenerateGoTestCompileArgs) and returns the
// updated suite. On success suite.State is TestSuiteStateCompiled and
// PathToCompiledTest points at the built binary; on any failure the state
// is TestSuiteStateFailedToCompile with CompilationError set. A suite that
// already has PathToCompiledTest (precompiled) is returned unchanged.
func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite {
	if suite.PathToCompiledTest != "" {
		return suite
	}

	suite.CompilationError = nil

	// Target binary: <suite dir>/<package>.test, as an absolute path.
	path, err := filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test"))
	if err != nil {
		suite.State = TestSuiteStateFailedToCompile
		suite.CompilationError = fmt.Errorf("Failed to compute compilation target path:\n%s", err.Error())
		return suite
	}

	// The compile command runs from the suite directory, so compute the
	// path back to where ginkgo was invoked for the arg generator.
	ginkgoInvocationPath, _ := os.Getwd()
	ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath)
	packagePath := suite.AbsPath()
	pathToInvocationPath, err := filepath.Rel(packagePath, ginkgoInvocationPath)
	if err != nil {
		suite.State = TestSuiteStateFailedToCompile
		suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error())
		return suite
	}
	args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath)
	if err != nil {
		suite.State = TestSuiteStateFailedToCompile
		suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error())
		return suite
	}

	cmd := exec.Command("go", args...)
	cmd.Dir = suite.Path
	output, err := cmd.CombinedOutput()
	if err != nil {
		// Prefer the compiler's own output as the error message when
		// there is any; fall back to the exec error.
		if len(output) > 0 {
			suite.State = TestSuiteStateFailedToCompile
			suite.CompilationError = fmt.Errorf("Failed to compile %s:\n\n%s", suite.PackageName, output)
		} else {
			suite.State = TestSuiteStateFailedToCompile
			suite.CompilationError = fmt.Errorf("Failed to compile %s\n%s", suite.PackageName, err.Error())
		}
		return suite
	}

	// `go test -c` reports "[no test files]" for empty packages — treat
	// that as a skip, not a failure.
	if strings.Contains(string(output), "[no test files]") {
		suite.State = TestSuiteStateSkippedDueToEmptyCompilation
		return suite
	}

	if len(output) > 0 {
		fmt.Println(string(output))
	}

	if !FileExists(path) {
		suite.State = TestSuiteStateFailedToCompile
		suite.CompilationError = fmt.Errorf("Failed to compile %s:\nOutput file %s could not be found", suite.PackageName, path)
		return suite
	}

	suite.State = TestSuiteStateCompiled
	suite.PathToCompiledTest = path
	return suite
}
|
||||||
|
|
||||||
|
// Cleanup deletes the compiled test binaries for the given suites, unless
// the go flags require the binary to be preserved. Precompiled suites
// (binaries ginkgo did not build) are never removed.
func Cleanup(goFlagsConfig types.GoFlagsConfig, suites ...TestSuite) {
	if goFlagsConfig.BinaryMustBePreserved() {
		return
	}
	for _, suite := range suites {
		if !suite.Precompiled {
			// Best-effort removal; errors are intentionally ignored.
			os.Remove(suite.PathToCompiledTest)
		}
	}
}
|
||||||
|
|
||||||
|
// parallelSuiteBundle pairs a suite queued for compilation with the
// channel its compiled result is delivered on.
type parallelSuiteBundle struct {
	suite    TestSuite
	compiled chan TestSuite
}
|
||||||
|
|
||||||
|
// OrderedParallelCompiler compiles suites on numCompilers goroutines while
// letting the caller consume results in submission order via Next().
type OrderedParallelCompiler struct {
	mutex        *sync.Mutex // guards stopped
	stopped      bool        // set by StopAndDrain; workers then pass suites through uncompiled
	numCompilers int

	idx                int               // index of the next suite Next() will hand out
	numSuites          int
	completionChannels []chan TestSuite // one buffered channel per suite, in submission order
}
|
||||||
|
|
||||||
|
// NewOrderedParallelCompiler returns a compiler that will use numCompilers
// concurrent workers once StartCompiling is called.
func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler {
	return &OrderedParallelCompiler{
		mutex:        &sync.Mutex{},
		numCompilers: numCompilers,
	}
}
|
||||||
|
|
||||||
|
// StartCompiling queues every suite for compilation across numCompilers
// worker goroutines. Each suite's result is delivered on its own buffered
// completion channel so Next() can consume them in order. The first suite
// is compiled serially (the loop waits for it) before the rest are queued.
func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) {
	opc.stopped = false
	opc.idx = 0
	opc.numSuites = len(suites)
	opc.completionChannels = make([]chan TestSuite, opc.numSuites)

	toCompile := make(chan parallelSuiteBundle, opc.numCompilers)
	for compiler := 0; compiler < opc.numCompilers; compiler++ {
		go func() {
			for bundle := range toCompile {
				c, suite := bundle.compiled, bundle.suite
				// Snapshot stopped under the lock; after StopAndDrain
				// workers skip compiling and pass suites through.
				opc.mutex.Lock()
				stopped := opc.stopped
				opc.mutex.Unlock()
				if !stopped {
					suite = CompileSuite(suite, goFlagsConfig)
				}
				c <- suite
			}
		}()
	}

	for idx, suite := range suites {
		// Buffered so the worker never blocks delivering a result.
		opc.completionChannels[idx] = make(chan TestSuite, 1)
		toCompile <- parallelSuiteBundle{suite, opc.completionChannels[idx]}
		if idx == 0 { //compile first suite serially
			suite = <-opc.completionChannels[0]
			opc.completionChannels[0] <- suite
		}
	}

	close(toCompile)
}
|
||||||
|
|
||||||
|
// Next blocks until the next suite (in submission order) has finished
// compiling and returns its index and result. Once all suites have been
// handed out it returns (numSuites, TestSuite{}).
func (opc *OrderedParallelCompiler) Next() (int, TestSuite) {
	if opc.idx >= opc.numSuites {
		return opc.numSuites, TestSuite{}
	}

	idx := opc.idx
	suite := <-opc.completionChannels[idx]
	opc.idx = opc.idx + 1

	return idx, suite
}
|
||||||
|
|
||||||
|
// StopAndDrain marks the compiler stopped so that workers skip compiling
// any suites still queued (those are passed through unmodified).
func (opc *OrderedParallelCompiler) StopAndDrain() {
	opc.mutex.Lock()
	opc.stopped = true
	opc.mutex.Unlock()
}
|
237
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
generated
vendored
Normal file
237
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
generated
vendored
Normal file
@ -0,0 +1,237 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/google/pprof/profile"
|
||||||
|
"github.com/onsi/ginkgo/v2/reporters"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AbsPathForGeneratedAsset returns the absolute path at which a generated
// asset (profile, report, binary) for suite should live. A non-zero
// parallel process number is appended as a ".N" suffix. When an output
// directory is configured the asset is namespaced by the suite name and
// placed there; otherwise it goes next to the suite itself.
func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string {
	suffix := ""
	if process != 0 {
		suffix = fmt.Sprintf(".%d", process)
	}
	if cliConfig.OutputDir == "" {
		return filepath.Join(suite.AbsPath(), assetName+suffix)
	}
	outputDir, _ := filepath.Abs(cliConfig.OutputDir)
	return filepath.Join(outputDir, suite.NamespacedName()+"_"+assetName+suffix)
}
|
||||||
|
|
||||||
|
// FinalizeProfilesAndReportsForSuites performs post-run bookkeeping for a
// set of suites: merges per-suite cover profiles into one (unless kept
// separate), moves/copies preserved test binaries into the output
// directory, emits JSON/JUnit/Teamcity reports for suites that never ran,
// and merges per-suite reports. It returns human-readable messages to
// display plus the first error encountered.
func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIConfig, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig, goFlagsConfig types.GoFlagsConfig) ([]string, error) {
	messages := []string{}
	suitesWithProfiles := suites.WithState(TestSuiteStatePassed, TestSuiteStateFailed) //anything else won't have actually run and generated a profile

	// merge cover profiles if need be
	if goFlagsConfig.Cover && !cliConfig.KeepSeparateCoverprofiles {
		coverProfiles := []string{}
		for _, suite := range suitesWithProfiles {
			if !suite.HasProgrammaticFocus {
				coverProfiles = append(coverProfiles, AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0))
			}
		}

		if len(coverProfiles) > 0 {
			dst := goFlagsConfig.CoverProfile
			if cliConfig.OutputDir != "" {
				dst = filepath.Join(cliConfig.OutputDir, goFlagsConfig.CoverProfile)
			}
			err := MergeAndCleanupCoverProfiles(coverProfiles, dst)
			if err != nil {
				return messages, err
			}
			coverage, err := GetCoverageFromCoverProfile(dst)
			if err != nil {
				return messages, err
			}
			if coverage == 0 {
				messages = append(messages, "composite coverage: [no statements]")
			} else if suitesWithProfiles.AnyHaveProgrammaticFocus() {
				messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements however some suites did not contribute because they included programatically focused specs", coverage))
			} else {
				messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements", coverage))
			}
		} else {
			messages = append(messages, "no composite coverage computed: all suites included programatically focused specs")
		}
	}

	// copy binaries if need be
	for _, suite := range suitesWithProfiles {
		if goFlagsConfig.BinaryMustBePreserved() && cliConfig.OutputDir != "" {
			src := suite.PathToCompiledTest
			dst := filepath.Join(cliConfig.OutputDir, suite.NamespacedName()+".test")
			// Precompiled binaries are copied (the original is not
			// ours to move); binaries we built are renamed.
			if suite.Precompiled {
				if err := CopyFile(src, dst); err != nil {
					return messages, err
				}
			} else {
				if err := os.Rename(src, dst); err != nil {
					return messages, err
				}
			}
		}
	}

	// reportFormat bundles a report filename with its generator and
	// merger for each enabled report flavor.
	type reportFormat struct {
		ReportName   string
		GenerateFunc func(types.Report, string) error
		MergeFunc    func([]string, string) ([]string, error)
	}
	reportFormats := []reportFormat{}
	if reporterConfig.JSONReport != "" {
		reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports})
	}
	if reporterConfig.JUnitReport != "" {
		reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports})
	}
	if reporterConfig.TeamcityReport != "" {
		reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.TeamcityReport, GenerateFunc: reporters.GenerateTeamcityReport, MergeFunc: reporters.MergeAndCleanupTeamcityReports})
	}

	// Generate reports for suites that failed to run
	reportableSuites := suites.ThatAreGinkgoSuites()
	for _, suite := range reportableSuites.WithState(TestSuiteStateFailedToCompile, TestSuiteStateFailedDueToTimeout, TestSuiteStateSkippedDueToPriorFailures, TestSuiteStateSkippedDueToEmptyCompilation) {
		report := types.Report{
			SuitePath:      suite.AbsPath(),
			SuiteConfig:    suiteConfig,
			SuiteSucceeded: false,
		}
		switch suite.State {
		case TestSuiteStateFailedToCompile:
			report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, suite.CompilationError.Error())
		case TestSuiteStateFailedDueToTimeout:
			report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, TIMEOUT_ELAPSED_FAILURE_REASON)
		case TestSuiteStateSkippedDueToPriorFailures:
			report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, PRIOR_FAILURES_FAILURE_REASON)
		case TestSuiteStateSkippedDueToEmptyCompilation:
			report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, EMPTY_SKIP_FAILURE_REASON)
			// An empty compilation is not a failure.
			report.SuiteSucceeded = true
		}

		for _, format := range reportFormats {
			// NOTE(review): the GenerateFunc error is silently
			// dropped here — confirm whether that is intentional.
			format.GenerateFunc(report, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0))
		}
	}

	// Merge reports unless we've been asked to keep them separate
	if !cliConfig.KeepSeparateReports {
		for _, format := range reportFormats {
			reports := []string{}
			for _, suite := range reportableSuites {
				reports = append(reports, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0))
			}
			dst := format.ReportName
			if cliConfig.OutputDir != "" {
				dst = filepath.Join(cliConfig.OutputDir, format.ReportName)
			}
			mergeMessages, err := format.MergeFunc(reports, dst)
			// Keep any messages the merger produced even on error.
			messages = append(messages, mergeMessages...)
			if err != nil {
				return messages, err
			}
		}
	}

	return messages, nil
}
|
||||||
|
|
||||||
|
// MergeAndCleanupCoverProfiles loads each cover profile, concatenates them
// (keeping only the first profile's "mode:" header), deletes the source
// files, and writes the combined profile to destination.
func MergeAndCleanupCoverProfiles(profiles []string, destination string) error {
	var merged bytes.Buffer
	modeLine := regexp.MustCompile(`^mode: .*\n`)
	for idx, profilePath := range profiles {
		contents, readErr := os.ReadFile(profilePath)
		if readErr != nil {
			return fmt.Errorf("Unable to read coverage file %s:\n%s", profilePath, readErr.Error())
		}
		os.Remove(profilePath)

		// Strip the leading "mode:" header from every profile except
		// the first so the combined file has a single header.
		if idx > 0 {
			contents = modeLine.ReplaceAll(contents, []byte{})
		}

		_, writeErr := merged.Write(contents)

		// Ensure each appended profile ends with a newline.
		if writeErr == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
			_, writeErr = merged.Write([]byte("\n"))
		}

		if writeErr != nil {
			return fmt.Errorf("Unable to append to coverprofile:\n%s", writeErr.Error())
		}
	}

	if err := os.WriteFile(destination, merged.Bytes(), 0666); err != nil {
		return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error())
	}
	return nil
}
|
||||||
|
|
||||||
|
// GetCoverageFromCoverProfile shells out to `go tool cover -func` on the
// given profile and parses the "total: (statements) NN.N%" line to return
// the overall statement coverage percentage.
func GetCoverageFromCoverProfile(profile string) (float64, error) {
	cmd := exec.Command("go", "tool", "cover", "-func", profile)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error())
	}
	re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`)
	matches := re.FindStringSubmatch(string(output))
	if matches == nil {
		return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage")
	}
	coverageString := matches[1]
	coverage, err := strconv.ParseFloat(coverageString, 64)
	if err != nil {
		return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage: %s", err.Error())
	}

	return coverage, nil
}
|
||||||
|
|
||||||
|
func MergeProfiles(profilePaths []string, destination string) error {
|
||||||
|
profiles := []*profile.Profile{}
|
||||||
|
for _, profilePath := range profilePaths {
|
||||||
|
proFile, err := os.Open(profilePath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error())
|
||||||
|
}
|
||||||
|
prof, err := profile.Parse(proFile)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error())
|
||||||
|
}
|
||||||
|
profiles = append(profiles, prof)
|
||||||
|
os.Remove(profilePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
mergedProfile, err := profile.Merge(profiles)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Could not merge profiles:\n%s", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
outFile, err := os.Create(destination)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Could not create merged profile %s:\n%s", destination, err.Error())
|
||||||
|
}
|
||||||
|
err = mergedProfile.Write(outFile)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Could not write merged profile %s:\n%s", destination, err.Error())
|
||||||
|
}
|
||||||
|
err = outFile.Close()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Could not close merged profile %s:\n%s", destination, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
355
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
generated
vendored
Normal file
355
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
generated
vendored
Normal file
@ -0,0 +1,355 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/formatter"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||||
|
"github.com/onsi/ginkgo/v2/internal/parallel_support"
|
||||||
|
"github.com/onsi/ginkgo/v2/reporters"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RunCompiledSuite executes a previously compiled suite binary and returns
// the suite with its State (and HasProgrammaticFocus) updated. Ginkgo
// suites run in parallel or serial mode depending on the configured proc
// count; non-ginkgo suites run via plain `go test` semantics. After the
// run, the configured after-run hook (if any) is invoked. A suite with no
// compiled binary is returned immediately in the failed state.
func RunCompiledSuite(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
	suite.State = TestSuiteStateFailed
	suite.HasProgrammaticFocus = false

	if suite.PathToCompiledTest == "" {
		return suite
	}

	if suite.IsGinkgo && cliConfig.ComputedProcs() > 1 {
		suite = runParallel(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs)
	} else if suite.IsGinkgo {
		suite = runSerial(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs)
	} else {
		suite = runGoTest(suite, cliConfig, goFlagsConfig)
	}
	runAfterRunHook(cliConfig.AfterRunHook, reporterConfig.NoColor, suite)
	return suite
}
|
||||||
|
|
||||||
|
// buildAndStartCommand starts the suite's compiled test binary with the
// given args (cwd = the suite directory) and returns the running command
// plus a buffer capturing its output. With pipeToStdout, stdout goes
// straight to the terminal and only stderr is also captured; otherwise
// both streams are captured. Aborts the CLI if the process fails to start.
func buildAndStartCommand(suite TestSuite, args []string, pipeToStdout bool) (*exec.Cmd, *bytes.Buffer) {
	buf := &bytes.Buffer{}
	cmd := exec.Command(suite.PathToCompiledTest, args...)
	cmd.Dir = suite.Path
	if pipeToStdout {
		cmd.Stderr = io.MultiWriter(os.Stdout, buf)
		cmd.Stdout = os.Stdout
	} else {
		cmd.Stderr = buf
		cmd.Stdout = buf
	}
	err := cmd.Start()
	command.AbortIfError("Failed to start test suite", err)

	return cmd, buf
}
|
||||||
|
|
||||||
|
// checkForNoTestsWarning reports whether the captured test output contains
// the go toolchain's "no tests to run" warning; when it does, a hint about
// running "ginkgo bootstrap" is printed to stderr.
func checkForNoTestsWarning(buf *bytes.Buffer) bool {
	if !strings.Contains(buf.String(), "warning: no tests to run") {
		return false
	}
	fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`)
	return true
}
|
||||||
|
|
||||||
|
// runGoTest runs a non-ginkgo suite's compiled binary with go-test style
// arguments and sets suite.State to passed/failed based on the exit code.
// The GINKGO_FOCUS_EXIT_CODE also counts as a pass; an empty suite counts
// as a failure when --require-suite is set.
func runGoTest(suite TestSuite, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) TestSuite {
	// As we run the go test from the suite directory, make sure the cover profile is absolute
	// and placed into the expected output directory when one is configured.
	if goFlagsConfig.Cover && !filepath.IsAbs(goFlagsConfig.CoverProfile) {
		goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
	}

	args, err := types.GenerateGoTestRunArgs(goFlagsConfig)
	command.AbortIfError("Failed to generate test run arguments", err)
	cmd, buf := buildAndStartCommand(suite, args, true)

	cmd.Wait()

	exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
	passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
	passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed
	if passed {
		suite.State = TestSuiteStatePassed
	} else {
		suite.State = TestSuiteStateFailed
	}

	return suite
}
|
||||||
|
|
||||||
|
// runSerial runs a ginkgo suite's compiled binary as a single process.
// Profile and report destinations are first rewritten to their absolute
// generated-asset locations (the binary runs from the suite directory).
// The suite's State and HasProgrammaticFocus are set from the exit code;
// GINKGO_FOCUS_EXIT_CODE counts as a pass but suppresses profile output,
// and an empty suite fails when --require-suite is set.
func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
	if goFlagsConfig.Cover {
		goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
	}
	if goFlagsConfig.BlockProfile != "" {
		goFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0)
	}
	if goFlagsConfig.CPUProfile != "" {
		goFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0)
	}
	if goFlagsConfig.MemProfile != "" {
		goFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0)
	}
	if goFlagsConfig.MutexProfile != "" {
		goFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0)
	}
	if reporterConfig.JSONReport != "" {
		reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
	}
	if reporterConfig.JUnitReport != "" {
		reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
	}
	if reporterConfig.TeamcityReport != "" {
		reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0)
	}

	args, err := types.GenerateGinkgoTestRunArgs(ginkgoConfig, reporterConfig, goFlagsConfig)
	command.AbortIfError("Failed to generate test run arguments", err)
	// Disable go test's own timeout; ginkgo manages timeouts itself.
	args = append([]string{"--test.timeout=0"}, args...)
	args = append(args, additionalArgs...)

	cmd, buf := buildAndStartCommand(suite, args, true)

	cmd.Wait()

	exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
	suite.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
	passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
	passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed
	if passed {
		suite.State = TestSuiteStatePassed
	} else {
		suite.State = TestSuiteStateFailed
	}

	// Programmatic focus suppresses profile generation; tell the user
	// which requested profiles were therefore not produced.
	if suite.HasProgrammaticFocus {
		if goFlagsConfig.Cover {
			fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused")
		}
		if goFlagsConfig.BlockProfile != "" {
			fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused")
		}
		if goFlagsConfig.CPUProfile != "" {
			fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused")
		}
		if goFlagsConfig.MemProfile != "" {
			fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused")
		}
		if goFlagsConfig.MutexProfile != "" {
			fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused")
		}
	}

	return suite
}
|
||||||
|
|
||||||
|
func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
|
||||||
|
type procResult struct {
|
||||||
|
passed bool
|
||||||
|
hasProgrammaticFocus bool
|
||||||
|
}
|
||||||
|
|
||||||
|
numProcs := cliConfig.ComputedProcs()
|
||||||
|
procOutput := make([]*bytes.Buffer, numProcs)
|
||||||
|
coverProfiles := []string{}
|
||||||
|
|
||||||
|
blockProfiles := []string{}
|
||||||
|
cpuProfiles := []string{}
|
||||||
|
memProfiles := []string{}
|
||||||
|
mutexProfiles := []string{}
|
||||||
|
|
||||||
|
procResults := make(chan procResult)
|
||||||
|
|
||||||
|
server, err := parallel_support.NewServer(numProcs, reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut))
|
||||||
|
command.AbortIfError("Failed to start parallel spec server", err)
|
||||||
|
server.Start()
|
||||||
|
defer server.Close()
|
||||||
|
|
||||||
|
if reporterConfig.JSONReport != "" {
|
||||||
|
reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
|
||||||
|
}
|
||||||
|
if reporterConfig.JUnitReport != "" {
|
||||||
|
reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
|
||||||
|
}
|
||||||
|
if reporterConfig.TeamcityReport != "" {
|
||||||
|
reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
for proc := 1; proc <= numProcs; proc++ {
|
||||||
|
procGinkgoConfig := ginkgoConfig
|
||||||
|
procGinkgoConfig.ParallelProcess, procGinkgoConfig.ParallelTotal, procGinkgoConfig.ParallelHost = proc, numProcs, server.Address()
|
||||||
|
|
||||||
|
procGoFlagsConfig := goFlagsConfig
|
||||||
|
if goFlagsConfig.Cover {
|
||||||
|
procGoFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, proc)
|
||||||
|
coverProfiles = append(coverProfiles, procGoFlagsConfig.CoverProfile)
|
||||||
|
}
|
||||||
|
if goFlagsConfig.BlockProfile != "" {
|
||||||
|
procGoFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, proc)
|
||||||
|
blockProfiles = append(blockProfiles, procGoFlagsConfig.BlockProfile)
|
||||||
|
}
|
||||||
|
if goFlagsConfig.CPUProfile != "" {
|
||||||
|
procGoFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, proc)
|
||||||
|
cpuProfiles = append(cpuProfiles, procGoFlagsConfig.CPUProfile)
|
||||||
|
}
|
||||||
|
if goFlagsConfig.MemProfile != "" {
|
||||||
|
procGoFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, proc)
|
||||||
|
memProfiles = append(memProfiles, procGoFlagsConfig.MemProfile)
|
||||||
|
}
|
||||||
|
if goFlagsConfig.MutexProfile != "" {
|
||||||
|
procGoFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, proc)
|
||||||
|
mutexProfiles = append(mutexProfiles, procGoFlagsConfig.MutexProfile)
|
||||||
|
}
|
||||||
|
|
||||||
|
args, err := types.GenerateGinkgoTestRunArgs(procGinkgoConfig, reporterConfig, procGoFlagsConfig)
|
||||||
|
command.AbortIfError("Failed to generate test run arguments", err)
|
||||||
|
args = append([]string{"--test.timeout=0"}, args...)
|
||||||
|
args = append(args, additionalArgs...)
|
||||||
|
|
||||||
|
cmd, buf := buildAndStartCommand(suite, args, false)
|
||||||
|
procOutput[proc-1] = buf
|
||||||
|
server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() })
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
cmd.Wait()
|
||||||
|
exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
|
||||||
|
procResults <- procResult{
|
||||||
|
passed: (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE),
|
||||||
|
hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE,
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
passed := true
|
||||||
|
for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
|
||||||
|
result := <-procResults
|
||||||
|
passed = passed && result.passed
|
||||||
|
suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus
|
||||||
|
}
|
||||||
|
if passed {
|
||||||
|
suite.State = TestSuiteStatePassed
|
||||||
|
} else {
|
||||||
|
suite.State = TestSuiteStateFailed
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-server.GetSuiteDone():
|
||||||
|
fmt.Println("")
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
//one of the nodes never finished reporting to the server. Something must have gone wrong.
|
||||||
|
fmt.Fprint(formatter.ColorableStdErr, formatter.F("\n{{bold}}{{red}}Ginkgo timed out waiting for all parallel procs to report back{{/}}\n"))
|
||||||
|
fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{gray}}Test suite:{{/}} %s (%s)\n\n", suite.PackageName, suite.Path))
|
||||||
|
fmt.Fprint(formatter.ColorableStdErr, formatter.Fiw(0, formatter.COLS, "This occurs if a parallel process exits before it reports its results to the Ginkgo CLI. The CLI will now print out all the stdout/stderr output it's collected from the running processes. However you may not see anything useful in these logs because the individual test processes usually intercept output to stdout/stderr in order to capture it in the spec reports.\n\nYou may want to try rerunning your test suite with {{light-gray}}--output-interceptor-mode=none{{/}} to see additional output here and debug your suite.\n"))
|
||||||
|
fmt.Fprintln(formatter.ColorableStdErr, " ")
|
||||||
|
for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
|
||||||
|
fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc))
|
||||||
|
fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String()))
|
||||||
|
}
|
||||||
|
fmt.Fprintf(os.Stderr, "** End **")
|
||||||
|
}
|
||||||
|
|
||||||
|
for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
|
||||||
|
output := procOutput[proc-1].String()
|
||||||
|
if proc == 1 && checkForNoTestsWarning(procOutput[0]) && cliConfig.RequireSuite {
|
||||||
|
suite.State = TestSuiteStateFailed
|
||||||
|
}
|
||||||
|
if strings.Contains(output, "deprecated Ginkgo functionality") {
|
||||||
|
fmt.Fprintln(os.Stderr, output)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(coverProfiles) > 0 {
|
||||||
|
if suite.HasProgrammaticFocus {
|
||||||
|
fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused")
|
||||||
|
} else {
|
||||||
|
coverProfile := AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
|
||||||
|
err := MergeAndCleanupCoverProfiles(coverProfiles, coverProfile)
|
||||||
|
command.AbortIfError("Failed to combine cover profiles", err)
|
||||||
|
|
||||||
|
coverage, err := GetCoverageFromCoverProfile(coverProfile)
|
||||||
|
command.AbortIfError("Failed to compute coverage", err)
|
||||||
|
if coverage == 0 {
|
||||||
|
fmt.Fprintln(os.Stdout, "coverage: [no statements]")
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(os.Stdout, "coverage: %.1f%% of statements\n", coverage)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(blockProfiles) > 0 {
|
||||||
|
if suite.HasProgrammaticFocus {
|
||||||
|
fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused")
|
||||||
|
} else {
|
||||||
|
blockProfile := AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0)
|
||||||
|
err := MergeProfiles(blockProfiles, blockProfile)
|
||||||
|
command.AbortIfError("Failed to combine blockprofiles", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(cpuProfiles) > 0 {
|
||||||
|
if suite.HasProgrammaticFocus {
|
||||||
|
fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused")
|
||||||
|
} else {
|
||||||
|
cpuProfile := AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0)
|
||||||
|
err := MergeProfiles(cpuProfiles, cpuProfile)
|
||||||
|
command.AbortIfError("Failed to combine cpuprofiles", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(memProfiles) > 0 {
|
||||||
|
if suite.HasProgrammaticFocus {
|
||||||
|
fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused")
|
||||||
|
} else {
|
||||||
|
memProfile := AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0)
|
||||||
|
err := MergeProfiles(memProfiles, memProfile)
|
||||||
|
command.AbortIfError("Failed to combine memprofiles", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(mutexProfiles) > 0 {
|
||||||
|
if suite.HasProgrammaticFocus {
|
||||||
|
fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused")
|
||||||
|
} else {
|
||||||
|
mutexProfile := AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0)
|
||||||
|
err := MergeProfiles(mutexProfiles, mutexProfile)
|
||||||
|
command.AbortIfError("Failed to combine mutexprofiles", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return suite
|
||||||
|
}
|
||||||
|
|
||||||
|
func runAfterRunHook(command string, noColor bool, suite TestSuite) {
|
||||||
|
if command == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
f := formatter.NewWithNoColorBool(noColor)
|
||||||
|
|
||||||
|
// Allow for string replacement to pass input to the command
|
||||||
|
passed := "[FAIL]"
|
||||||
|
if suite.State.Is(TestSuiteStatePassed) {
|
||||||
|
passed = "[PASS]"
|
||||||
|
}
|
||||||
|
command = strings.ReplaceAll(command, "(ginkgo-suite-passed)", passed)
|
||||||
|
command = strings.ReplaceAll(command, "(ginkgo-suite-name)", suite.PackageName)
|
||||||
|
|
||||||
|
// Must break command into parts
|
||||||
|
splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
|
||||||
|
parts := splitArgs.FindAllString(command, -1)
|
||||||
|
|
||||||
|
output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{red}}{{bold}}After-run-hook failed:{{/}}"))
|
||||||
|
fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{red}}%s{{/}}", output))
|
||||||
|
} else {
|
||||||
|
fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{green}}{{bold}}After-run-hook succeeded:{{/}}"))
|
||||||
|
fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{green}}%s{{/}}", output))
|
||||||
|
}
|
||||||
|
}
|
283
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
generated
vendored
Normal file
283
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
generated
vendored
Normal file
@ -0,0 +1,283 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"math/rand"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Human-readable reasons attached to suites that did not run.
const TIMEOUT_ELAPSED_FAILURE_REASON = "Suite did not run because the timeout elapsed"
const PRIOR_FAILURES_FAILURE_REASON = "Suite did not run because prior suites failed and --keep-going is not set"
const EMPTY_SKIP_FAILURE_REASON = "Suite did not run go test reported that no test files were found"

// TestSuiteState tracks a suite's progress through discovery, compilation,
// and execution.
type TestSuiteState uint

const (
	TestSuiteStateInvalid TestSuiteState = iota

	TestSuiteStateUncompiled
	TestSuiteStateCompiled

	TestSuiteStatePassed

	TestSuiteStateSkippedDueToEmptyCompilation
	TestSuiteStateSkippedByFilter
	TestSuiteStateSkippedDueToPriorFailures

	TestSuiteStateFailed
	TestSuiteStateFailedDueToTimeout
	TestSuiteStateFailedToCompile
)

// TestSuiteStateFailureStates enumerates every state that counts as a failure.
var TestSuiteStateFailureStates = []TestSuiteState{TestSuiteStateFailed, TestSuiteStateFailedDueToTimeout, TestSuiteStateFailedToCompile}
|
||||||
|
|
||||||
|
func (state TestSuiteState) Is(states ...TestSuiteState) bool {
|
||||||
|
for _, suiteState := range states {
|
||||||
|
if suiteState == state {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
type TestSuite struct {
|
||||||
|
Path string
|
||||||
|
PackageName string
|
||||||
|
IsGinkgo bool
|
||||||
|
|
||||||
|
Precompiled bool
|
||||||
|
PathToCompiledTest string
|
||||||
|
CompilationError error
|
||||||
|
|
||||||
|
HasProgrammaticFocus bool
|
||||||
|
State TestSuiteState
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts TestSuite) AbsPath() string {
|
||||||
|
path, _ := filepath.Abs(ts.Path)
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts TestSuite) NamespacedName() string {
|
||||||
|
name := relPath(ts.Path)
|
||||||
|
name = strings.TrimLeft(name, "."+string(filepath.Separator))
|
||||||
|
name = strings.ReplaceAll(name, string(filepath.Separator), "_")
|
||||||
|
name = strings.ReplaceAll(name, " ", "_")
|
||||||
|
if name == "" {
|
||||||
|
return ts.PackageName
|
||||||
|
}
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
|
||||||
|
type TestSuites []TestSuite
|
||||||
|
|
||||||
|
func (ts TestSuites) AnyHaveProgrammaticFocus() bool {
|
||||||
|
for _, suite := range ts {
|
||||||
|
if suite.HasProgrammaticFocus {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts TestSuites) ThatAreGinkgoSuites() TestSuites {
|
||||||
|
out := TestSuites{}
|
||||||
|
for _, suite := range ts {
|
||||||
|
if suite.IsGinkgo {
|
||||||
|
out = append(out, suite)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts TestSuites) CountWithState(states ...TestSuiteState) int {
|
||||||
|
n := 0
|
||||||
|
for _, suite := range ts {
|
||||||
|
if suite.State.Is(states...) {
|
||||||
|
n += 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts TestSuites) WithState(states ...TestSuiteState) TestSuites {
|
||||||
|
out := TestSuites{}
|
||||||
|
for _, suite := range ts {
|
||||||
|
if suite.State.Is(states...) {
|
||||||
|
out = append(out, suite)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts TestSuites) WithoutState(states ...TestSuiteState) TestSuites {
|
||||||
|
out := TestSuites{}
|
||||||
|
for _, suite := range ts {
|
||||||
|
if !suite.State.Is(states...) {
|
||||||
|
out = append(out, suite)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts TestSuites) ShuffledCopy(seed int64) TestSuites {
|
||||||
|
out := make(TestSuites, len(ts))
|
||||||
|
permutation := rand.New(rand.NewSource(seed)).Perm(len(ts))
|
||||||
|
for i, j := range permutation {
|
||||||
|
out[i] = ts[j]
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func FindSuites(args []string, cliConfig types.CLIConfig, allowPrecompiled bool) TestSuites {
|
||||||
|
suites := TestSuites{}
|
||||||
|
|
||||||
|
if len(args) > 0 {
|
||||||
|
for _, arg := range args {
|
||||||
|
if allowPrecompiled {
|
||||||
|
suite, err := precompiledTestSuite(arg)
|
||||||
|
if err == nil {
|
||||||
|
suites = append(suites, suite)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
recurseForSuite := cliConfig.Recurse
|
||||||
|
if strings.HasSuffix(arg, "/...") && arg != "/..." {
|
||||||
|
arg = arg[:len(arg)-4]
|
||||||
|
recurseForSuite = true
|
||||||
|
}
|
||||||
|
suites = append(suites, suitesInDir(arg, recurseForSuite)...)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
suites = suitesInDir(".", cliConfig.Recurse)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cliConfig.SkipPackage != "" {
|
||||||
|
skipFilters := strings.Split(cliConfig.SkipPackage, ",")
|
||||||
|
for idx := range suites {
|
||||||
|
for _, skipFilter := range skipFilters {
|
||||||
|
if strings.Contains(suites[idx].Path, skipFilter) {
|
||||||
|
suites[idx].State = TestSuiteStateSkippedByFilter
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return suites
|
||||||
|
}
|
||||||
|
|
||||||
|
func precompiledTestSuite(path string) (TestSuite, error) {
|
||||||
|
info, err := os.Stat(path)
|
||||||
|
if err != nil {
|
||||||
|
return TestSuite{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if info.IsDir() {
|
||||||
|
return TestSuite{}, errors.New("this is a directory, not a file")
|
||||||
|
}
|
||||||
|
|
||||||
|
if filepath.Ext(path) != ".test" && filepath.Ext(path) != ".exe" {
|
||||||
|
return TestSuite{}, errors.New("this is not a .test binary")
|
||||||
|
}
|
||||||
|
|
||||||
|
if filepath.Ext(path) == ".test" && info.Mode()&0111 == 0 {
|
||||||
|
return TestSuite{}, errors.New("this is not executable")
|
||||||
|
}
|
||||||
|
|
||||||
|
dir := relPath(filepath.Dir(path))
|
||||||
|
packageName := strings.TrimSuffix(filepath.Base(path), ".exe")
|
||||||
|
packageName = strings.TrimSuffix(packageName, ".test")
|
||||||
|
|
||||||
|
path, err = filepath.Abs(path)
|
||||||
|
if err != nil {
|
||||||
|
return TestSuite{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return TestSuite{
|
||||||
|
Path: dir,
|
||||||
|
PackageName: packageName,
|
||||||
|
IsGinkgo: true,
|
||||||
|
Precompiled: true,
|
||||||
|
PathToCompiledTest: path,
|
||||||
|
State: TestSuiteStateCompiled,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func suitesInDir(dir string, recurse bool) TestSuites {
|
||||||
|
suites := TestSuites{}
|
||||||
|
|
||||||
|
if path.Base(dir) == "vendor" {
|
||||||
|
return suites
|
||||||
|
}
|
||||||
|
|
||||||
|
files, _ := os.ReadDir(dir)
|
||||||
|
re := regexp.MustCompile(`^[^._].*_test\.go$`)
|
||||||
|
for _, file := range files {
|
||||||
|
if !file.IsDir() && re.Match([]byte(file.Name())) {
|
||||||
|
suite := TestSuite{
|
||||||
|
Path: relPath(dir),
|
||||||
|
PackageName: packageNameForSuite(dir),
|
||||||
|
IsGinkgo: filesHaveGinkgoSuite(dir, files),
|
||||||
|
State: TestSuiteStateUncompiled,
|
||||||
|
}
|
||||||
|
suites = append(suites, suite)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if recurse {
|
||||||
|
re = regexp.MustCompile(`^[._]`)
|
||||||
|
for _, file := range files {
|
||||||
|
if file.IsDir() && !re.Match([]byte(file.Name())) {
|
||||||
|
suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return suites
|
||||||
|
}
|
||||||
|
|
||||||
|
// relPath rewrites dir as a path relative to the current working directory,
// always prefixed with "./" (unless it already starts with ".", e.g. "..").
// Resolution errors are ignored; the best-effort result is returned.
func relPath(dir string) string {
	dir, _ = filepath.Abs(dir)
	cwd, _ := os.Getwd()
	dir, _ = filepath.Rel(cwd, filepath.Clean(dir))

	if string(dir[0]) != "." {
		return "." + string(filepath.Separator) + dir
	}
	return dir
}
|
||||||
|
|
||||||
|
// packageNameForSuite derives a display name for the suite from the final
// element of dir's absolute path.
func packageNameForSuite(dir string) string {
	abs, _ := filepath.Abs(dir)
	return filepath.Base(abs)
}
|
||||||
|
|
||||||
|
// filesHaveGinkgoSuite reports whether any *_test.go file in files (read from
// dir) imports Ginkgo or declares package ginkgo. Read errors on individual
// files are tolerated: such files simply never match.
func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool {
	reTestFile := regexp.MustCompile(`_test\.go$`)
	reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`)

	for _, file := range files {
		if file.IsDir() || !reTestFile.Match([]byte(file.Name())) {
			continue
		}
		contents, _ := os.ReadFile(dir + "/" + file.Name())
		if reGinkgo.Match(contents) {
			return true
		}
	}
	return false
}
|
86
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go
generated
vendored
Normal file
86
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/formatter"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FileExists reports whether path can be stat'd; any stat error (including
// permission errors) is treated as "does not exist".
func FileExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}
|
||||||
|
|
||||||
|
// CopyFile copies src to dest, preserving src's permission bits. Any existing
// file at dest is removed first so the fresh mode takes effect.
//
// Fix over the original: both file handles are now closed via defer, so they
// no longer leak when an intermediate step (Stat, OpenFile, Copy) fails.
func CopyFile(src string, dest string) error {
	srcFile, err := os.Open(src)
	if err != nil {
		return err
	}
	defer srcFile.Close()

	srcStat, err := srcFile.Stat()
	if err != nil {
		return err
	}

	// Remove any existing destination so OpenFile applies srcStat.Mode() fresh.
	if _, err := os.Stat(dest); err == nil {
		os.Remove(dest)
	}

	destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, srcStat.Mode())
	if err != nil {
		return err
	}
	defer destFile.Close()

	if _, err := io.Copy(destFile, srcFile); err != nil {
		return err
	}

	// Report the write-side Close explicitly: it can surface flush errors.
	// (The deferred Close then becomes a harmless double-close.)
	return destFile.Close()
}
|
||||||
|
|
||||||
|
func GoFmt(path string) {
|
||||||
|
out, err := exec.Command("go", "fmt", path).CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
command.AbortIfError(fmt.Sprintf("Could not fmt:\n%s\n", string(out)), err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PluralizedWord picks the singular form when count is exactly 1 and the
// plural form otherwise (including 0 and negative counts).
func PluralizedWord(singular, plural string, count int) string {
	if count != 1 {
		return plural
	}
	return singular
}
|
||||||
|
|
||||||
|
func FailedSuitesReport(suites TestSuites, f formatter.Formatter) string {
|
||||||
|
out := ""
|
||||||
|
out += "There were failures detected in the following suites:\n"
|
||||||
|
|
||||||
|
maxPackageNameLength := 0
|
||||||
|
for _, suite := range suites.WithState(TestSuiteStateFailureStates...) {
|
||||||
|
if len(suite.PackageName) > maxPackageNameLength {
|
||||||
|
maxPackageNameLength = len(suite.PackageName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength)
|
||||||
|
for _, suite := range suites {
|
||||||
|
switch suite.State {
|
||||||
|
case TestSuiteStateFailed:
|
||||||
|
out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s{{/}}\n", suite.PackageName, suite.Path)
|
||||||
|
case TestSuiteStateFailedToCompile:
|
||||||
|
out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{magenta}}[Compilation failure]{{/}}\n", suite.PackageName, suite.Path)
|
||||||
|
case TestSuiteStateFailedDueToTimeout:
|
||||||
|
out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{orange}}[%s]{{/}}\n", suite.PackageName, suite.Path, TIMEOUT_ELAPSED_FAILURE_REASON)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
54
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go
generated
vendored
Normal file
54
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os/exec"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/formatter"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
var versiorRe = regexp.MustCompile(`v(\d+\.\d+\.\d+)`)
|
||||||
|
|
||||||
|
func VerifyCLIAndFrameworkVersion(suites TestSuites) {
|
||||||
|
cliVersion := types.VERSION
|
||||||
|
mismatches := map[string][]string{}
|
||||||
|
|
||||||
|
for _, suite := range suites {
|
||||||
|
cmd := exec.Command("go", "list", "-m", "github.com/onsi/ginkgo/v2")
|
||||||
|
cmd.Dir = suite.Path
|
||||||
|
output, err := cmd.CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
components := strings.Split(string(output), " ")
|
||||||
|
if len(components) != 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
matches := versiorRe.FindStringSubmatch(components[1])
|
||||||
|
if matches == nil || len(matches) != 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
libraryVersion := matches[1]
|
||||||
|
if cliVersion != libraryVersion {
|
||||||
|
mismatches[libraryVersion] = append(mismatches[libraryVersion], suite.PackageName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(mismatches) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println(formatter.F("{{red}}{{bold}}Ginkgo detected a version mismatch between the Ginkgo CLI and the version of Ginkgo imported by your packages:{{/}}"))
|
||||||
|
|
||||||
|
fmt.Println(formatter.Fi(1, "Ginkgo CLI Version:"))
|
||||||
|
fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}}", cliVersion))
|
||||||
|
fmt.Println(formatter.Fi(1, "Mismatched package versions found:"))
|
||||||
|
for version, packages := range mismatches {
|
||||||
|
fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}} used by %s", version, strings.Join(packages, ", ")))
|
||||||
|
}
|
||||||
|
fmt.Println("")
|
||||||
|
fmt.Println(formatter.Fiw(1, formatter.COLS, "{{gray}}Ginkgo will continue to attempt to run but you may see errors (including flag parsing errors) and should either update your go.mod or your version of the Ginkgo CLI to match.\n\nTo install the matching version of the CLI run\n {{bold}}go install github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file. Alternatively you can use\n {{bold}}go run github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file to invoke the matching version of the Ginkgo CLI.\n\nIf you are attempting to test multiple packages that each have a different version of the Ginkgo library with a single Ginkgo CLI that is currently unsupported.\n{{/}}"))
|
||||||
|
}
|
123
vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go
generated
vendored
Normal file
123
vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
|||||||
|
package labels
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/parser"
|
||||||
|
"go/token"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
"golang.org/x/tools/go/ast/inspector"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BuildLabelsCommand() command.Command {
|
||||||
|
var cliConfig = types.NewDefaultCLIConfig()
|
||||||
|
|
||||||
|
flags, err := types.BuildLabelsCommandFlagSet(&cliConfig)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return command.Command{
|
||||||
|
Name: "labels",
|
||||||
|
Usage: "ginkgo labels <FLAGS> <PACKAGES>",
|
||||||
|
Flags: flags,
|
||||||
|
ShortDoc: "List labels detected in the passed-in packages (or the package in the current directory if left blank).",
|
||||||
|
DocLink: "spec-labels",
|
||||||
|
Command: func(args []string, _ []string) {
|
||||||
|
ListLabels(args, cliConfig)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ListLabels(args []string, cliConfig types.CLIConfig) {
|
||||||
|
suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
|
||||||
|
if len(suites) == 0 {
|
||||||
|
command.AbortWith("Found no test suites")
|
||||||
|
}
|
||||||
|
for _, suite := range suites {
|
||||||
|
labels := fetchLabelsFromPackage(suite.Path)
|
||||||
|
if len(labels) == 0 {
|
||||||
|
fmt.Printf("%s: No labels found\n", suite.PackageName)
|
||||||
|
} else {
|
||||||
|
fmt.Printf("%s: [%s]\n", suite.PackageName, strings.Join(labels, ", "))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func fetchLabelsFromPackage(packagePath string) []string {
|
||||||
|
fset := token.NewFileSet()
|
||||||
|
parsedPackages, err := parser.ParseDir(fset, packagePath, nil, 0)
|
||||||
|
command.AbortIfError("Failed to parse package source:", err)
|
||||||
|
|
||||||
|
files := []*ast.File{}
|
||||||
|
hasTestPackage := false
|
||||||
|
for key, pkg := range parsedPackages {
|
||||||
|
if strings.HasSuffix(key, "_test") {
|
||||||
|
hasTestPackage = true
|
||||||
|
for _, file := range pkg.Files {
|
||||||
|
files = append(files, file)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !hasTestPackage {
|
||||||
|
for _, pkg := range parsedPackages {
|
||||||
|
for _, file := range pkg.Files {
|
||||||
|
files = append(files, file)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
seen := map[string]bool{}
|
||||||
|
labels := []string{}
|
||||||
|
ispr := inspector.New(files)
|
||||||
|
ispr.Preorder([]ast.Node{&ast.CallExpr{}}, func(n ast.Node) {
|
||||||
|
potentialLabels := fetchLabels(n.(*ast.CallExpr))
|
||||||
|
for _, label := range potentialLabels {
|
||||||
|
if !seen[label] {
|
||||||
|
seen[label] = true
|
||||||
|
labels = append(labels, strconv.Quote(label))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
sort.Strings(labels)
|
||||||
|
return labels
|
||||||
|
}
|
||||||
|
|
||||||
|
func fetchLabels(callExpr *ast.CallExpr) []string {
|
||||||
|
out := []string{}
|
||||||
|
switch expr := callExpr.Fun.(type) {
|
||||||
|
case *ast.Ident:
|
||||||
|
if expr.Name != "Label" {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
case *ast.SelectorExpr:
|
||||||
|
if expr.Sel.Name != "Label" {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
for _, arg := range callExpr.Args {
|
||||||
|
switch expr := arg.(type) {
|
||||||
|
case *ast.BasicLit:
|
||||||
|
if expr.Kind == token.STRING {
|
||||||
|
unquoted, err := strconv.Unquote(expr.Value)
|
||||||
|
if err != nil {
|
||||||
|
unquoted = expr.Value
|
||||||
|
}
|
||||||
|
validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{})
|
||||||
|
if err == nil {
|
||||||
|
out = append(out, validated)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
58
vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
generated
vendored
Normal file
58
vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
generated
vendored
Normal file
@ -0,0 +1,58 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/build"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/generators"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/labels"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/outline"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/run"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/unfocus"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/watch"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
var program command.Program
|
||||||
|
|
||||||
|
func GenerateCommands() []command.Command {
|
||||||
|
return []command.Command{
|
||||||
|
watch.BuildWatchCommand(),
|
||||||
|
build.BuildBuildCommand(),
|
||||||
|
generators.BuildBootstrapCommand(),
|
||||||
|
generators.BuildGenerateCommand(),
|
||||||
|
labels.BuildLabelsCommand(),
|
||||||
|
outline.BuildOutlineCommand(),
|
||||||
|
unfocus.BuildUnfocusCommand(),
|
||||||
|
BuildVersionCommand(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
program = command.Program{
|
||||||
|
Name: "ginkgo",
|
||||||
|
Heading: fmt.Sprintf("Ginkgo Version %s", types.VERSION),
|
||||||
|
Commands: GenerateCommands(),
|
||||||
|
DefaultCommand: run.BuildRunCommand(),
|
||||||
|
DeprecatedCommands: []command.DeprecatedCommand{
|
||||||
|
{Name: "convert", Deprecation: types.Deprecations.Convert()},
|
||||||
|
{Name: "blur", Deprecation: types.Deprecations.Blur()},
|
||||||
|
{Name: "nodot", Deprecation: types.Deprecations.Nodot()},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
program.RunAndExit(os.Args)
|
||||||
|
}
|
||||||
|
|
||||||
|
func BuildVersionCommand() command.Command {
|
||||||
|
return command.Command{
|
||||||
|
Name: "version",
|
||||||
|
Usage: "ginkgo version",
|
||||||
|
ShortDoc: "Print Ginkgo's version",
|
||||||
|
Command: func(_ []string, _ []string) {
|
||||||
|
fmt.Printf("Ginkgo Version %s\n", types.VERSION)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
302
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
generated
vendored
Normal file
302
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
generated
vendored
Normal file
@ -0,0 +1,302 @@
|
|||||||
|
package outline
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
"go/ast"
|
||||||
|
"go/token"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// undefinedTextAlt is used if the spec/container text cannot be derived.
	undefinedTextAlt = "undefined"
)

// ginkgoMetadata holds useful bits of information for every entry in the outline.
type ginkgoMetadata struct {
	// Name is the spec or container function name, e.g. `Describe` or `It`.
	Name string `json:"name"`

	// Text is the `text` argument passed to specs, and some containers.
	Text string `json:"text"`

	// Start is the position of first character of the spec or container block.
	Start int `json:"start"`

	// End is the position of first character immediately after the spec or container block.
	End int `json:"end"`

	Spec    bool     `json:"spec"`
	Focused bool     `json:"focused"`
	Pending bool     `json:"pending"`
	Labels  []string `json:"labels"`
}

// ginkgoNode is used to construct the outline as a tree.
type ginkgoNode struct {
	ginkgoMetadata
	Nodes []*ginkgoNode `json:"nodes"`
}

// walkFunc is invoked for every node encountered during a traversal.
type walkFunc func(n *ginkgoNode)

// PreOrder visits n before recursing into its children.
func (n *ginkgoNode) PreOrder(f walkFunc) {
	f(n)
	for _, child := range n.Nodes {
		child.PreOrder(f)
	}
}

// PostOrder visits all of n's children before visiting n itself.
func (n *ginkgoNode) PostOrder(f walkFunc) {
	for _, child := range n.Nodes {
		child.PostOrder(f)
	}
	f(n)
}

// Walk performs a combined traversal: pre fires on the way down, post fires
// on the way back up.
func (n *ginkgoNode) Walk(pre, post walkFunc) {
	pre(n)
	for _, child := range n.Nodes {
		child.Walk(pre, post)
	}
	post(n)
}
|
||||||
|
|
||||||
|
// PropagateInheritedProperties propagates the Pending and Focused properties
|
||||||
|
// through the subtree rooted at n.
|
||||||
|
func (n *ginkgoNode) PropagateInheritedProperties() {
|
||||||
|
n.PreOrder(func(thisNode *ginkgoNode) {
|
||||||
|
for _, descendantNode := range thisNode.Nodes {
|
||||||
|
if thisNode.Pending {
|
||||||
|
descendantNode.Pending = true
|
||||||
|
descendantNode.Focused = false
|
||||||
|
}
|
||||||
|
if thisNode.Focused && !descendantNode.Pending {
|
||||||
|
descendantNode.Focused = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// BackpropagateUnfocus propagates the Focused property through the subtree
// rooted at n. It applies the rule described in the Ginkgo docs:
// > Nested programmatically focused specs follow a simple rule: if a
// > leaf-node is marked focused, any of its ancestor nodes that are marked
// > focus will be unfocused.
func (n *ginkgoNode) BackpropagateUnfocus() {
	// Stack of booleans, one entry per fully-visited subtree, recording
	// whether that subtree contains at least one focused spec.
	focusedSpecInSubtreeStack := []bool{}
	n.PostOrder(func(thisNode *ginkgoNode) {
		if thisNode.Spec {
			// Leaf spec: push its own focus state and stop.
			focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused)
			return
		}
		// Container: in post-order every child has already pushed exactly one
		// entry, so pop one entry per child and OR them together.
		focusedSpecInSubtree := false
		for range thisNode.Nodes {
			focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1]
			focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1]
		}
		// Push the aggregate for this container's own parent to consume.
		focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree)
		// A container holding a focused spec must itself be unfocused.
		if focusedSpecInSubtree {
			thisNode.Focused = false
		}
	})

}
|
||||||
|
|
||||||
|
// packageAndIdentNamesFromCallExpr extracts the package and identifier names
// of the function being called: a bare call such as `It(...)` yields an empty
// package name, while `ginkgo.It(...)` yields "ginkgo". The final return
// value reports whether the names could be determined.
func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) {
	switch fn := ce.Fun.(type) {
	case *ast.Ident:
		return "", fn.Name, true
	case *ast.SelectorExpr:
		pkgID, ok := fn.X.(*ast.Ident)
		if !ok {
			return "", "", false
		}
		// A package identifier is top-level, so Obj must be nil
		if pkgID.Obj != nil {
			return "", "", false
		}
		if fn.Sel == nil {
			return "", "", false
		}
		return pkgID.Name, fn.Sel.Name, true
	default:
		return "", "", false
	}
}
|
||||||
|
|
||||||
|
// absoluteOffsetsForNode derives the absolute character offsets of the node
// start and end positions within the file set (ignoring //line directives).
func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) {
	start = fset.PositionFor(n.Pos(), false).Offset
	end = fset.PositionFor(n.End(), false).Offset
	return start, end
}
|
||||||
|
|
||||||
|
// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree
|
||||||
|
// corresponding to a Ginkgo container or spec.
|
||||||
|
func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName *string) (*ginkgoNode, bool) {
|
||||||
|
packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce)
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
n := ginkgoNode{}
|
||||||
|
n.Name = identName
|
||||||
|
n.Start, n.End = absoluteOffsetsForNode(fset, ce)
|
||||||
|
n.Nodes = make([]*ginkgoNode, 0)
|
||||||
|
switch identName {
|
||||||
|
case "It", "Specify", "Entry":
|
||||||
|
n.Spec = true
|
||||||
|
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||||
|
n.Labels = labelFromCallExpr(ce)
|
||||||
|
n.Pending = pendingFromCallExpr(ce)
|
||||||
|
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||||
|
case "FIt", "FSpecify", "FEntry":
|
||||||
|
n.Spec = true
|
||||||
|
n.Focused = true
|
||||||
|
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||||
|
n.Labels = labelFromCallExpr(ce)
|
||||||
|
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||||
|
case "PIt", "PSpecify", "XIt", "XSpecify", "PEntry", "XEntry":
|
||||||
|
n.Spec = true
|
||||||
|
n.Pending = true
|
||||||
|
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||||
|
n.Labels = labelFromCallExpr(ce)
|
||||||
|
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||||
|
case "Context", "Describe", "When", "DescribeTable":
|
||||||
|
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||||
|
n.Labels = labelFromCallExpr(ce)
|
||||||
|
n.Pending = pendingFromCallExpr(ce)
|
||||||
|
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||||
|
case "FContext", "FDescribe", "FWhen", "FDescribeTable":
|
||||||
|
n.Focused = true
|
||||||
|
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||||
|
n.Labels = labelFromCallExpr(ce)
|
||||||
|
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||||
|
case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen", "PDescribeTable", "XDescribeTable":
|
||||||
|
n.Pending = true
|
||||||
|
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||||
|
n.Labels = labelFromCallExpr(ce)
|
||||||
|
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||||
|
case "By":
|
||||||
|
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||||
|
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||||
|
case "AfterEach", "BeforeEach":
|
||||||
|
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||||
|
case "JustAfterEach", "JustBeforeEach":
|
||||||
|
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||||
|
case "AfterSuite", "BeforeSuite":
|
||||||
|
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||||
|
case "SynchronizedAfterSuite", "SynchronizedBeforeSuite":
|
||||||
|
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||||
|
default:
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or
|
||||||
|
// container. If it cannot derive it, it returns the alt text.
|
||||||
|
func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string {
|
||||||
|
text, defined := textFromCallExpr(ce)
|
||||||
|
if !defined {
|
||||||
|
return alt
|
||||||
|
}
|
||||||
|
return text
|
||||||
|
}
|
||||||
|
|
||||||
|
// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container
// from the call's first argument. The boolean reports whether a basic
// literal was found there.
func textFromCallExpr(ce *ast.CallExpr) (string, bool) {
	if len(ce.Args) == 0 {
		return "", false
	}
	lit, ok := ce.Args[0].(*ast.BasicLit)
	if !ok {
		return "", false
	}
	if lit.Kind != token.CHAR && lit.Kind != token.STRING {
		return lit.Value, true
	}
	// For token.CHAR and token.STRING, Value is quoted; fall back to the raw
	// Value if unquoting fails.
	if unquoted, err := strconv.Unquote(lit.Value); err == nil {
		return unquoted, true
	}
	return lit.Value, true
}
|
||||||
|
|
||||||
|
func labelFromCallExpr(ce *ast.CallExpr) []string {
|
||||||
|
|
||||||
|
labels := []string{}
|
||||||
|
if len(ce.Args) < 2 {
|
||||||
|
return labels
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, arg := range ce.Args[1:] {
|
||||||
|
switch expr := arg.(type) {
|
||||||
|
case *ast.CallExpr:
|
||||||
|
id, ok := expr.Fun.(*ast.Ident)
|
||||||
|
if !ok {
|
||||||
|
// to skip over cases where the expr.Fun. is actually *ast.SelectorExpr
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if id.Name == "Label" {
|
||||||
|
ls := extractLabels(expr)
|
||||||
|
for _, label := range ls {
|
||||||
|
labels = append(labels, label)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return labels
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractLabels(expr *ast.CallExpr) []string {
|
||||||
|
out := []string{}
|
||||||
|
for _, arg := range expr.Args {
|
||||||
|
switch expr := arg.(type) {
|
||||||
|
case *ast.BasicLit:
|
||||||
|
if expr.Kind == token.STRING {
|
||||||
|
unquoted, err := strconv.Unquote(expr.Value)
|
||||||
|
if err != nil {
|
||||||
|
unquoted = expr.Value
|
||||||
|
}
|
||||||
|
validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{})
|
||||||
|
if err == nil {
|
||||||
|
out = append(out, validated)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// pendingFromCallExpr reports whether any decorator argument after the text
// marks the spec or container as pending — either the bare `Pending`
// identifier or a `Pending(...)` call.
func pendingFromCallExpr(ce *ast.CallExpr) bool {
	if len(ce.Args) < 2 {
		return false
	}

	for _, arg := range ce.Args[1:] {
		switch expr := arg.(type) {
		case *ast.CallExpr:
			// to skip over cases where the expr.Fun. is actually *ast.SelectorExpr
			if id, ok := expr.Fun.(*ast.Ident); ok && id.Name == "Pending" {
				return true
			}
		case *ast.Ident:
			if expr.Name == "Pending" {
				return true
			}
		}
	}
	return false
}
|
65
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
generated
vendored
Normal file
65
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Most of the required functions were available in the
|
||||||
|
// "golang.org/x/tools/go/ast/astutil" package, but not exported.
|
||||||
|
// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go
|
||||||
|
|
||||||
|
package outline
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go/ast"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// packageNameForImport returns the package name for the package. If the package
|
||||||
|
// is not imported, it returns nil. "Package name" refers to `pkgname` in the
|
||||||
|
// call expression `pkgname.ExportedIdentifier`. Examples:
|
||||||
|
// (import path not found) -> nil
|
||||||
|
// "import example.com/pkg/foo" -> "foo"
|
||||||
|
// "import fooalias example.com/pkg/foo" -> "fooalias"
|
||||||
|
// "import . example.com/pkg/foo" -> ""
|
||||||
|
func packageNameForImport(f *ast.File, path string) *string {
|
||||||
|
spec := importSpec(f, path)
|
||||||
|
if spec == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
name := spec.Name.String()
|
||||||
|
if name == "<nil>" {
|
||||||
|
// If the package name is not explicitly specified,
|
||||||
|
// make an educated guess. This is not guaranteed to be correct.
|
||||||
|
lastSlash := strings.LastIndex(path, "/")
|
||||||
|
if lastSlash == -1 {
|
||||||
|
name = path
|
||||||
|
} else {
|
||||||
|
name = path[lastSlash+1:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if name == "." {
|
||||||
|
name = ""
|
||||||
|
}
|
||||||
|
return &name
|
||||||
|
}
|
||||||
|
|
||||||
|
// importSpec returns the import spec if f imports path,
|
||||||
|
// or nil otherwise.
|
||||||
|
func importSpec(f *ast.File, path string) *ast.ImportSpec {
|
||||||
|
for _, s := range f.Imports {
|
||||||
|
if strings.HasPrefix(importPath(s), path) {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// importPath returns the unquoted import path of s,
// or "" if the path is not properly quoted.
func importPath(s *ast.ImportSpec) string {
	unquoted, err := strconv.Unquote(s.Path.Value)
	if err != nil {
		return ""
	}
	return unquoted
}
|
110
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
generated
vendored
Normal file
110
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
generated
vendored
Normal file
@ -0,0 +1,110 @@
|
|||||||
|
package outline
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/token"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/tools/go/ast/inspector"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// ginkgoImportPath is the well-known ginkgo import path
	ginkgoImportPath = "github.com/onsi/ginkgo/v2"
)

// FromASTFile returns an outline for a Ginkgo test source file. It fails if
// the file does not import ginkgo at all.
func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) {
	ginkgoPackageName := packageNameForImport(src, ginkgoImportPath)
	if ginkgoPackageName == nil {
		return nil, fmt.Errorf("file does not import %q", ginkgoImportPath)
	}

	// The inspector fires the callback twice per CallExpr (push, then pop);
	// the stack tracks the chain of Ginkgo containers currently open.
	root := ginkgoNode{}
	stack := []*ginkgoNode{&root}
	ispr := inspector.New([]*ast.File{src})
	ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool {
		if push {
			// Pre-order traversal
			ce, ok := node.(*ast.CallExpr)
			if !ok {
				// Because `Nodes` calls this function only when the node is an
				// ast.CallExpr, this should never happen
				panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End()))
			}
			gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName)
			if !ok {
				// Node is not a Ginkgo spec or container, continue
				return true
			}
			parent := stack[len(stack)-1]
			parent.Nodes = append(parent.Nodes, gn)
			stack = append(stack, gn)
			return true
		}
		// Post-order traversal
		// Only pop if this pop event corresponds to the node on top of the
		// stack; matching is done by comparing absolute source offsets.
		start, end := absoluteOffsetsForNode(fset, node)
		lastVisitedGinkgoNode := stack[len(stack)-1]
		if start != lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End {
			// Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue
			return true
		}
		stack = stack[0 : len(stack)-1]
		return true
	})
	if len(root.Nodes) == 0 {
		return &outline{[]*ginkgoNode{}}, nil
	}

	// Derive the final focused property for all nodes. This must be done
	// _before_ propagating the inherited focused property.
	root.BackpropagateUnfocus()
	// Now, propagate inherited properties, including focused and pending.
	root.PropagateInheritedProperties()

	return &outline{root.Nodes}, nil
}
|
||||||
|
|
||||||
|
type outline struct {
|
||||||
|
Nodes []*ginkgoNode `json:"nodes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *outline) MarshalJSON() ([]byte, error) {
|
||||||
|
return json.Marshal(o.Nodes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a CSV-formatted outline. Spec or container are output in
|
||||||
|
// depth-first order.
|
||||||
|
func (o *outline) String() string {
|
||||||
|
return o.StringIndent(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringIndent returns a CSV-formated outline, but every line is indented by
|
||||||
|
// one 'width' of spaces for every level of nesting.
|
||||||
|
func (o *outline) StringIndent(width int) string {
|
||||||
|
var b strings.Builder
|
||||||
|
b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n")
|
||||||
|
|
||||||
|
currentIndent := 0
|
||||||
|
pre := func(n *ginkgoNode) {
|
||||||
|
b.WriteString(fmt.Sprintf("%*s", currentIndent, ""))
|
||||||
|
var labels string
|
||||||
|
if len(n.Labels) == 1 {
|
||||||
|
labels = n.Labels[0]
|
||||||
|
} else {
|
||||||
|
labels = strings.Join(n.Labels, ", ")
|
||||||
|
}
|
||||||
|
//enclosing labels in a double quoted comma separate listed so that when inmported into a CSV app the Labels column has comma separate strings
|
||||||
|
b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels))
|
||||||
|
currentIndent += width
|
||||||
|
}
|
||||||
|
post := func(n *ginkgoNode) {
|
||||||
|
currentIndent -= width
|
||||||
|
}
|
||||||
|
for _, n := range o.Nodes {
|
||||||
|
n.Walk(pre, post)
|
||||||
|
}
|
||||||
|
return b.String()
|
||||||
|
}
|
98
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go
generated
vendored
Normal file
98
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
|
|||||||
|
package outline
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"go/parser"
|
||||||
|
"go/token"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// indentWidth is the width used by the 'indent' output
|
||||||
|
indentWidth = 4
|
||||||
|
// stdinAlias is a portable alias for stdin. This convention is used in
|
||||||
|
// other CLIs, e.g., kubectl.
|
||||||
|
stdinAlias = "-"
|
||||||
|
usageCommand = "ginkgo outline <filename>"
|
||||||
|
)
|
||||||
|
|
||||||
|
type outlineConfig struct {
|
||||||
|
Format string
|
||||||
|
}
|
||||||
|
|
||||||
|
func BuildOutlineCommand() command.Command {
|
||||||
|
conf := outlineConfig{
|
||||||
|
Format: "csv",
|
||||||
|
}
|
||||||
|
flags, err := types.NewGinkgoFlagSet(
|
||||||
|
types.GinkgoFlags{
|
||||||
|
{Name: "format", KeyPath: "Format",
|
||||||
|
Usage: "Format of outline",
|
||||||
|
UsageArgument: "one of 'csv', 'indent', or 'json'",
|
||||||
|
UsageDefaultValue: conf.Format,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
&conf,
|
||||||
|
types.GinkgoFlagSections{},
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return command.Command{
|
||||||
|
Name: "outline",
|
||||||
|
Usage: "ginkgo outline <filename>",
|
||||||
|
ShortDoc: "Create an outline of Ginkgo symbols for a file",
|
||||||
|
Documentation: "To read from stdin, use: `ginkgo outline -`",
|
||||||
|
DocLink: "creating-an-outline-of-specs",
|
||||||
|
Flags: flags,
|
||||||
|
Command: func(args []string, _ []string) {
|
||||||
|
outlineFile(args, conf.Format)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func outlineFile(args []string, format string) {
|
||||||
|
if len(args) != 1 {
|
||||||
|
command.AbortWithUsage("outline expects exactly one argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
filename := args[0]
|
||||||
|
var src *os.File
|
||||||
|
if filename == stdinAlias {
|
||||||
|
src = os.Stdin
|
||||||
|
} else {
|
||||||
|
var err error
|
||||||
|
src, err = os.Open(filename)
|
||||||
|
command.AbortIfError("Failed to open file:", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fset := token.NewFileSet()
|
||||||
|
|
||||||
|
parsedSrc, err := parser.ParseFile(fset, filename, src, 0)
|
||||||
|
command.AbortIfError("Failed to parse source:", err)
|
||||||
|
|
||||||
|
o, err := FromASTFile(fset, parsedSrc)
|
||||||
|
command.AbortIfError("Failed to create outline:", err)
|
||||||
|
|
||||||
|
var oerr error
|
||||||
|
switch format {
|
||||||
|
case "csv":
|
||||||
|
_, oerr = fmt.Print(o)
|
||||||
|
case "indent":
|
||||||
|
_, oerr = fmt.Print(o.StringIndent(indentWidth))
|
||||||
|
case "json":
|
||||||
|
b, err := json.Marshal(o)
|
||||||
|
if err != nil {
|
||||||
|
println(fmt.Sprintf("error marshalling to json: %s", err))
|
||||||
|
}
|
||||||
|
_, oerr = fmt.Println(string(b))
|
||||||
|
default:
|
||||||
|
command.AbortWith("Format %s not accepted", format)
|
||||||
|
}
|
||||||
|
command.AbortIfError("Failed to write outline:", oerr)
|
||||||
|
}
|
232
vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
generated
vendored
Normal file
232
vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
generated
vendored
Normal file
@ -0,0 +1,232 @@
|
|||||||
|
package run
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/formatter"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||||
|
"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BuildRunCommand() command.Command {
|
||||||
|
var suiteConfig = types.NewDefaultSuiteConfig()
|
||||||
|
var reporterConfig = types.NewDefaultReporterConfig()
|
||||||
|
var cliConfig = types.NewDefaultCLIConfig()
|
||||||
|
var goFlagsConfig = types.NewDefaultGoFlagsConfig()
|
||||||
|
|
||||||
|
flags, err := types.BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
interruptHandler := interrupt_handler.NewInterruptHandler(nil)
|
||||||
|
interrupt_handler.SwallowSigQuit()
|
||||||
|
|
||||||
|
return command.Command{
|
||||||
|
Name: "run",
|
||||||
|
Flags: flags,
|
||||||
|
Usage: "ginkgo run <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
|
||||||
|
ShortDoc: "Run the tests in the passed in <PACKAGES> (or the package in the current directory if left blank)",
|
||||||
|
Documentation: "Any arguments after -- will be passed to the test.",
|
||||||
|
DocLink: "running-tests",
|
||||||
|
Command: func(args []string, additionalArgs []string) {
|
||||||
|
var errors []error
|
||||||
|
cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
|
||||||
|
command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
|
||||||
|
|
||||||
|
runner := &SpecRunner{
|
||||||
|
cliConfig: cliConfig,
|
||||||
|
goFlagsConfig: goFlagsConfig,
|
||||||
|
suiteConfig: suiteConfig,
|
||||||
|
reporterConfig: reporterConfig,
|
||||||
|
flags: flags,
|
||||||
|
|
||||||
|
interruptHandler: interruptHandler,
|
||||||
|
}
|
||||||
|
|
||||||
|
runner.RunSpecs(args, additionalArgs)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type SpecRunner struct {
|
||||||
|
suiteConfig types.SuiteConfig
|
||||||
|
reporterConfig types.ReporterConfig
|
||||||
|
cliConfig types.CLIConfig
|
||||||
|
goFlagsConfig types.GoFlagsConfig
|
||||||
|
flags types.GinkgoFlagSet
|
||||||
|
|
||||||
|
interruptHandler *interrupt_handler.InterruptHandler
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
|
||||||
|
suites := internal.FindSuites(args, r.cliConfig, true)
|
||||||
|
skippedSuites := suites.WithState(internal.TestSuiteStateSkippedByFilter)
|
||||||
|
suites = suites.WithoutState(internal.TestSuiteStateSkippedByFilter)
|
||||||
|
|
||||||
|
internal.VerifyCLIAndFrameworkVersion(suites)
|
||||||
|
|
||||||
|
if len(skippedSuites) > 0 {
|
||||||
|
fmt.Println("Will skip:")
|
||||||
|
for _, skippedSuite := range skippedSuites {
|
||||||
|
fmt.Println(" " + skippedSuite.Path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(skippedSuites) > 0 && len(suites) == 0 {
|
||||||
|
command.AbortGracefullyWith("All tests skipped! Exiting...")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(suites) == 0 {
|
||||||
|
command.AbortWith("Found no test suites")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(suites) > 1 && !r.flags.WasSet("succinct") && r.reporterConfig.Verbosity().LT(types.VerbosityLevelVerbose) {
|
||||||
|
r.reporterConfig.Succinct = true
|
||||||
|
}
|
||||||
|
|
||||||
|
t := time.Now()
|
||||||
|
var endTime time.Time
|
||||||
|
if r.suiteConfig.Timeout > 0 {
|
||||||
|
endTime = t.Add(r.suiteConfig.Timeout)
|
||||||
|
}
|
||||||
|
|
||||||
|
iteration := 0
|
||||||
|
OUTER_LOOP:
|
||||||
|
for {
|
||||||
|
if !r.flags.WasSet("seed") {
|
||||||
|
r.suiteConfig.RandomSeed = time.Now().Unix()
|
||||||
|
}
|
||||||
|
if r.cliConfig.RandomizeSuites && len(suites) > 1 {
|
||||||
|
suites = suites.ShuffledCopy(r.suiteConfig.RandomSeed)
|
||||||
|
}
|
||||||
|
|
||||||
|
opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers())
|
||||||
|
opc.StartCompiling(suites, r.goFlagsConfig)
|
||||||
|
|
||||||
|
SUITE_LOOP:
|
||||||
|
for {
|
||||||
|
suiteIdx, suite := opc.Next()
|
||||||
|
if suiteIdx >= len(suites) {
|
||||||
|
break SUITE_LOOP
|
||||||
|
}
|
||||||
|
suites[suiteIdx] = suite
|
||||||
|
|
||||||
|
if r.interruptHandler.Status().Interrupted() {
|
||||||
|
opc.StopAndDrain()
|
||||||
|
break OUTER_LOOP
|
||||||
|
}
|
||||||
|
|
||||||
|
if suites[suiteIdx].State.Is(internal.TestSuiteStateSkippedDueToEmptyCompilation) {
|
||||||
|
fmt.Printf("Skipping %s (no test files)\n", suite.Path)
|
||||||
|
continue SUITE_LOOP
|
||||||
|
}
|
||||||
|
|
||||||
|
if suites[suiteIdx].State.Is(internal.TestSuiteStateFailedToCompile) {
|
||||||
|
fmt.Println(suites[suiteIdx].CompilationError.Error())
|
||||||
|
if !r.cliConfig.KeepGoing {
|
||||||
|
opc.StopAndDrain()
|
||||||
|
}
|
||||||
|
continue SUITE_LOOP
|
||||||
|
}
|
||||||
|
|
||||||
|
if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 && !r.cliConfig.KeepGoing {
|
||||||
|
suites[suiteIdx].State = internal.TestSuiteStateSkippedDueToPriorFailures
|
||||||
|
opc.StopAndDrain()
|
||||||
|
continue SUITE_LOOP
|
||||||
|
}
|
||||||
|
|
||||||
|
if !endTime.IsZero() {
|
||||||
|
r.suiteConfig.Timeout = endTime.Sub(time.Now())
|
||||||
|
if r.suiteConfig.Timeout <= 0 {
|
||||||
|
suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout
|
||||||
|
opc.StopAndDrain()
|
||||||
|
continue SUITE_LOOP
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
suites[suiteIdx] = internal.RunCompiledSuite(suites[suiteIdx], r.suiteConfig, r.reporterConfig, r.cliConfig, r.goFlagsConfig, additionalArgs)
|
||||||
|
}
|
||||||
|
|
||||||
|
if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
|
||||||
|
if iteration > 0 {
|
||||||
|
fmt.Printf("\nTests failed on attempt #%d\n\n", iteration+1)
|
||||||
|
}
|
||||||
|
break OUTER_LOOP
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.cliConfig.UntilItFails {
|
||||||
|
fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration+1, orcMessage(iteration+1))
|
||||||
|
} else if r.cliConfig.Repeat > 0 && iteration < r.cliConfig.Repeat {
|
||||||
|
fmt.Printf("\nAll tests passed...\nThis was attempt %d of %d.\n", iteration+1, r.cliConfig.Repeat+1)
|
||||||
|
} else {
|
||||||
|
break OUTER_LOOP
|
||||||
|
}
|
||||||
|
iteration += 1
|
||||||
|
}
|
||||||
|
|
||||||
|
internal.Cleanup(r.goFlagsConfig, suites...)
|
||||||
|
|
||||||
|
messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, r.cliConfig, r.suiteConfig, r.reporterConfig, r.goFlagsConfig)
|
||||||
|
command.AbortIfError("could not finalize profiles:", err)
|
||||||
|
for _, message := range messages {
|
||||||
|
fmt.Println(message)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("\nGinkgo ran %d %s in %s\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), time.Since(t))
|
||||||
|
|
||||||
|
if suites.CountWithState(internal.TestSuiteStateFailureStates...) == 0 {
|
||||||
|
if suites.AnyHaveProgrammaticFocus() && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
|
||||||
|
fmt.Printf("Test Suite Passed\n")
|
||||||
|
fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
|
||||||
|
command.Abort(command.AbortDetails{ExitCode: types.GINKGO_FOCUS_EXIT_CODE})
|
||||||
|
} else {
|
||||||
|
fmt.Printf("Test Suite Passed\n")
|
||||||
|
command.Abort(command.AbortDetails{})
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
fmt.Fprintln(formatter.ColorableStdOut, "")
|
||||||
|
if len(suites) > 1 && suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
|
||||||
|
fmt.Fprintln(formatter.ColorableStdOut,
|
||||||
|
internal.FailedSuitesReport(suites, formatter.NewWithNoColorBool(r.reporterConfig.NoColor)))
|
||||||
|
}
|
||||||
|
fmt.Printf("Test Suite Failed\n")
|
||||||
|
command.Abort(command.AbortDetails{ExitCode: 1})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// orcMessage returns an increasingly desperate status line for repeated
// --until-it-fails attempts: nothing for the first few iterations, a rotating
// quip for iterations 10 through 29, and a fixed plea afterwards.
func orcMessage(iteration int) string {
	switch {
	case iteration < 10:
		return ""
	case iteration < 30:
		quips := []string{
			"If at first you succeed...",
			"...try, try again.",
			"Looking good!",
			"Still good...",
			"I think your tests are fine....",
			"Yep, still passing",
			"Oh boy, here I go testin' again!",
			"Even the gophers are getting bored",
			"Did you try -race?",
			"Maybe you should stop now?",
			"I'm getting tired...",
			"What if I just made you a sandwich?",
			"Hit ^C, hit ^C, please hit ^C",
			"Make it stop. Please!",
			"Come on! Enough is enough!",
			"Dave, this conversation can serve no purpose anymore. Goodbye.",
			"Just what do you think you're doing, Dave? ",
			"I, Sisyphus",
			"Insanity: doing the same thing over and over again and expecting different results. -Einstein",
			"I guess Einstein never tried to churn butter",
		}
		return quips[iteration-10] + "\n"
	default:
		return "No, seriously... you can probably stop now.\n"
	}
}
|
186
vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go
generated
vendored
Normal file
186
vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go
generated
vendored
Normal file
@ -0,0 +1,186 @@
|
|||||||
|
package unfocus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/parser"
|
||||||
|
"go/token"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BuildUnfocusCommand() command.Command {
|
||||||
|
return command.Command{
|
||||||
|
Name: "unfocus",
|
||||||
|
Usage: "ginkgo unfocus",
|
||||||
|
ShortDoc: "Recursively unfocus any focused tests under the current directory",
|
||||||
|
DocLink: "filtering-specs",
|
||||||
|
Command: func(_ []string, _ []string) {
|
||||||
|
unfocusSpecs()
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func unfocusSpecs() {
|
||||||
|
fmt.Println("Scanning for focus...")
|
||||||
|
|
||||||
|
goFiles := make(chan string)
|
||||||
|
go func() {
|
||||||
|
unfocusDir(goFiles, ".")
|
||||||
|
close(goFiles)
|
||||||
|
}()
|
||||||
|
|
||||||
|
const workers = 10
|
||||||
|
wg := sync.WaitGroup{}
|
||||||
|
wg.Add(workers)
|
||||||
|
|
||||||
|
for i := 0; i < workers; i++ {
|
||||||
|
go func() {
|
||||||
|
for path := range goFiles {
|
||||||
|
unfocusFile(path)
|
||||||
|
}
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func unfocusDir(goFiles chan string, path string) {
|
||||||
|
files, err := os.ReadDir(path)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, f := range files {
|
||||||
|
switch {
|
||||||
|
case f.IsDir() && shouldProcessDir(f.Name()):
|
||||||
|
unfocusDir(goFiles, filepath.Join(path, f.Name()))
|
||||||
|
case !f.IsDir() && shouldProcessFile(f.Name()):
|
||||||
|
goFiles <- filepath.Join(path, f.Name())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func shouldProcessDir(basename string) bool {
|
||||||
|
return basename != "vendor" && !strings.HasPrefix(basename, ".")
|
||||||
|
}
|
||||||
|
|
||||||
|
func shouldProcessFile(basename string) bool {
|
||||||
|
return strings.HasSuffix(basename, ".go")
|
||||||
|
}
|
||||||
|
|
||||||
|
func unfocusFile(path string) {
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error reading file '%s': %s\n", path, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), parser.ParseComments)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error parsing file '%s': %s\n", path, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
eliminations := scanForFocus(ast)
|
||||||
|
if len(eliminations) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("...updating %s\n", path)
|
||||||
|
backup, err := writeBackup(path, data)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error creating backup file: %s\n", err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := updateFile(path, data, eliminations); err != nil {
|
||||||
|
fmt.Printf("error writing file '%s': %s\n", path, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
os.Remove(backup)
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeBackup(path string, data []byte) (string, error) {
|
||||||
|
t, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path))
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("error creating temporary file: %w", err)
|
||||||
|
}
|
||||||
|
defer t.Close()
|
||||||
|
|
||||||
|
if _, err := io.Copy(t, bytes.NewReader(data)); err != nil {
|
||||||
|
return "", fmt.Errorf("error writing to temporary file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.Name(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func updateFile(path string, data []byte, eliminations [][]int64) error {
|
||||||
|
to, err := os.Create(path)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error opening file for writing '%s': %w\n", path, err)
|
||||||
|
}
|
||||||
|
defer to.Close()
|
||||||
|
|
||||||
|
from := bytes.NewReader(data)
|
||||||
|
var cursor int64
|
||||||
|
for _, eliminationRange := range eliminations {
|
||||||
|
positionToEliminate, lengthToEliminate := eliminationRange[0]-1, eliminationRange[1]
|
||||||
|
if _, err := io.CopyN(to, from, positionToEliminate-cursor); err != nil {
|
||||||
|
return fmt.Errorf("error copying data: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cursor = positionToEliminate + lengthToEliminate
|
||||||
|
|
||||||
|
if _, err := from.Seek(lengthToEliminate, io.SeekCurrent); err != nil {
|
||||||
|
return fmt.Errorf("error seeking to position in buffer: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.Copy(to, from); err != nil {
|
||||||
|
return fmt.Errorf("error copying end data: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func scanForFocus(file *ast.File) (eliminations [][]int64) {
|
||||||
|
ast.Inspect(file, func(n ast.Node) bool {
|
||||||
|
if c, ok := n.(*ast.CallExpr); ok {
|
||||||
|
if i, ok := c.Fun.(*ast.Ident); ok {
|
||||||
|
if isFocus(i.Name) {
|
||||||
|
eliminations = append(eliminations, []int64{int64(i.Pos()), 1})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if i, ok := n.(*ast.Ident); ok {
|
||||||
|
if i.Name == "Focus" {
|
||||||
|
eliminations = append(eliminations, []int64{int64(i.Pos()), 6})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
|
return eliminations
|
||||||
|
}
|
||||||
|
|
||||||
|
func isFocus(name string) bool {
|
||||||
|
switch name {
|
||||||
|
case "FDescribe", "FContext", "FIt", "FDescribeTable", "FEntry", "FSpecify", "FWhen":
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
22
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go
generated
vendored
Normal file
22
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
package watch
|
||||||
|
|
||||||
|
import "sort"
|
||||||
|
|
||||||
|
type Delta struct {
|
||||||
|
ModifiedPackages []string
|
||||||
|
|
||||||
|
NewSuites []*Suite
|
||||||
|
RemovedSuites []*Suite
|
||||||
|
modifiedSuites []*Suite
|
||||||
|
}
|
||||||
|
|
||||||
|
type DescendingByDelta []*Suite
|
||||||
|
|
||||||
|
func (a DescendingByDelta) Len() int { return len(a) }
|
||||||
|
func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||||
|
func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() }
|
||||||
|
|
||||||
|
func (d Delta) ModifiedSuites() []*Suite {
|
||||||
|
sort.Sort(DescendingByDelta(d.modifiedSuites))
|
||||||
|
return d.modifiedSuites
|
||||||
|
}
|
75
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go
generated
vendored
Normal file
75
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
package watch
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"regexp"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||||
|
)
|
||||||
|
|
||||||
|
type SuiteErrors map[internal.TestSuite]error
|
||||||
|
|
||||||
|
type DeltaTracker struct {
|
||||||
|
maxDepth int
|
||||||
|
watchRegExp *regexp.Regexp
|
||||||
|
suites map[string]*Suite
|
||||||
|
packageHashes *PackageHashes
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker {
|
||||||
|
return &DeltaTracker{
|
||||||
|
maxDepth: maxDepth,
|
||||||
|
watchRegExp: watchRegExp,
|
||||||
|
packageHashes: NewPackageHashes(watchRegExp),
|
||||||
|
suites: map[string]*Suite{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *DeltaTracker) Delta(suites internal.TestSuites) (delta Delta, errors SuiteErrors) {
|
||||||
|
errors = SuiteErrors{}
|
||||||
|
delta.ModifiedPackages = d.packageHashes.CheckForChanges()
|
||||||
|
|
||||||
|
providedSuitePaths := map[string]bool{}
|
||||||
|
for _, suite := range suites {
|
||||||
|
providedSuitePaths[suite.Path] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
d.packageHashes.StartTrackingUsage()
|
||||||
|
|
||||||
|
for _, suite := range d.suites {
|
||||||
|
if providedSuitePaths[suite.Suite.Path] {
|
||||||
|
if suite.Delta() > 0 {
|
||||||
|
delta.modifiedSuites = append(delta.modifiedSuites, suite)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
delta.RemovedSuites = append(delta.RemovedSuites, suite)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
d.packageHashes.StopTrackingUsageAndPrune()
|
||||||
|
|
||||||
|
for _, suite := range suites {
|
||||||
|
_, ok := d.suites[suite.Path]
|
||||||
|
if !ok {
|
||||||
|
s, err := NewSuite(suite, d.maxDepth, d.packageHashes)
|
||||||
|
if err != nil {
|
||||||
|
errors[suite] = err
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
d.suites[suite.Path] = s
|
||||||
|
delta.NewSuites = append(delta.NewSuites, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return delta, errors
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *DeltaTracker) WillRun(suite internal.TestSuite) error {
|
||||||
|
s, ok := d.suites[suite.Path]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unknown suite %s", suite.Path)
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.MarkAsRunAndRecomputedDependencies(d.maxDepth)
|
||||||
|
}
|
92
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
generated
vendored
Normal file
92
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
generated
vendored
Normal file
@ -0,0 +1,92 @@
|
|||||||
|
package watch
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go/build"
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`)
|
||||||
|
var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing
|
||||||
|
|
||||||
|
type Dependencies struct {
|
||||||
|
deps map[string]int
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewDependencies(path string, maxDepth int) (Dependencies, error) {
|
||||||
|
d := Dependencies{
|
||||||
|
deps: map[string]int{},
|
||||||
|
}
|
||||||
|
|
||||||
|
if maxDepth == 0 {
|
||||||
|
return d, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
err := d.seedWithDepsForPackageAtPath(path)
|
||||||
|
if err != nil {
|
||||||
|
return d, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for depth := 1; depth < maxDepth; depth++ {
|
||||||
|
n := len(d.deps)
|
||||||
|
d.addDepsForDepth(depth)
|
||||||
|
if n == len(d.deps) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return d, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d Dependencies) Dependencies() map[string]int {
|
||||||
|
return d.deps
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d Dependencies) seedWithDepsForPackageAtPath(path string) error {
|
||||||
|
pkg, err := build.ImportDir(path, 0)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
d.resolveAndAdd(pkg.Imports, 1)
|
||||||
|
d.resolveAndAdd(pkg.TestImports, 1)
|
||||||
|
d.resolveAndAdd(pkg.XTestImports, 1)
|
||||||
|
|
||||||
|
delete(d.deps, pkg.Dir)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d Dependencies) addDepsForDepth(depth int) {
|
||||||
|
for dep, depDepth := range d.deps {
|
||||||
|
if depDepth == depth {
|
||||||
|
d.addDepsForDep(dep, depth+1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d Dependencies) addDepsForDep(dep string, depth int) {
|
||||||
|
pkg, err := build.ImportDir(dep, 0)
|
||||||
|
if err != nil {
|
||||||
|
println(err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.resolveAndAdd(pkg.Imports, depth)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d Dependencies) resolveAndAdd(deps []string, depth int) {
|
||||||
|
for _, dep := range deps {
|
||||||
|
pkg, err := build.Import(dep, ".", 0)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) {
|
||||||
|
d.addDepIfNotPresent(pkg.Dir, depth)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d Dependencies) addDepIfNotPresent(dep string, depth int) {
|
||||||
|
_, ok := d.deps[dep]
|
||||||
|
if !ok {
|
||||||
|
d.deps[dep] = depth
|
||||||
|
}
|
||||||
|
}
|
108
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
generated
vendored
Normal file
108
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
|
|||||||
|
package watch
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var goTestRegExp = regexp.MustCompile(`_test\.go$`)
|
||||||
|
|
||||||
|
type PackageHash struct {
|
||||||
|
CodeModifiedTime time.Time
|
||||||
|
TestModifiedTime time.Time
|
||||||
|
Deleted bool
|
||||||
|
|
||||||
|
path string
|
||||||
|
codeHash string
|
||||||
|
testHash string
|
||||||
|
watchRegExp *regexp.Regexp
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash {
|
||||||
|
p := &PackageHash{
|
||||||
|
path: path,
|
||||||
|
watchRegExp: watchRegExp,
|
||||||
|
}
|
||||||
|
|
||||||
|
p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes()
|
||||||
|
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PackageHash) CheckForChanges() bool {
|
||||||
|
codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes()
|
||||||
|
|
||||||
|
if deleted {
|
||||||
|
if !p.Deleted {
|
||||||
|
t := time.Now()
|
||||||
|
p.CodeModifiedTime = t
|
||||||
|
p.TestModifiedTime = t
|
||||||
|
}
|
||||||
|
p.Deleted = true
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
modified := false
|
||||||
|
p.Deleted = false
|
||||||
|
|
||||||
|
if p.codeHash != codeHash {
|
||||||
|
p.CodeModifiedTime = codeModifiedTime
|
||||||
|
modified = true
|
||||||
|
}
|
||||||
|
if p.testHash != testHash {
|
||||||
|
p.TestModifiedTime = testModifiedTime
|
||||||
|
modified = true
|
||||||
|
}
|
||||||
|
|
||||||
|
p.codeHash = codeHash
|
||||||
|
p.testHash = testHash
|
||||||
|
return modified
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) {
|
||||||
|
entries, err := os.ReadDir(p.path)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
deleted = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, entry := range entries {
|
||||||
|
if entry.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
info, err := entry.Info()
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if goTestRegExp.Match([]byte(info.Name())) {
|
||||||
|
testHash += p.hashForFileInfo(info)
|
||||||
|
if info.ModTime().After(testModifiedTime) {
|
||||||
|
testModifiedTime = info.ModTime()
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.watchRegExp.Match([]byte(info.Name())) {
|
||||||
|
codeHash += p.hashForFileInfo(info)
|
||||||
|
if info.ModTime().After(codeModifiedTime) {
|
||||||
|
codeModifiedTime = info.ModTime()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
testHash += codeHash
|
||||||
|
if codeModifiedTime.After(testModifiedTime) {
|
||||||
|
testModifiedTime = codeModifiedTime
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
|
||||||
|
return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
|
||||||
|
}
|
85
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go
generated
vendored
Normal file
85
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
|||||||
|
package watch
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
type PackageHashes struct {
|
||||||
|
PackageHashes map[string]*PackageHash
|
||||||
|
usedPaths map[string]bool
|
||||||
|
watchRegExp *regexp.Regexp
|
||||||
|
lock *sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes {
|
||||||
|
return &PackageHashes{
|
||||||
|
PackageHashes: map[string]*PackageHash{},
|
||||||
|
usedPaths: nil,
|
||||||
|
watchRegExp: watchRegExp,
|
||||||
|
lock: &sync.Mutex{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PackageHashes) CheckForChanges() []string {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
|
||||||
|
modified := []string{}
|
||||||
|
|
||||||
|
for _, packageHash := range p.PackageHashes {
|
||||||
|
if packageHash.CheckForChanges() {
|
||||||
|
modified = append(modified, packageHash.path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return modified
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PackageHashes) Add(path string) *PackageHash {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
|
||||||
|
path, _ = filepath.Abs(path)
|
||||||
|
_, ok := p.PackageHashes[path]
|
||||||
|
if !ok {
|
||||||
|
p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp)
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.usedPaths != nil {
|
||||||
|
p.usedPaths[path] = true
|
||||||
|
}
|
||||||
|
return p.PackageHashes[path]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PackageHashes) Get(path string) *PackageHash {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
|
||||||
|
path, _ = filepath.Abs(path)
|
||||||
|
if p.usedPaths != nil {
|
||||||
|
p.usedPaths[path] = true
|
||||||
|
}
|
||||||
|
return p.PackageHashes[path]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PackageHashes) StartTrackingUsage() {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
|
||||||
|
p.usedPaths = map[string]bool{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PackageHashes) StopTrackingUsageAndPrune() {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
|
||||||
|
for path := range p.PackageHashes {
|
||||||
|
if !p.usedPaths[path] {
|
||||||
|
delete(p.PackageHashes, path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
p.usedPaths = nil
|
||||||
|
}
|
87
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go
generated
vendored
Normal file
87
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go
generated
vendored
Normal file
@ -0,0 +1,87 @@
|
|||||||
|
package watch
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Suite struct {
|
||||||
|
Suite internal.TestSuite
|
||||||
|
RunTime time.Time
|
||||||
|
Dependencies Dependencies
|
||||||
|
|
||||||
|
sharedPackageHashes *PackageHashes
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewSuite(suite internal.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) {
|
||||||
|
deps, err := NewDependencies(suite.Path, maxDepth)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
sharedPackageHashes.Add(suite.Path)
|
||||||
|
for dep := range deps.Dependencies() {
|
||||||
|
sharedPackageHashes.Add(dep)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Suite{
|
||||||
|
Suite: suite,
|
||||||
|
Dependencies: deps,
|
||||||
|
|
||||||
|
sharedPackageHashes: sharedPackageHashes,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Suite) Delta() float64 {
|
||||||
|
delta := s.delta(s.Suite.Path, true, 0) * 1000
|
||||||
|
for dep, depth := range s.Dependencies.Dependencies() {
|
||||||
|
delta += s.delta(dep, false, depth)
|
||||||
|
}
|
||||||
|
return delta
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error {
|
||||||
|
s.RunTime = time.Now()
|
||||||
|
|
||||||
|
deps, err := NewDependencies(s.Suite.Path, maxDepth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
s.sharedPackageHashes.Add(s.Suite.Path)
|
||||||
|
for dep := range deps.Dependencies() {
|
||||||
|
s.sharedPackageHashes.Add(dep)
|
||||||
|
}
|
||||||
|
|
||||||
|
s.Dependencies = deps
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Suite) Description() string {
|
||||||
|
numDeps := len(s.Dependencies.Dependencies())
|
||||||
|
pluralizer := "ies"
|
||||||
|
if numDeps == 1 {
|
||||||
|
pluralizer = "y"
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 {
|
||||||
|
return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Suite) dt(packagePath string, includeTests bool) time.Duration {
|
||||||
|
packageHash := s.sharedPackageHashes.Get(packagePath)
|
||||||
|
var modifiedTime time.Time
|
||||||
|
if includeTests {
|
||||||
|
modifiedTime = packageHash.TestModifiedTime
|
||||||
|
} else {
|
||||||
|
modifiedTime = packageHash.CodeModifiedTime
|
||||||
|
}
|
||||||
|
|
||||||
|
return modifiedTime.Sub(s.RunTime)
|
||||||
|
}
|
192
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
generated
vendored
Normal file
192
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
generated
vendored
Normal file
@ -0,0 +1,192 @@
|
|||||||
|
package watch
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/formatter"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||||
|
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||||
|
"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BuildWatchCommand() command.Command {
|
||||||
|
var suiteConfig = types.NewDefaultSuiteConfig()
|
||||||
|
var reporterConfig = types.NewDefaultReporterConfig()
|
||||||
|
var cliConfig = types.NewDefaultCLIConfig()
|
||||||
|
var goFlagsConfig = types.NewDefaultGoFlagsConfig()
|
||||||
|
|
||||||
|
flags, err := types.BuildWatchCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
interruptHandler := interrupt_handler.NewInterruptHandler(nil)
|
||||||
|
interrupt_handler.SwallowSigQuit()
|
||||||
|
|
||||||
|
return command.Command{
|
||||||
|
Name: "watch",
|
||||||
|
Flags: flags,
|
||||||
|
Usage: "ginkgo watch <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
|
||||||
|
ShortDoc: "Watch the passed in <PACKAGES> and runs their tests whenever changes occur.",
|
||||||
|
Documentation: "Any arguments after -- will be passed to the test.",
|
||||||
|
DocLink: "watching-for-changes",
|
||||||
|
Command: func(args []string, additionalArgs []string) {
|
||||||
|
var errors []error
|
||||||
|
cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
|
||||||
|
command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
|
||||||
|
|
||||||
|
watcher := &SpecWatcher{
|
||||||
|
cliConfig: cliConfig,
|
||||||
|
goFlagsConfig: goFlagsConfig,
|
||||||
|
suiteConfig: suiteConfig,
|
||||||
|
reporterConfig: reporterConfig,
|
||||||
|
flags: flags,
|
||||||
|
|
||||||
|
interruptHandler: interruptHandler,
|
||||||
|
}
|
||||||
|
|
||||||
|
watcher.WatchSpecs(args, additionalArgs)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type SpecWatcher struct {
|
||||||
|
suiteConfig types.SuiteConfig
|
||||||
|
reporterConfig types.ReporterConfig
|
||||||
|
cliConfig types.CLIConfig
|
||||||
|
goFlagsConfig types.GoFlagsConfig
|
||||||
|
flags types.GinkgoFlagSet
|
||||||
|
|
||||||
|
interruptHandler *interrupt_handler.InterruptHandler
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
|
||||||
|
suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
|
||||||
|
|
||||||
|
internal.VerifyCLIAndFrameworkVersion(suites)
|
||||||
|
|
||||||
|
if len(suites) == 0 {
|
||||||
|
command.AbortWith("Found no test suites")
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), w.cliConfig.Depth)
|
||||||
|
deltaTracker := NewDeltaTracker(w.cliConfig.Depth, regexp.MustCompile(w.cliConfig.WatchRegExp))
|
||||||
|
delta, errors := deltaTracker.Delta(suites)
|
||||||
|
|
||||||
|
fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites)))
|
||||||
|
for _, suite := range delta.NewSuites {
|
||||||
|
fmt.Println(" " + suite.Description())
|
||||||
|
}
|
||||||
|
|
||||||
|
for suite, err := range errors {
|
||||||
|
fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(suites) == 1 {
|
||||||
|
w.updateSeed()
|
||||||
|
w.compileAndRun(suites[0], additionalArgs)
|
||||||
|
}
|
||||||
|
|
||||||
|
ticker := time.NewTicker(time.Second)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
|
||||||
|
delta, _ := deltaTracker.Delta(suites)
|
||||||
|
coloredStream := formatter.ColorableStdOut
|
||||||
|
|
||||||
|
suites = internal.TestSuites{}
|
||||||
|
|
||||||
|
if len(delta.NewSuites) > 0 {
|
||||||
|
fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected %d new %s:{{/}}", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites))))
|
||||||
|
for _, suite := range delta.NewSuites {
|
||||||
|
suites = append(suites, suite.Suite)
|
||||||
|
fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
modifiedSuites := delta.ModifiedSuites()
|
||||||
|
if len(modifiedSuites) > 0 {
|
||||||
|
fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected changes in:{{/}}"))
|
||||||
|
for _, pkg := range delta.ModifiedPackages {
|
||||||
|
fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", pkg))
|
||||||
|
}
|
||||||
|
fmt.Fprintln(coloredStream, formatter.F("{{green}}Will run %d %s:{{/}}", len(modifiedSuites), internal.PluralizedWord("suite", "suites", len(modifiedSuites))))
|
||||||
|
for _, suite := range modifiedSuites {
|
||||||
|
suites = append(suites, suite.Suite)
|
||||||
|
fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description()))
|
||||||
|
}
|
||||||
|
fmt.Fprintln(coloredStream, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(suites) == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
w.updateSeed()
|
||||||
|
w.computeSuccinctMode(len(suites))
|
||||||
|
for idx := range suites {
|
||||||
|
if w.interruptHandler.Status().Interrupted() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
deltaTracker.WillRun(suites[idx])
|
||||||
|
suites[idx] = w.compileAndRun(suites[idx], additionalArgs)
|
||||||
|
}
|
||||||
|
color := "{{green}}"
|
||||||
|
if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
|
||||||
|
color = "{{red}}"
|
||||||
|
}
|
||||||
|
fmt.Fprintln(coloredStream, formatter.F(color+"\nDone. Resuming watch...{{/}}"))
|
||||||
|
|
||||||
|
messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, w.cliConfig, w.suiteConfig, w.reporterConfig, w.goFlagsConfig)
|
||||||
|
command.AbortIfError("could not finalize profiles:", err)
|
||||||
|
for _, message := range messages {
|
||||||
|
fmt.Println(message)
|
||||||
|
}
|
||||||
|
case <-w.interruptHandler.Status().Channel:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite {
|
||||||
|
suite = internal.CompileSuite(suite, w.goFlagsConfig)
|
||||||
|
if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
|
||||||
|
fmt.Println(suite.CompilationError.Error())
|
||||||
|
return suite
|
||||||
|
}
|
||||||
|
if w.interruptHandler.Status().Interrupted() {
|
||||||
|
return suite
|
||||||
|
}
|
||||||
|
suite = internal.RunCompiledSuite(suite, w.suiteConfig, w.reporterConfig, w.cliConfig, w.goFlagsConfig, additionalArgs)
|
||||||
|
internal.Cleanup(w.goFlagsConfig, suite)
|
||||||
|
return suite
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *SpecWatcher) computeSuccinctMode(numSuites int) {
|
||||||
|
if w.reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) {
|
||||||
|
w.reporterConfig.Succinct = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if w.flags.WasSet("succinct") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if numSuites == 1 {
|
||||||
|
w.reporterConfig.Succinct = false
|
||||||
|
}
|
||||||
|
|
||||||
|
if numSuites > 1 {
|
||||||
|
w.reporterConfig.Succinct = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *SpecWatcher) updateSeed() {
|
||||||
|
if !w.flags.WasSet("seed") {
|
||||||
|
w.suiteConfig.RandomSeed = time.Now().Unix()
|
||||||
|
}
|
||||||
|
}
|
8
vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go
generated
vendored
Normal file
8
vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
//go:build ginkgoclidependencies
|
||||||
|
// +build ginkgoclidependencies
|
||||||
|
|
||||||
|
package ginkgo
|
||||||
|
|
||||||
|
import (
|
||||||
|
_ "github.com/onsi/ginkgo/v2/ginkgo"
|
||||||
|
)
|
63
vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
generated
vendored
63
vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
generated
vendored
@ -1,26 +1,42 @@
|
|||||||
package ginkgo
|
package ginkgo
|
||||||
|
|
||||||
import "github.com/onsi/ginkgo/v2/internal/testingtproxy"
|
import (
|
||||||
|
"github.com/onsi/ginkgo/v2/internal/testingtproxy"
|
||||||
|
)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
GinkgoT() implements an interface analogous to *testing.T and can be used with
|
GinkgoT() implements an interface that allows third party libraries to integrate with and build on top of Ginkgo.
|
||||||
third-party libraries that accept *testing.T through an interface.
|
|
||||||
|
GinkgoT() is analogous to *testing.T and implements the majority of *testing.T's methods. It can be typically be used a a drop-in replacement with third-party libraries that accept *testing.T through an interface.
|
||||||
|
|
||||||
GinkgoT() takes an optional offset argument that can be used to get the
|
GinkgoT() takes an optional offset argument that can be used to get the
|
||||||
correct line number associated with the failure.
|
correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately
|
||||||
|
|
||||||
You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries
|
You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries
|
||||||
*/
|
*/
|
||||||
func GinkgoT(optionalOffset ...int) GinkgoTInterface {
|
func GinkgoT(optionalOffset ...int) FullGinkgoTInterface {
|
||||||
offset := 3
|
offset := 3
|
||||||
if len(optionalOffset) > 0 {
|
if len(optionalOffset) > 0 {
|
||||||
offset = optionalOffset[0]
|
offset = optionalOffset[0]
|
||||||
}
|
}
|
||||||
return testingtproxy.New(GinkgoWriter, Fail, Skip, DeferCleanup, CurrentSpecReport, offset)
|
return testingtproxy.New(
|
||||||
|
GinkgoWriter,
|
||||||
|
Fail,
|
||||||
|
Skip,
|
||||||
|
DeferCleanup,
|
||||||
|
CurrentSpecReport,
|
||||||
|
AddReportEntry,
|
||||||
|
GinkgoRecover,
|
||||||
|
AttachProgressReporter,
|
||||||
|
suiteConfig.RandomSeed,
|
||||||
|
suiteConfig.ParallelProcess,
|
||||||
|
suiteConfig.ParallelTotal,
|
||||||
|
reporterConfig.NoColor,
|
||||||
|
offset)
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
The interface returned by GinkgoT(). This covers most of the methods in the testing package's T.
|
The portion of the interface returned by GinkgoT() that maps onto methods in the testing package's T.
|
||||||
*/
|
*/
|
||||||
type GinkgoTInterface interface {
|
type GinkgoTInterface interface {
|
||||||
Cleanup(func())
|
Cleanup(func())
|
||||||
@ -43,3 +59,36 @@ type GinkgoTInterface interface {
|
|||||||
Skipped() bool
|
Skipped() bool
|
||||||
TempDir() string
|
TempDir() string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Additional methods returned by GinkgoT() that provide deeper integration points into Ginkgo
|
||||||
|
*/
|
||||||
|
type FullGinkgoTInterface interface {
|
||||||
|
GinkgoTInterface
|
||||||
|
|
||||||
|
AddReportEntryVisibilityAlways(name string, args ...any)
|
||||||
|
AddReportEntryVisibilityFailureOrVerbose(name string, args ...any)
|
||||||
|
AddReportEntryVisibilityNever(name string, args ...any)
|
||||||
|
|
||||||
|
//Prints to the GinkgoWriter
|
||||||
|
Print(a ...interface{})
|
||||||
|
Printf(format string, a ...interface{})
|
||||||
|
Println(a ...interface{})
|
||||||
|
|
||||||
|
//Provides access to Ginkgo's color formatting, correctly configured to match the color settings specified in the invocation of ginkgo
|
||||||
|
F(format string, args ...any) string
|
||||||
|
Fi(indentation uint, format string, args ...any) string
|
||||||
|
Fiw(indentation uint, maxWidth uint, format string, args ...any) string
|
||||||
|
|
||||||
|
//Generates a formatted string version of the current spec's timeline
|
||||||
|
RenderTimeline() string
|
||||||
|
|
||||||
|
GinkgoRecover()
|
||||||
|
DeferCleanup(args ...any)
|
||||||
|
|
||||||
|
RandomSeed() int64
|
||||||
|
ParallelProcess() int
|
||||||
|
ParallelTotal() int
|
||||||
|
|
||||||
|
AttachProgressReporter(func() string) func()
|
||||||
|
}
|
||||||
|
40
vendor/github.com/onsi/ginkgo/v2/internal/group.go
generated
vendored
40
vendor/github.com/onsi/ginkgo/v2/internal/group.go
generated
vendored
@ -95,6 +95,8 @@ type group struct {
|
|||||||
runOnceTracker map[runOncePair]types.SpecState
|
runOnceTracker map[runOncePair]types.SpecState
|
||||||
|
|
||||||
succeeded bool
|
succeeded bool
|
||||||
|
failedInARunOnceBefore bool
|
||||||
|
continueOnFailure bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func newGroup(suite *Suite) *group {
|
func newGroup(suite *Suite) *group {
|
||||||
@ -103,6 +105,8 @@ func newGroup(suite *Suite) *group {
|
|||||||
runOncePairs: map[uint]runOncePairs{},
|
runOncePairs: map[uint]runOncePairs{},
|
||||||
runOnceTracker: map[runOncePair]types.SpecState{},
|
runOnceTracker: map[runOncePair]types.SpecState{},
|
||||||
succeeded: true,
|
succeeded: true,
|
||||||
|
failedInARunOnceBefore: false,
|
||||||
|
continueOnFailure: false,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -116,6 +120,7 @@ func (g *group) initialReportForSpec(spec Spec) types.SpecReport {
|
|||||||
LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
|
LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
|
||||||
LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
|
LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
|
||||||
ParallelProcess: g.suite.config.ParallelProcess,
|
ParallelProcess: g.suite.config.ParallelProcess,
|
||||||
|
RunningInParallel: g.suite.isRunningInParallel(),
|
||||||
IsSerial: spec.Nodes.HasNodeMarkedSerial(),
|
IsSerial: spec.Nodes.HasNodeMarkedSerial(),
|
||||||
IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
|
IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
|
||||||
MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(),
|
MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(),
|
||||||
@ -136,10 +141,14 @@ func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) {
|
|||||||
if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) {
|
if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) {
|
||||||
return types.SpecStateSkipped, types.Failure{}
|
return types.SpecStateSkipped, types.Failure{}
|
||||||
}
|
}
|
||||||
if !g.succeeded {
|
if !g.succeeded && !g.continueOnFailure {
|
||||||
return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
|
return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
|
||||||
"Spec skipped because an earlier spec in an ordered container failed")
|
"Spec skipped because an earlier spec in an ordered container failed")
|
||||||
}
|
}
|
||||||
|
if g.failedInARunOnceBefore && g.continueOnFailure {
|
||||||
|
return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
|
||||||
|
"Spec skipped because a BeforeAll node failed")
|
||||||
|
}
|
||||||
beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach)
|
beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach)
|
||||||
for _, pair := range beforeOncePairs {
|
for _, pair := range beforeOncePairs {
|
||||||
if g.runOnceTracker[pair].Is(types.SpecStateSkipped) {
|
if g.runOnceTracker[pair].Is(types.SpecStateSkipped) {
|
||||||
@ -167,7 +176,8 @@ func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool {
|
|||||||
return lastSpecID == specID
|
return lastSpecID == specID
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
|
func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) bool {
|
||||||
|
failedInARunOnceBefore := false
|
||||||
pairs := g.runOncePairs[spec.SubjectID()]
|
pairs := g.runOncePairs[spec.SubjectID()]
|
||||||
|
|
||||||
nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll)
|
nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll)
|
||||||
@ -193,6 +203,7 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
|
|||||||
}
|
}
|
||||||
if g.suite.currentSpecReport.State != types.SpecStatePassed {
|
if g.suite.currentSpecReport.State != types.SpecStatePassed {
|
||||||
terminatingNode, terminatingPair = node, oncePair
|
terminatingNode, terminatingPair = node, oncePair
|
||||||
|
failedInARunOnceBefore = !terminatingPair.isZero()
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -215,7 +226,7 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
|
|||||||
//this node has already been run on this attempt, don't rerun it
|
//this node has already been run on this attempt, don't rerun it
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
pair := runOncePair{}
|
var pair runOncePair
|
||||||
switch node.NodeType {
|
switch node.NodeType {
|
||||||
case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll:
|
case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll:
|
||||||
// check if we were generated in an AfterNode that has already run
|
// check if we were generated in an AfterNode that has already run
|
||||||
@ -245,9 +256,13 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
|
|||||||
if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel {
|
if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel {
|
||||||
return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run
|
return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run
|
||||||
}
|
}
|
||||||
case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed...
|
case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: // the spec has failed...
|
||||||
if isFinalAttempt {
|
if isFinalAttempt {
|
||||||
return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run
|
if g.continueOnFailure {
|
||||||
|
return isLastSpecWithPair || failedInARunOnceBefore //...we're configured to continue on failures - so we should only run if we're the last spec for this pair or if we failed in a runOnceBefore (which means we _are_ the last spec to run)
|
||||||
|
} else {
|
||||||
|
return true //...this was the last attempt and continueOnFailure is false therefore we are the last spec to run and so the AfterNode should run
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if !terminatingPair.isZero() { // ...and it failed in a run-once. which will be running again
|
if !terminatingPair.isZero() { // ...and it failed in a run-once. which will be running again
|
||||||
if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) {
|
if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) {
|
||||||
@ -280,10 +295,12 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
|
|||||||
includeDeferCleanups = true
|
includeDeferCleanups = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return failedInARunOnceBefore
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *group) run(specs Specs) {
|
func (g *group) run(specs Specs) {
|
||||||
g.specs = specs
|
g.specs = specs
|
||||||
|
g.continueOnFailure = specs[0].Nodes.FirstNodeMarkedOrdered().MarkedContinueOnFailure
|
||||||
for _, spec := range g.specs {
|
for _, spec := range g.specs {
|
||||||
g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec)
|
g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec)
|
||||||
}
|
}
|
||||||
@ -300,8 +317,8 @@ func (g *group) run(specs Specs) {
|
|||||||
skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending)
|
skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending)
|
||||||
|
|
||||||
g.suite.currentSpecReport.StartTime = time.Now()
|
g.suite.currentSpecReport.StartTime = time.Now()
|
||||||
|
failedInARunOnceBefore := false
|
||||||
if !skip {
|
if !skip {
|
||||||
|
|
||||||
var maxAttempts = 1
|
var maxAttempts = 1
|
||||||
|
|
||||||
if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
|
if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
|
||||||
@ -319,14 +336,14 @@ func (g *group) run(specs Specs) {
|
|||||||
g.suite.outputInterceptor.StartInterceptingOutput()
|
g.suite.outputInterceptor.StartInterceptingOutput()
|
||||||
if attempt > 0 {
|
if attempt > 0 {
|
||||||
if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
|
if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
|
||||||
fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Passed. Repeating...\n", attempt)
|
g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRepeat, Attempt: attempt})
|
||||||
}
|
}
|
||||||
if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
|
if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
|
||||||
fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt)
|
g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRetry, Attempt: attempt})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
g.attemptSpec(attempt == maxAttempts-1, spec)
|
failedInARunOnceBefore = g.attemptSpec(attempt == maxAttempts-1, spec)
|
||||||
|
|
||||||
g.suite.currentSpecReport.EndTime = time.Now()
|
g.suite.currentSpecReport.EndTime = time.Now()
|
||||||
g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime)
|
g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime)
|
||||||
@ -341,6 +358,10 @@ func (g *group) run(specs Specs) {
|
|||||||
if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
|
if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
|
||||||
if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
|
if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
|
||||||
break
|
break
|
||||||
|
} else if attempt < maxAttempts-1 {
|
||||||
|
af := types.AdditionalFailure{State: g.suite.currentSpecReport.State, Failure: g.suite.currentSpecReport.Failure}
|
||||||
|
af.Failure.Message = fmt.Sprintf("Failure recorded during attempt %d:\n%s", attempt+1, af.Failure.Message)
|
||||||
|
g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, af)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -350,6 +371,7 @@ func (g *group) run(specs Specs) {
|
|||||||
g.suite.processCurrentSpecReport()
|
g.suite.processCurrentSpecReport()
|
||||||
if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
|
if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
|
||||||
g.succeeded = false
|
g.succeeded = false
|
||||||
|
g.failedInARunOnceBefore = g.failedInARunOnceBefore || failedInARunOnceBefore
|
||||||
}
|
}
|
||||||
g.suite.selectiveLock.Lock()
|
g.suite.selectiveLock.Lock()
|
||||||
g.suite.currentSpecReport = types.SpecReport{}
|
g.suite.currentSpecReport = types.SpecReport{}
|
||||||
|
23
vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
generated
vendored
23
vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
generated
vendored
@ -10,7 +10,7 @@ import (
|
|||||||
"github.com/onsi/ginkgo/v2/internal/parallel_support"
|
"github.com/onsi/ginkgo/v2/internal/parallel_support"
|
||||||
)
|
)
|
||||||
|
|
||||||
const ABORT_POLLING_INTERVAL = 500 * time.Millisecond
|
var ABORT_POLLING_INTERVAL = 500 * time.Millisecond
|
||||||
|
|
||||||
type InterruptCause uint
|
type InterruptCause uint
|
||||||
|
|
||||||
@ -69,6 +69,7 @@ type InterruptHandler struct {
|
|||||||
client parallel_support.Client
|
client parallel_support.Client
|
||||||
stop chan interface{}
|
stop chan interface{}
|
||||||
signals []os.Signal
|
signals []os.Signal
|
||||||
|
requestAbortCheck chan interface{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler {
|
func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler {
|
||||||
@ -79,6 +80,7 @@ func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *
|
|||||||
c: make(chan interface{}),
|
c: make(chan interface{}),
|
||||||
lock: &sync.Mutex{},
|
lock: &sync.Mutex{},
|
||||||
stop: make(chan interface{}),
|
stop: make(chan interface{}),
|
||||||
|
requestAbortCheck: make(chan interface{}),
|
||||||
client: client,
|
client: client,
|
||||||
signals: signals,
|
signals: signals,
|
||||||
}
|
}
|
||||||
@ -109,6 +111,12 @@ func (handler *InterruptHandler) registerForInterrupts() {
|
|||||||
pollTicker.Stop()
|
pollTicker.Stop()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
case <-handler.requestAbortCheck:
|
||||||
|
if handler.client.ShouldAbort() {
|
||||||
|
close(abortChannel)
|
||||||
|
pollTicker.Stop()
|
||||||
|
return
|
||||||
|
}
|
||||||
case <-handler.stop:
|
case <-handler.stop:
|
||||||
pollTicker.Stop()
|
pollTicker.Stop()
|
||||||
return
|
return
|
||||||
@ -152,11 +160,18 @@ func (handler *InterruptHandler) registerForInterrupts() {
|
|||||||
|
|
||||||
func (handler *InterruptHandler) Status() InterruptStatus {
|
func (handler *InterruptHandler) Status() InterruptStatus {
|
||||||
handler.lock.Lock()
|
handler.lock.Lock()
|
||||||
defer handler.lock.Unlock()
|
status := InterruptStatus{
|
||||||
|
|
||||||
return InterruptStatus{
|
|
||||||
Level: handler.level,
|
Level: handler.level,
|
||||||
Channel: handler.c,
|
Channel: handler.c,
|
||||||
Cause: handler.cause,
|
Cause: handler.cause,
|
||||||
}
|
}
|
||||||
|
handler.lock.Unlock()
|
||||||
|
|
||||||
|
if handler.client != nil && handler.client.ShouldAbort() && !status.Interrupted() {
|
||||||
|
close(handler.requestAbortCheck)
|
||||||
|
<-status.Channel
|
||||||
|
return handler.Status()
|
||||||
|
}
|
||||||
|
|
||||||
|
return status
|
||||||
}
|
}
|
||||||
|
43
vendor/github.com/onsi/ginkgo/v2/internal/node.go
generated
vendored
43
vendor/github.com/onsi/ginkgo/v2/internal/node.go
generated
vendored
@ -45,14 +45,14 @@ type Node struct {
|
|||||||
SynchronizedAfterSuiteProc1BodyHasContext bool
|
SynchronizedAfterSuiteProc1BodyHasContext bool
|
||||||
|
|
||||||
ReportEachBody func(types.SpecReport)
|
ReportEachBody func(types.SpecReport)
|
||||||
ReportAfterSuiteBody func(types.Report)
|
ReportSuiteBody func(types.Report)
|
||||||
|
|
||||||
MarkedFocus bool
|
MarkedFocus bool
|
||||||
MarkedPending bool
|
MarkedPending bool
|
||||||
MarkedSerial bool
|
MarkedSerial bool
|
||||||
MarkedOrdered bool
|
MarkedOrdered bool
|
||||||
|
MarkedContinueOnFailure bool
|
||||||
MarkedOncePerOrdered bool
|
MarkedOncePerOrdered bool
|
||||||
MarkedSuppressProgressReporting bool
|
|
||||||
FlakeAttempts int
|
FlakeAttempts int
|
||||||
MustPassRepeatedly int
|
MustPassRepeatedly int
|
||||||
Labels Labels
|
Labels Labels
|
||||||
@ -70,6 +70,7 @@ type focusType bool
|
|||||||
type pendingType bool
|
type pendingType bool
|
||||||
type serialType bool
|
type serialType bool
|
||||||
type orderedType bool
|
type orderedType bool
|
||||||
|
type continueOnFailureType bool
|
||||||
type honorsOrderedType bool
|
type honorsOrderedType bool
|
||||||
type suppressProgressReporting bool
|
type suppressProgressReporting bool
|
||||||
|
|
||||||
@ -77,6 +78,7 @@ const Focus = focusType(true)
|
|||||||
const Pending = pendingType(true)
|
const Pending = pendingType(true)
|
||||||
const Serial = serialType(true)
|
const Serial = serialType(true)
|
||||||
const Ordered = orderedType(true)
|
const Ordered = orderedType(true)
|
||||||
|
const ContinueOnFailure = continueOnFailureType(true)
|
||||||
const OncePerOrdered = honorsOrderedType(true)
|
const OncePerOrdered = honorsOrderedType(true)
|
||||||
const SuppressProgressReporting = suppressProgressReporting(true)
|
const SuppressProgressReporting = suppressProgressReporting(true)
|
||||||
|
|
||||||
@ -91,6 +93,10 @@ type NodeTimeout time.Duration
|
|||||||
type SpecTimeout time.Duration
|
type SpecTimeout time.Duration
|
||||||
type GracePeriod time.Duration
|
type GracePeriod time.Duration
|
||||||
|
|
||||||
|
func (l Labels) MatchesLabelFilter(query string) bool {
|
||||||
|
return types.MustParseLabelFilter(query)(l)
|
||||||
|
}
|
||||||
|
|
||||||
func UnionOfLabels(labels ...Labels) Labels {
|
func UnionOfLabels(labels ...Labels) Labels {
|
||||||
out := Labels{}
|
out := Labels{}
|
||||||
seen := map[string]bool{}
|
seen := map[string]bool{}
|
||||||
@ -134,6 +140,8 @@ func isDecoration(arg interface{}) bool {
|
|||||||
return true
|
return true
|
||||||
case t == reflect.TypeOf(Ordered):
|
case t == reflect.TypeOf(Ordered):
|
||||||
return true
|
return true
|
||||||
|
case t == reflect.TypeOf(ContinueOnFailure):
|
||||||
|
return true
|
||||||
case t == reflect.TypeOf(OncePerOrdered):
|
case t == reflect.TypeOf(OncePerOrdered):
|
||||||
return true
|
return true
|
||||||
case t == reflect.TypeOf(SuppressProgressReporting):
|
case t == reflect.TypeOf(SuppressProgressReporting):
|
||||||
@ -242,16 +250,18 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
|
|||||||
if !nodeType.Is(types.NodeTypeContainer) {
|
if !nodeType.Is(types.NodeTypeContainer) {
|
||||||
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered"))
|
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered"))
|
||||||
}
|
}
|
||||||
|
case t == reflect.TypeOf(ContinueOnFailure):
|
||||||
|
node.MarkedContinueOnFailure = bool(arg.(continueOnFailureType))
|
||||||
|
if !nodeType.Is(types.NodeTypeContainer) {
|
||||||
|
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "ContinueOnFailure"))
|
||||||
|
}
|
||||||
case t == reflect.TypeOf(OncePerOrdered):
|
case t == reflect.TypeOf(OncePerOrdered):
|
||||||
node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType))
|
node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType))
|
||||||
if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
|
if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
|
||||||
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered"))
|
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered"))
|
||||||
}
|
}
|
||||||
case t == reflect.TypeOf(SuppressProgressReporting):
|
case t == reflect.TypeOf(SuppressProgressReporting):
|
||||||
node.MarkedSuppressProgressReporting = bool(arg.(suppressProgressReporting))
|
deprecationTracker.TrackDeprecation(types.Deprecations.SuppressProgressReporting())
|
||||||
if nodeType.Is(types.NodeTypeContainer) {
|
|
||||||
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SuppressProgressReporting"))
|
|
||||||
}
|
|
||||||
case t == reflect.TypeOf(FlakeAttempts(0)):
|
case t == reflect.TypeOf(FlakeAttempts(0)):
|
||||||
node.FlakeAttempts = int(arg.(FlakeAttempts))
|
node.FlakeAttempts = int(arg.(FlakeAttempts))
|
||||||
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
|
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
|
||||||
@ -321,9 +331,9 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
|
|||||||
trackedFunctionError = true
|
trackedFunctionError = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
} else if nodeType.Is(types.NodeTypeReportAfterSuite) {
|
} else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
|
||||||
if node.ReportAfterSuiteBody == nil {
|
if node.ReportSuiteBody == nil {
|
||||||
node.ReportAfterSuiteBody = arg.(func(types.Report))
|
node.ReportSuiteBody = arg.(func(types.Report))
|
||||||
} else {
|
} else {
|
||||||
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
|
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
|
||||||
trackedFunctionError = true
|
trackedFunctionError = true
|
||||||
@ -390,13 +400,17 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
|
|||||||
appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
|
appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if node.MarkedContinueOnFailure && !node.MarkedOrdered {
|
||||||
|
appendError(types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation))
|
||||||
|
}
|
||||||
|
|
||||||
hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext
|
hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext
|
||||||
|
|
||||||
if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 {
|
if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 {
|
||||||
appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType))
|
appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType))
|
||||||
}
|
}
|
||||||
|
|
||||||
if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError {
|
if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportBeforeSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError {
|
||||||
appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
|
appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -861,6 +875,15 @@ func (n Nodes) FirstNodeMarkedOrdered() Node {
|
|||||||
return Node{}
|
return Node{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (n Nodes) IndexOfFirstNodeMarkedOrdered() int {
|
||||||
|
for i := range n {
|
||||||
|
if n[i].MarkedOrdered {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
func (n Nodes) GetMaxFlakeAttempts() int {
|
func (n Nodes) GetMaxFlakeAttempts() int {
|
||||||
maxFlakeAttempts := 0
|
maxFlakeAttempts := 0
|
||||||
for i := range n {
|
for i := range n {
|
||||||
|
82
vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
generated
vendored
82
vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
generated
vendored
@ -7,6 +7,65 @@ import (
|
|||||||
"github.com/onsi/ginkgo/v2/types"
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type SortableSpecs struct {
|
||||||
|
Specs Specs
|
||||||
|
Indexes []int
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewSortableSpecs(specs Specs) *SortableSpecs {
|
||||||
|
indexes := make([]int, len(specs))
|
||||||
|
for i := range specs {
|
||||||
|
indexes[i] = i
|
||||||
|
}
|
||||||
|
return &SortableSpecs{
|
||||||
|
Specs: specs,
|
||||||
|
Indexes: indexes,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (s *SortableSpecs) Len() int { return len(s.Indexes) }
|
||||||
|
func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[j], s.Indexes[i] }
|
||||||
|
func (s *SortableSpecs) Less(i, j int) bool {
|
||||||
|
a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]]
|
||||||
|
|
||||||
|
aNodes, bNodes := a.Nodes.WithType(types.NodeTypesForContainerAndIt), b.Nodes.WithType(types.NodeTypesForContainerAndIt)
|
||||||
|
|
||||||
|
firstOrderedAIdx, firstOrderedBIdx := aNodes.IndexOfFirstNodeMarkedOrdered(), bNodes.IndexOfFirstNodeMarkedOrdered()
|
||||||
|
if firstOrderedAIdx > -1 && firstOrderedBIdx > -1 && aNodes[firstOrderedAIdx].ID == bNodes[firstOrderedBIdx].ID {
|
||||||
|
// strictly preserve order within an ordered containers. ID will track this as IDs are generated monotonically
|
||||||
|
return aNodes.FirstNodeWithType(types.NodeTypeIt).ID < bNodes.FirstNodeWithType(types.NodeTypeIt).ID
|
||||||
|
}
|
||||||
|
|
||||||
|
// if either spec is in an ordered container - only use the nodes up to the outermost ordered container
|
||||||
|
if firstOrderedAIdx > -1 {
|
||||||
|
aNodes = aNodes[:firstOrderedAIdx+1]
|
||||||
|
}
|
||||||
|
if firstOrderedBIdx > -1 {
|
||||||
|
bNodes = bNodes[:firstOrderedBIdx+1]
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < len(aNodes) && i < len(bNodes); i++ {
|
||||||
|
aCL, bCL := aNodes[i].CodeLocation, bNodes[i].CodeLocation
|
||||||
|
if aCL.FileName != bCL.FileName {
|
||||||
|
return aCL.FileName < bCL.FileName
|
||||||
|
}
|
||||||
|
if aCL.LineNumber != bCL.LineNumber {
|
||||||
|
return aCL.LineNumber < bCL.LineNumber
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// either everything is equal or we have different lengths of CLs
|
||||||
|
if len(aNodes) != len(bNodes) {
|
||||||
|
return len(aNodes) < len(bNodes)
|
||||||
|
}
|
||||||
|
// ok, now we are sure everything was equal. so we use the spec text to break ties
|
||||||
|
for i := 0; i < len(aNodes); i++ {
|
||||||
|
if aNodes[i].Text != bNodes[i].Text {
|
||||||
|
return aNodes[i].Text < bNodes[i].Text
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// ok, all those texts were equal. we'll use the ID of the most deeply nested node as a last resort
|
||||||
|
return aNodes[len(aNodes)-1].ID < bNodes[len(bNodes)-1].ID
|
||||||
|
}
|
||||||
|
|
||||||
type GroupedSpecIndices []SpecIndices
|
type GroupedSpecIndices []SpecIndices
|
||||||
type SpecIndices []int
|
type SpecIndices []int
|
||||||
|
|
||||||
@ -28,12 +87,17 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
|
|||||||
// Seed a new random source based on thee configured random seed.
|
// Seed a new random source based on thee configured random seed.
|
||||||
r := rand.New(rand.NewSource(suiteConfig.RandomSeed))
|
r := rand.New(rand.NewSource(suiteConfig.RandomSeed))
|
||||||
|
|
||||||
// first break things into execution groups
|
// first, we sort the entire suite to ensure a deterministic order. the sort is performed by filename, then line number, and then spec text. this ensures every parallel process has the exact same spec order and is only necessary to cover the edge case where the user iterates over a map to generate specs.
|
||||||
|
sortableSpecs := NewSortableSpecs(specs)
|
||||||
|
sort.Sort(sortableSpecs)
|
||||||
|
|
||||||
|
// then we break things into execution groups
|
||||||
// a group represents a single unit of execution and is a collection of SpecIndices
|
// a group represents a single unit of execution and is a collection of SpecIndices
|
||||||
// usually a group is just a single spec, however ordered containers must be preserved as a single group
|
// usually a group is just a single spec, however ordered containers must be preserved as a single group
|
||||||
executionGroupIDs := []uint{}
|
executionGroupIDs := []uint{}
|
||||||
executionGroups := map[uint]SpecIndices{}
|
executionGroups := map[uint]SpecIndices{}
|
||||||
for idx, spec := range specs {
|
for _, idx := range sortableSpecs.Indexes {
|
||||||
|
spec := specs[idx]
|
||||||
groupNode := spec.Nodes.FirstNodeMarkedOrdered()
|
groupNode := spec.Nodes.FirstNodeMarkedOrdered()
|
||||||
if groupNode.IsZero() {
|
if groupNode.IsZero() {
|
||||||
groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt)
|
groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt)
|
||||||
@ -48,7 +112,6 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
|
|||||||
// we shuffle outermost containers. so we need to form shufflable groupings of GroupIDs
|
// we shuffle outermost containers. so we need to form shufflable groupings of GroupIDs
|
||||||
shufflableGroupingIDs := []uint{}
|
shufflableGroupingIDs := []uint{}
|
||||||
shufflableGroupingIDToGroupIDs := map[uint][]uint{}
|
shufflableGroupingIDToGroupIDs := map[uint][]uint{}
|
||||||
shufflableGroupingsIDToSortKeys := map[uint]string{}
|
|
||||||
|
|
||||||
// for each execution group we're going to have to pick a node to represent how the
|
// for each execution group we're going to have to pick a node to represent how the
|
||||||
// execution group is grouped for shuffling:
|
// execution group is grouped for shuffling:
|
||||||
@ -72,22 +135,9 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
|
|||||||
if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 {
|
if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 {
|
||||||
// record the shuffleable group ID
|
// record the shuffleable group ID
|
||||||
shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID)
|
shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID)
|
||||||
// and record the sort key to use
|
|
||||||
shufflableGroupingsIDToSortKeys[shufflableGroupingNode.ID] = shufflableGroupingNode.CodeLocation.String()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// now we sort the shufflable groups by the sort key. We use the shufflable group nodes code location and break ties using its node id
|
|
||||||
sort.SliceStable(shufflableGroupingIDs, func(i, j int) bool {
|
|
||||||
keyA := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[i]]
|
|
||||||
keyB := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[j]]
|
|
||||||
if keyA == keyB {
|
|
||||||
return shufflableGroupingIDs[i] < shufflableGroupingIDs[j]
|
|
||||||
} else {
|
|
||||||
return keyA < keyB
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
// now we permute the sorted shufflable grouping IDs and build the ordered Groups
|
// now we permute the sorted shufflable grouping IDs and build the ordered Groups
|
||||||
orderedGroups := GroupedSpecIndices{}
|
orderedGroups := GroupedSpecIndices{}
|
||||||
permutation := r.Perm(len(shufflableGroupingIDs))
|
permutation := r.Perm(len(shufflableGroupingIDs))
|
||||||
|
11
vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
generated
vendored
11
vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
generated
vendored
@ -26,6 +26,17 @@ func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.Fil
|
|||||||
stdoutCloneFD, _ := unix.Dup(1)
|
stdoutCloneFD, _ := unix.Dup(1)
|
||||||
stderrCloneFD, _ := unix.Dup(2)
|
stderrCloneFD, _ := unix.Dup(2)
|
||||||
|
|
||||||
|
// Important, set the fds to FD_CLOEXEC to prevent them leaking into childs
|
||||||
|
// https://github.com/onsi/ginkgo/issues/1191
|
||||||
|
flags, err := unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_GETFD, 0)
|
||||||
|
if err == nil {
|
||||||
|
unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC)
|
||||||
|
}
|
||||||
|
flags, err = unix.FcntlInt(uintptr(stderrCloneFD), unix.F_GETFD, 0)
|
||||||
|
if err == nil {
|
||||||
|
unix.FcntlInt(uintptr(stderrCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC)
|
||||||
|
}
|
||||||
|
|
||||||
// And then wrap the clone file descriptors in files.
|
// And then wrap the clone file descriptors in files.
|
||||||
// One benefit of this (that we don't use yet) is that we can actually write
|
// One benefit of this (that we don't use yet) is that we can actually write
|
||||||
// to these files to emit output to the console even though we're intercepting output
|
// to these files to emit output to the console even though we're intercepting output
|
||||||
|
2
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
generated
vendored
2
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
generated
vendored
@ -42,6 +42,8 @@ type Client interface {
|
|||||||
PostSuiteWillBegin(report types.Report) error
|
PostSuiteWillBegin(report types.Report) error
|
||||||
PostDidRun(report types.SpecReport) error
|
PostDidRun(report types.SpecReport) error
|
||||||
PostSuiteDidEnd(report types.Report) error
|
PostSuiteDidEnd(report types.Report) error
|
||||||
|
PostReportBeforeSuiteCompleted(state types.SpecState) error
|
||||||
|
BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error)
|
||||||
PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error
|
PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error
|
||||||
BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error)
|
BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error)
|
||||||
BlockUntilNonprimaryProcsHaveFinished() error
|
BlockUntilNonprimaryProcsHaveFinished() error
|
||||||
|
13
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
generated
vendored
13
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
generated
vendored
@ -98,6 +98,19 @@ func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) er
|
|||||||
return client.post("/progress-report", report)
|
return client.post("/progress-report", report)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
|
||||||
|
return client.post("/report-before-suite-completed", state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
|
||||||
|
var state types.SpecState
|
||||||
|
err := client.poll("/report-before-suite-state", &state)
|
||||||
|
if err == ErrorGone {
|
||||||
|
return types.SpecStateFailed, nil
|
||||||
|
}
|
||||||
|
return state, err
|
||||||
|
}
|
||||||
|
|
||||||
func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
|
func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
|
||||||
beforeSuiteState := BeforeSuiteState{
|
beforeSuiteState := BeforeSuiteState{
|
||||||
State: state,
|
State: state,
|
||||||
|
19
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
generated
vendored
19
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
generated
vendored
@ -52,6 +52,8 @@ func (server *httpServer) Start() {
|
|||||||
mux.HandleFunc("/progress-report", server.emitProgressReport)
|
mux.HandleFunc("/progress-report", server.emitProgressReport)
|
||||||
|
|
||||||
//synchronization endpoints
|
//synchronization endpoints
|
||||||
|
mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted)
|
||||||
|
mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState)
|
||||||
mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted)
|
mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted)
|
||||||
mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState)
|
mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState)
|
||||||
mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished)
|
mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished)
|
||||||
@ -164,6 +166,23 @@ func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request
|
|||||||
server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer)
|
server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
var state types.SpecState
|
||||||
|
if !server.decode(writer, request, &state) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
var state types.SpecState
|
||||||
|
if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
json.NewEncoder(writer).Encode(state)
|
||||||
|
}
|
||||||
|
|
||||||
func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
|
func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
|
||||||
var beforeSuiteState BeforeSuiteState
|
var beforeSuiteState BeforeSuiteState
|
||||||
if !server.decode(writer, request, &beforeSuiteState) {
|
if !server.decode(writer, request, &beforeSuiteState) {
|
||||||
|
13
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
generated
vendored
13
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
generated
vendored
@ -76,6 +76,19 @@ func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) err
|
|||||||
return client.client.Call("Server.EmitProgressReport", report, voidReceiver)
|
return client.client.Call("Server.EmitProgressReport", report, voidReceiver)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
|
||||||
|
return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
|
||||||
|
var state types.SpecState
|
||||||
|
err := client.poll("Server.ReportBeforeSuiteState", &state)
|
||||||
|
if err == ErrorGone {
|
||||||
|
return types.SpecStateFailed, nil
|
||||||
|
}
|
||||||
|
return state, err
|
||||||
|
}
|
||||||
|
|
||||||
func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
|
func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
|
||||||
beforeSuiteState := BeforeSuiteState{
|
beforeSuiteState := BeforeSuiteState{
|
||||||
State: state,
|
State: state,
|
||||||
|
25
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
generated
vendored
25
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
generated
vendored
@ -24,6 +24,7 @@ type ServerHandler struct {
|
|||||||
alives []func() bool
|
alives []func() bool
|
||||||
lock *sync.Mutex
|
lock *sync.Mutex
|
||||||
beforeSuiteState BeforeSuiteState
|
beforeSuiteState BeforeSuiteState
|
||||||
|
reportBeforeSuiteState types.SpecState
|
||||||
parallelTotal int
|
parallelTotal int
|
||||||
counter int
|
counter int
|
||||||
counterLock *sync.Mutex
|
counterLock *sync.Mutex
|
||||||
@ -42,6 +43,7 @@ func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHan
|
|||||||
counterLock: &sync.Mutex{},
|
counterLock: &sync.Mutex{},
|
||||||
alives: make([]func() bool, parallelTotal),
|
alives: make([]func() bool, parallelTotal),
|
||||||
beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid},
|
beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid},
|
||||||
|
|
||||||
parallelTotal: parallelTotal,
|
parallelTotal: parallelTotal,
|
||||||
outputDestination: os.Stdout,
|
outputDestination: os.Stdout,
|
||||||
done: make(chan interface{}),
|
done: make(chan interface{}),
|
||||||
@ -140,6 +142,29 @@ func (handler *ServerHandler) haveNonprimaryProcsFinished() bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error {
|
||||||
|
handler.lock.Lock()
|
||||||
|
defer handler.lock.Unlock()
|
||||||
|
handler.reportBeforeSuiteState = reportBeforeSuiteState
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (handler *ServerHandler) ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error {
|
||||||
|
proc1IsAlive := handler.procIsAlive(1)
|
||||||
|
handler.lock.Lock()
|
||||||
|
defer handler.lock.Unlock()
|
||||||
|
if handler.reportBeforeSuiteState == types.SpecStateInvalid {
|
||||||
|
if proc1IsAlive {
|
||||||
|
return ErrorEarly
|
||||||
|
} else {
|
||||||
|
return ErrorGone
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*reportBeforeSuiteState = handler.reportBeforeSuiteState
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error {
|
func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error {
|
||||||
handler.lock.Lock()
|
handler.lock.Lock()
|
||||||
defer handler.lock.Unlock()
|
defer handler.lock.Unlock()
|
||||||
|
22
vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
generated
vendored
22
vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
generated
vendored
@ -48,13 +48,10 @@ type ProgressStepCursor struct {
|
|||||||
StartTime time.Time
|
StartTime time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep ProgressStepCursor, gwOutput string, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) {
|
func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep types.SpecEvent, gwOutput string, timelineLocation types.TimelineLocation, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) {
|
||||||
pr := types.ProgressReport{
|
pr := types.ProgressReport{
|
||||||
ParallelProcess: report.ParallelProcess,
|
ParallelProcess: report.ParallelProcess,
|
||||||
RunningInParallel: isRunningInParallel,
|
RunningInParallel: isRunningInParallel,
|
||||||
|
|
||||||
Time: time.Now(),
|
|
||||||
|
|
||||||
ContainerHierarchyTexts: report.ContainerHierarchyTexts,
|
ContainerHierarchyTexts: report.ContainerHierarchyTexts,
|
||||||
LeafNodeText: report.LeafNodeText,
|
LeafNodeText: report.LeafNodeText,
|
||||||
LeafNodeLocation: report.LeafNodeLocation,
|
LeafNodeLocation: report.LeafNodeLocation,
|
||||||
@ -65,14 +62,14 @@ func NewProgressReport(isRunningInParallel bool, report types.SpecReport, curren
|
|||||||
CurrentNodeLocation: currentNode.CodeLocation,
|
CurrentNodeLocation: currentNode.CodeLocation,
|
||||||
CurrentNodeStartTime: currentNodeStartTime,
|
CurrentNodeStartTime: currentNodeStartTime,
|
||||||
|
|
||||||
CurrentStepText: currentStep.Text,
|
CurrentStepText: currentStep.Message,
|
||||||
CurrentStepLocation: currentStep.CodeLocation,
|
CurrentStepLocation: currentStep.CodeLocation,
|
||||||
CurrentStepStartTime: currentStep.StartTime,
|
CurrentStepStartTime: currentStep.TimelineLocation.Time,
|
||||||
|
|
||||||
AdditionalReports: additionalReports,
|
AdditionalReports: additionalReports,
|
||||||
|
|
||||||
CapturedGinkgoWriterOutput: gwOutput,
|
CapturedGinkgoWriterOutput: gwOutput,
|
||||||
GinkgoWriterOffset: len(gwOutput),
|
TimelineLocation: timelineLocation,
|
||||||
}
|
}
|
||||||
|
|
||||||
goroutines, err := extractRunningGoroutines()
|
goroutines, err := extractRunningGoroutines()
|
||||||
@ -186,7 +183,6 @@ func extractRunningGoroutines() ([]types.Goroutine, error) {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
r := bufio.NewReader(bytes.NewReader(stack))
|
r := bufio.NewReader(bytes.NewReader(stack))
|
||||||
out := []types.Goroutine{}
|
out := []types.Goroutine{}
|
||||||
idx := -1
|
idx := -1
|
||||||
@ -234,12 +230,12 @@ func extractRunningGoroutines() ([]types.Goroutine, error) {
|
|||||||
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function))
|
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function))
|
||||||
}
|
}
|
||||||
line = strings.TrimLeft(line, " \t")
|
line = strings.TrimLeft(line, " \t")
|
||||||
fields := strings.SplitN(line, ":", 2)
|
delimiterIdx := strings.LastIndex(line, ":")
|
||||||
if len(fields) != 2 {
|
if delimiterIdx == -1 {
|
||||||
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename nad line number: %s", line))
|
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename and line number: %s", line))
|
||||||
}
|
}
|
||||||
functionCall.Filename = fields[0]
|
functionCall.Filename = line[:delimiterIdx]
|
||||||
line = strings.Split(fields[1], " ")[0]
|
line = strings.Split(line[delimiterIdx+1:], " ")[0]
|
||||||
lineNumber, err := strconv.ParseInt(line, 10, 64)
|
lineNumber, err := strconv.ParseInt(line, 10, 64)
|
||||||
functionCall.Line = int(lineNumber)
|
functionCall.Line = int(lineNumber)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
79
vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go
generated
vendored
Normal file
79
vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go
generated
vendored
Normal file
@ -0,0 +1,79 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ProgressReporterManager struct {
|
||||||
|
lock *sync.Mutex
|
||||||
|
progressReporters map[int]func() string
|
||||||
|
prCounter int
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewProgressReporterManager() *ProgressReporterManager {
|
||||||
|
return &ProgressReporterManager{
|
||||||
|
progressReporters: map[int]func() string{},
|
||||||
|
lock: &sync.Mutex{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (prm *ProgressReporterManager) AttachProgressReporter(reporter func() string) func() {
|
||||||
|
prm.lock.Lock()
|
||||||
|
defer prm.lock.Unlock()
|
||||||
|
prm.prCounter += 1
|
||||||
|
prCounter := prm.prCounter
|
||||||
|
prm.progressReporters[prCounter] = reporter
|
||||||
|
|
||||||
|
return func() {
|
||||||
|
prm.lock.Lock()
|
||||||
|
defer prm.lock.Unlock()
|
||||||
|
delete(prm.progressReporters, prCounter)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (prm *ProgressReporterManager) QueryProgressReporters(ctx context.Context, failer *Failer) []string {
|
||||||
|
prm.lock.Lock()
|
||||||
|
keys := []int{}
|
||||||
|
for key := range prm.progressReporters {
|
||||||
|
keys = append(keys, key)
|
||||||
|
}
|
||||||
|
sort.Ints(keys)
|
||||||
|
reporters := []func() string{}
|
||||||
|
for _, key := range keys {
|
||||||
|
reporters = append(reporters, prm.progressReporters[key])
|
||||||
|
}
|
||||||
|
prm.lock.Unlock()
|
||||||
|
|
||||||
|
if len(reporters) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := []string{}
|
||||||
|
for _, reporter := range reporters {
|
||||||
|
reportC := make(chan string, 1)
|
||||||
|
go func() {
|
||||||
|
defer func() {
|
||||||
|
e := recover()
|
||||||
|
if e != nil {
|
||||||
|
failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
|
||||||
|
reportC <- "failed to query attached progress reporter"
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
reportC <- reporter()
|
||||||
|
}()
|
||||||
|
var report string
|
||||||
|
select {
|
||||||
|
case report = <-reportC:
|
||||||
|
case <-ctx.Done():
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
if strings.TrimSpace(report) != "" {
|
||||||
|
out = append(out, report)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
21
vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
generated
vendored
21
vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
generated
vendored
@ -1,7 +1,6 @@
|
|||||||
package internal
|
package internal
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"reflect"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/onsi/ginkgo/v2/types"
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
@ -13,20 +12,20 @@ func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (Re
|
|||||||
out := ReportEntry{
|
out := ReportEntry{
|
||||||
Visibility: types.ReportEntryVisibilityAlways,
|
Visibility: types.ReportEntryVisibilityAlways,
|
||||||
Name: name,
|
Name: name,
|
||||||
Time: time.Now(),
|
|
||||||
Location: cl,
|
Location: cl,
|
||||||
|
Time: time.Now(),
|
||||||
}
|
}
|
||||||
var didSetValue = false
|
var didSetValue = false
|
||||||
for _, arg := range args {
|
for _, arg := range args {
|
||||||
switch reflect.TypeOf(arg) {
|
switch x := arg.(type) {
|
||||||
case reflect.TypeOf(types.ReportEntryVisibilityAlways):
|
case types.ReportEntryVisibility:
|
||||||
out.Visibility = arg.(types.ReportEntryVisibility)
|
out.Visibility = x
|
||||||
case reflect.TypeOf(types.CodeLocation{}):
|
case types.CodeLocation:
|
||||||
out.Location = arg.(types.CodeLocation)
|
out.Location = x
|
||||||
case reflect.TypeOf(Offset(0)):
|
case Offset:
|
||||||
out.Location = types.NewCodeLocation(2 + int(arg.(Offset)))
|
out.Location = types.NewCodeLocation(2 + int(x))
|
||||||
case reflect.TypeOf(out.Time):
|
case time.Time:
|
||||||
out.Time = arg.(time.Time)
|
out.Time = x
|
||||||
default:
|
default:
|
||||||
if didSetValue {
|
if didSetValue {
|
||||||
return ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg)
|
return ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg)
|
||||||
|
47
vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
generated
vendored
47
vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
generated
vendored
@ -2,8 +2,6 @@ package internal
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"sort"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/onsi/ginkgo/v2/types"
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
)
|
)
|
||||||
@ -17,11 +15,9 @@ type SpecContext interface {
|
|||||||
|
|
||||||
type specContext struct {
|
type specContext struct {
|
||||||
context.Context
|
context.Context
|
||||||
|
*ProgressReporterManager
|
||||||
|
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
lock *sync.Mutex
|
|
||||||
progressReporters map[int]func() string
|
|
||||||
prCounter int
|
|
||||||
|
|
||||||
suite *Suite
|
suite *Suite
|
||||||
}
|
}
|
||||||
@ -38,9 +34,7 @@ func NewSpecContext(suite *Suite) *specContext {
|
|||||||
sc := &specContext{
|
sc := &specContext{
|
||||||
cancel: cancel,
|
cancel: cancel,
|
||||||
suite: suite,
|
suite: suite,
|
||||||
lock: &sync.Mutex{},
|
ProgressReporterManager: NewProgressReporterManager(),
|
||||||
prCounter: 0,
|
|
||||||
progressReporters: map[int]func() string{},
|
|
||||||
}
|
}
|
||||||
ctx = context.WithValue(ctx, "GINKGO_SPEC_CONTEXT", sc) //yes, yes, the go docs say don't use a string for a key... but we'd rather avoid a circular dependency between Gomega and Ginkgo
|
ctx = context.WithValue(ctx, "GINKGO_SPEC_CONTEXT", sc) //yes, yes, the go docs say don't use a string for a key... but we'd rather avoid a circular dependency between Gomega and Ginkgo
|
||||||
sc.Context = ctx //thank goodness for garbage collectors that can handle circular dependencies
|
sc.Context = ctx //thank goodness for garbage collectors that can handle circular dependencies
|
||||||
@ -51,40 +45,3 @@ func NewSpecContext(suite *Suite) *specContext {
|
|||||||
func (sc *specContext) SpecReport() types.SpecReport {
|
func (sc *specContext) SpecReport() types.SpecReport {
|
||||||
return sc.suite.CurrentSpecReport()
|
return sc.suite.CurrentSpecReport()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sc *specContext) AttachProgressReporter(reporter func() string) func() {
|
|
||||||
sc.lock.Lock()
|
|
||||||
defer sc.lock.Unlock()
|
|
||||||
sc.prCounter += 1
|
|
||||||
prCounter := sc.prCounter
|
|
||||||
sc.progressReporters[prCounter] = reporter
|
|
||||||
|
|
||||||
return func() {
|
|
||||||
sc.lock.Lock()
|
|
||||||
defer sc.lock.Unlock()
|
|
||||||
delete(sc.progressReporters, prCounter)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sc *specContext) QueryProgressReporters() []string {
|
|
||||||
sc.lock.Lock()
|
|
||||||
keys := []int{}
|
|
||||||
for key := range sc.progressReporters {
|
|
||||||
keys = append(keys, key)
|
|
||||||
}
|
|
||||||
sort.Ints(keys)
|
|
||||||
reporters := []func() string{}
|
|
||||||
for _, key := range keys {
|
|
||||||
reporters = append(reporters, sc.progressReporters[key])
|
|
||||||
}
|
|
||||||
sc.lock.Unlock()
|
|
||||||
|
|
||||||
if len(reporters) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := []string{}
|
|
||||||
for _, reporter := range reporters {
|
|
||||||
out = append(out, reporter())
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
269
vendor/github.com/onsi/ginkgo/v2/internal/suite.go
generated
vendored
269
vendor/github.com/onsi/ginkgo/v2/internal/suite.go
generated
vendored
@ -9,6 +9,7 @@ import (
|
|||||||
"github.com/onsi/ginkgo/v2/internal/parallel_support"
|
"github.com/onsi/ginkgo/v2/internal/parallel_support"
|
||||||
"github.com/onsi/ginkgo/v2/reporters"
|
"github.com/onsi/ginkgo/v2/reporters"
|
||||||
"github.com/onsi/ginkgo/v2/types"
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Phase uint
|
type Phase uint
|
||||||
@ -19,10 +20,14 @@ const (
|
|||||||
PhaseRun
|
PhaseRun
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var PROGRESS_REPORTER_DEADLING = 5 * time.Second
|
||||||
|
|
||||||
type Suite struct {
|
type Suite struct {
|
||||||
tree *TreeNode
|
tree *TreeNode
|
||||||
topLevelContainers Nodes
|
topLevelContainers Nodes
|
||||||
|
|
||||||
|
*ProgressReporterManager
|
||||||
|
|
||||||
phase Phase
|
phase Phase
|
||||||
|
|
||||||
suiteNodes Nodes
|
suiteNodes Nodes
|
||||||
@ -44,7 +49,8 @@ type Suite struct {
|
|||||||
|
|
||||||
currentSpecContext *specContext
|
currentSpecContext *specContext
|
||||||
|
|
||||||
progressStepCursor ProgressStepCursor
|
currentByStep types.SpecEvent
|
||||||
|
timelineOrder int
|
||||||
|
|
||||||
/*
|
/*
|
||||||
We don't need to lock around all operations. Just those that *could* happen concurrently.
|
We don't need to lock around all operations. Just those that *could* happen concurrently.
|
||||||
@ -65,6 +71,7 @@ func NewSuite() *Suite {
|
|||||||
return &Suite{
|
return &Suite{
|
||||||
tree: &TreeNode{},
|
tree: &TreeNode{},
|
||||||
phase: PhaseBuildTopLevel,
|
phase: PhaseBuildTopLevel,
|
||||||
|
ProgressReporterManager: NewProgressReporterManager(),
|
||||||
|
|
||||||
selectiveLock: &sync.Mutex{},
|
selectiveLock: &sync.Mutex{},
|
||||||
}
|
}
|
||||||
@ -128,7 +135,7 @@ func (suite *Suite) PushNode(node Node) error {
|
|||||||
return suite.pushCleanupNode(node)
|
return suite.pushCleanupNode(node)
|
||||||
}
|
}
|
||||||
|
|
||||||
if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) {
|
if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeBeforeSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
|
||||||
return suite.pushSuiteNode(node)
|
return suite.pushSuiteNode(node)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -150,6 +157,13 @@ func (suite *Suite) PushNode(node Node) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if node.MarkedContinueOnFailure {
|
||||||
|
firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
|
||||||
|
if !firstOrderedNode.IsZero() {
|
||||||
|
return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if node.NodeType == types.NodeTypeContainer {
|
if node.NodeType == types.NodeTypeContainer {
|
||||||
// During PhaseBuildTopLevel we only track the top level containers without entering them
|
// During PhaseBuildTopLevel we only track the top level containers without entering them
|
||||||
// We only enter the top level container nodes during PhaseBuildTree
|
// We only enter the top level container nodes during PhaseBuildTree
|
||||||
@ -221,7 +235,7 @@ func (suite *Suite) pushCleanupNode(node Node) error {
|
|||||||
node.NodeType = types.NodeTypeCleanupAfterSuite
|
node.NodeType = types.NodeTypeCleanupAfterSuite
|
||||||
case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
|
case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
|
||||||
node.NodeType = types.NodeTypeCleanupAfterAll
|
node.NodeType = types.NodeTypeCleanupAfterAll
|
||||||
case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite:
|
case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite:
|
||||||
return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
|
return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
|
||||||
case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
|
case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
|
||||||
return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
|
return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
|
||||||
@ -236,15 +250,65 @@ func (suite *Suite) pushCleanupNode(node Node) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
func (suite *Suite) generateTimelineLocation() types.TimelineLocation {
|
||||||
Pushing and popping the Step Cursor stack
|
|
||||||
*/
|
|
||||||
|
|
||||||
func (suite *Suite) SetProgressStepCursor(cursor ProgressStepCursor) {
|
|
||||||
suite.selectiveLock.Lock()
|
suite.selectiveLock.Lock()
|
||||||
defer suite.selectiveLock.Unlock()
|
defer suite.selectiveLock.Unlock()
|
||||||
|
|
||||||
suite.progressStepCursor = cursor
|
suite.timelineOrder += 1
|
||||||
|
return types.TimelineLocation{
|
||||||
|
Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(),
|
||||||
|
Order: suite.timelineOrder,
|
||||||
|
Time: time.Now(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) handleSpecEvent(event types.SpecEvent) types.SpecEvent {
|
||||||
|
event.TimelineLocation = suite.generateTimelineLocation()
|
||||||
|
suite.selectiveLock.Lock()
|
||||||
|
suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
|
||||||
|
suite.selectiveLock.Unlock()
|
||||||
|
suite.reporter.EmitSpecEvent(event)
|
||||||
|
return event
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) {
|
||||||
|
event := startEvent
|
||||||
|
event.SpecEventType = eventType
|
||||||
|
event.TimelineLocation = suite.generateTimelineLocation()
|
||||||
|
event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time)
|
||||||
|
suite.selectiveLock.Lock()
|
||||||
|
suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
|
||||||
|
suite.selectiveLock.Unlock()
|
||||||
|
suite.reporter.EmitSpecEvent(event)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) By(text string, callback ...func()) error {
|
||||||
|
cl := types.NewCodeLocation(2)
|
||||||
|
if suite.phase != PhaseRun {
|
||||||
|
return types.GinkgoErrors.ByNotDuringRunPhase(cl)
|
||||||
|
}
|
||||||
|
|
||||||
|
event := suite.handleSpecEvent(types.SpecEvent{
|
||||||
|
SpecEventType: types.SpecEventByStart,
|
||||||
|
CodeLocation: cl,
|
||||||
|
Message: text,
|
||||||
|
})
|
||||||
|
suite.selectiveLock.Lock()
|
||||||
|
suite.currentByStep = event
|
||||||
|
suite.selectiveLock.Unlock()
|
||||||
|
|
||||||
|
if len(callback) == 1 {
|
||||||
|
defer func() {
|
||||||
|
suite.selectiveLock.Lock()
|
||||||
|
suite.currentByStep = types.SpecEvent{}
|
||||||
|
suite.selectiveLock.Unlock()
|
||||||
|
suite.handleSpecEventEnd(types.SpecEventByEnd, event)
|
||||||
|
}()
|
||||||
|
callback[0]()
|
||||||
|
} else if len(callback) > 1 {
|
||||||
|
panic("just one callback per By, please")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -263,27 +327,32 @@ func (suite *Suite) CurrentSpecReport() types.SpecReport {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (suite *Suite) AddReportEntry(entry ReportEntry) error {
|
func (suite *Suite) AddReportEntry(entry ReportEntry) error {
|
||||||
suite.selectiveLock.Lock()
|
|
||||||
defer suite.selectiveLock.Unlock()
|
|
||||||
if suite.phase != PhaseRun {
|
if suite.phase != PhaseRun {
|
||||||
return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
|
return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
|
||||||
}
|
}
|
||||||
|
entry.TimelineLocation = suite.generateTimelineLocation()
|
||||||
|
entry.Time = entry.TimelineLocation.Time
|
||||||
|
suite.selectiveLock.Lock()
|
||||||
suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
|
suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
|
||||||
|
suite.selectiveLock.Unlock()
|
||||||
|
suite.reporter.EmitReportEntry(entry)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport {
|
func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport {
|
||||||
|
timelineLocation := suite.generateTimelineLocation()
|
||||||
suite.selectiveLock.Lock()
|
suite.selectiveLock.Lock()
|
||||||
defer suite.selectiveLock.Unlock()
|
defer suite.selectiveLock.Unlock()
|
||||||
|
|
||||||
|
deadline, cancel := context.WithTimeout(context.Background(), PROGRESS_REPORTER_DEADLING)
|
||||||
|
defer cancel()
|
||||||
var additionalReports []string
|
var additionalReports []string
|
||||||
if suite.currentSpecContext != nil {
|
if suite.currentSpecContext != nil {
|
||||||
additionalReports = suite.currentSpecContext.QueryProgressReporters()
|
additionalReports = append(additionalReports, suite.currentSpecContext.QueryProgressReporters(deadline, suite.failer)...)
|
||||||
}
|
}
|
||||||
stepCursor := suite.progressStepCursor
|
additionalReports = append(additionalReports, suite.QueryProgressReporters(deadline, suite.failer)...)
|
||||||
|
|
||||||
gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes())
|
gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes())
|
||||||
pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, stepCursor, gwOutput, additionalReports, suite.config.SourceRoots, fullReport)
|
pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error())
|
fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error())
|
||||||
@ -355,7 +424,13 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s
|
|||||||
}
|
}
|
||||||
|
|
||||||
suite.report.SuiteSucceeded = true
|
suite.report.SuiteSucceeded = true
|
||||||
|
|
||||||
|
suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite)
|
||||||
|
|
||||||
|
ranBeforeSuite := suite.report.SuiteSucceeded
|
||||||
|
if suite.report.SuiteSucceeded {
|
||||||
suite.runBeforeSuite(numSpecsThatWillBeRun)
|
suite.runBeforeSuite(numSpecsThatWillBeRun)
|
||||||
|
}
|
||||||
|
|
||||||
if suite.report.SuiteSucceeded {
|
if suite.report.SuiteSucceeded {
|
||||||
groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
|
groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
|
||||||
@ -394,7 +469,9 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if ranBeforeSuite {
|
||||||
suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
|
suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
|
||||||
|
}
|
||||||
|
|
||||||
interruptStatus := suite.interruptHandler.Status()
|
interruptStatus := suite.interruptHandler.Status()
|
||||||
if interruptStatus.Interrupted() {
|
if interruptStatus.Interrupted() {
|
||||||
@ -408,9 +485,7 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s
|
|||||||
suite.report.SuiteSucceeded = false
|
suite.report.SuiteSucceeded = false
|
||||||
}
|
}
|
||||||
|
|
||||||
if suite.config.ParallelProcess == 1 {
|
suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite)
|
||||||
suite.runReportAfterSuite()
|
|
||||||
}
|
|
||||||
suite.reporter.SuiteDidEnd(suite.report)
|
suite.reporter.SuiteDidEnd(suite.report)
|
||||||
if suite.isRunningInParallel() {
|
if suite.isRunningInParallel() {
|
||||||
suite.client.PostSuiteDidEnd(suite.report)
|
suite.client.PostSuiteDidEnd(suite.report)
|
||||||
@ -427,6 +502,7 @@ func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
|
|||||||
LeafNodeType: beforeSuiteNode.NodeType,
|
LeafNodeType: beforeSuiteNode.NodeType,
|
||||||
LeafNodeLocation: beforeSuiteNode.CodeLocation,
|
LeafNodeLocation: beforeSuiteNode.CodeLocation,
|
||||||
ParallelProcess: suite.config.ParallelProcess,
|
ParallelProcess: suite.config.ParallelProcess,
|
||||||
|
RunningInParallel: suite.isRunningInParallel(),
|
||||||
}
|
}
|
||||||
suite.selectiveLock.Unlock()
|
suite.selectiveLock.Unlock()
|
||||||
|
|
||||||
@ -448,6 +524,7 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
|
|||||||
LeafNodeType: afterSuiteNode.NodeType,
|
LeafNodeType: afterSuiteNode.NodeType,
|
||||||
LeafNodeLocation: afterSuiteNode.CodeLocation,
|
LeafNodeLocation: afterSuiteNode.CodeLocation,
|
||||||
ParallelProcess: suite.config.ParallelProcess,
|
ParallelProcess: suite.config.ParallelProcess,
|
||||||
|
RunningInParallel: suite.isRunningInParallel(),
|
||||||
}
|
}
|
||||||
suite.selectiveLock.Unlock()
|
suite.selectiveLock.Unlock()
|
||||||
|
|
||||||
@ -464,6 +541,7 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
|
|||||||
LeafNodeType: cleanupNode.NodeType,
|
LeafNodeType: cleanupNode.NodeType,
|
||||||
LeafNodeLocation: cleanupNode.CodeLocation,
|
LeafNodeLocation: cleanupNode.CodeLocation,
|
||||||
ParallelProcess: suite.config.ParallelProcess,
|
ParallelProcess: suite.config.ParallelProcess,
|
||||||
|
RunningInParallel: suite.isRunningInParallel(),
|
||||||
}
|
}
|
||||||
suite.selectiveLock.Unlock()
|
suite.selectiveLock.Unlock()
|
||||||
|
|
||||||
@ -474,23 +552,6 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (suite *Suite) runReportAfterSuite() {
|
|
||||||
for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) {
|
|
||||||
suite.selectiveLock.Lock()
|
|
||||||
suite.currentSpecReport = types.SpecReport{
|
|
||||||
LeafNodeType: node.NodeType,
|
|
||||||
LeafNodeLocation: node.CodeLocation,
|
|
||||||
LeafNodeText: node.Text,
|
|
||||||
ParallelProcess: suite.config.ParallelProcess,
|
|
||||||
}
|
|
||||||
suite.selectiveLock.Unlock()
|
|
||||||
|
|
||||||
suite.reporter.WillRun(suite.currentSpecReport)
|
|
||||||
suite.runReportAfterSuiteNode(node, suite.report)
|
|
||||||
suite.processCurrentSpecReport()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
|
func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
|
||||||
nodes := spec.Nodes.WithType(nodeType)
|
nodes := spec.Nodes.WithType(nodeType)
|
||||||
if nodeType == types.NodeTypeReportAfterEach {
|
if nodeType == types.NodeTypeReportAfterEach {
|
||||||
@ -608,39 +669,80 @@ func (suite *Suite) runSuiteNode(node Node) {
|
|||||||
|
|
||||||
if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
|
if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
|
||||||
suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
|
suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
|
||||||
|
suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
|
||||||
}
|
}
|
||||||
|
|
||||||
suite.currentSpecReport.EndTime = time.Now()
|
suite.currentSpecReport.EndTime = time.Now()
|
||||||
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
|
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
|
||||||
suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
|
suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
|
||||||
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
|
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) {
|
||||||
|
nodes := suite.suiteNodes.WithType(nodeType)
|
||||||
|
// only run ReportAfterSuite on proc 1
|
||||||
|
if nodeType.Is(types.NodeTypeReportAfterSuite) && suite.config.ParallelProcess != 1 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed
|
||||||
|
if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 {
|
||||||
|
state, err := suite.client.BlockUntilReportBeforeSuiteCompleted()
|
||||||
|
if err != nil || state.Is(types.SpecStateFailed) {
|
||||||
|
suite.report.SuiteSucceeded = false
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) {
|
for _, node := range nodes {
|
||||||
|
suite.selectiveLock.Lock()
|
||||||
|
suite.currentSpecReport = types.SpecReport{
|
||||||
|
LeafNodeType: node.NodeType,
|
||||||
|
LeafNodeLocation: node.CodeLocation,
|
||||||
|
LeafNodeText: node.Text,
|
||||||
|
ParallelProcess: suite.config.ParallelProcess,
|
||||||
|
RunningInParallel: suite.isRunningInParallel(),
|
||||||
|
}
|
||||||
|
suite.selectiveLock.Unlock()
|
||||||
|
|
||||||
|
suite.reporter.WillRun(suite.currentSpecReport)
|
||||||
|
suite.runReportSuiteNode(node, suite.report)
|
||||||
|
suite.processCurrentSpecReport()
|
||||||
|
}
|
||||||
|
|
||||||
|
// if we're running ReportBeforeSuite and we're running in parallel - we shuld tell the other procs that we're done
|
||||||
|
if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 {
|
||||||
|
if suite.report.SuiteSucceeded {
|
||||||
|
suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed)
|
||||||
|
} else {
|
||||||
|
suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) runReportSuiteNode(node Node, report types.Report) {
|
||||||
suite.writer.Truncate()
|
suite.writer.Truncate()
|
||||||
suite.outputInterceptor.StartInterceptingOutput()
|
suite.outputInterceptor.StartInterceptingOutput()
|
||||||
suite.currentSpecReport.StartTime = time.Now()
|
suite.currentSpecReport.StartTime = time.Now()
|
||||||
|
|
||||||
if suite.config.ParallelTotal > 1 {
|
// if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and
|
||||||
|
// (b) always fetch the latest report as prior ReportAfterSuites will contribute to it
|
||||||
|
if node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() {
|
||||||
aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
|
aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
|
suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
|
||||||
|
suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
report = report.Add(aggregatedReport)
|
report = report.Add(aggregatedReport)
|
||||||
}
|
}
|
||||||
|
|
||||||
node.Body = func(SpecContext) { node.ReportAfterSuiteBody(report) }
|
node.Body = func(SpecContext) { node.ReportSuiteBody(report) }
|
||||||
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
|
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
|
||||||
|
|
||||||
suite.currentSpecReport.EndTime = time.Now()
|
suite.currentSpecReport.EndTime = time.Now()
|
||||||
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
|
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
|
||||||
suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
|
suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
|
||||||
suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()
|
suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
|
func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
|
||||||
@ -662,7 +764,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
|
|||||||
suite.selectiveLock.Lock()
|
suite.selectiveLock.Lock()
|
||||||
suite.currentNode = node
|
suite.currentNode = node
|
||||||
suite.currentNodeStartTime = time.Now()
|
suite.currentNodeStartTime = time.Now()
|
||||||
suite.progressStepCursor = ProgressStepCursor{}
|
suite.currentByStep = types.SpecEvent{}
|
||||||
suite.selectiveLock.Unlock()
|
suite.selectiveLock.Unlock()
|
||||||
defer func() {
|
defer func() {
|
||||||
suite.selectiveLock.Lock()
|
suite.selectiveLock.Lock()
|
||||||
@ -671,13 +773,18 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
|
|||||||
suite.selectiveLock.Unlock()
|
suite.selectiveLock.Unlock()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
if suite.config.EmitSpecProgress && !node.MarkedSuppressProgressReporting {
|
|
||||||
if text == "" {
|
if text == "" {
|
||||||
text = "TOP-LEVEL"
|
text = "TOP-LEVEL"
|
||||||
}
|
}
|
||||||
s := fmt.Sprintf("[%s] %s\n %s\n", node.NodeType.String(), text, node.CodeLocation.String())
|
event := suite.handleSpecEvent(types.SpecEvent{
|
||||||
suite.writer.Write([]byte(s))
|
SpecEventType: types.SpecEventNodeStart,
|
||||||
}
|
NodeType: node.NodeType,
|
||||||
|
Message: text,
|
||||||
|
CodeLocation: node.CodeLocation,
|
||||||
|
})
|
||||||
|
defer func() {
|
||||||
|
suite.handleSpecEventEnd(types.SpecEventNodeEnd, event)
|
||||||
|
}()
|
||||||
|
|
||||||
var failure types.Failure
|
var failure types.Failure
|
||||||
failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
|
failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
|
||||||
@ -697,18 +804,23 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
|
|||||||
|
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
deadline := suite.deadline
|
deadline := suite.deadline
|
||||||
|
timeoutInPlay := "suite"
|
||||||
if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) {
|
if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) {
|
||||||
deadline = specDeadline
|
deadline = specDeadline
|
||||||
|
timeoutInPlay = "spec"
|
||||||
}
|
}
|
||||||
if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) {
|
if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) {
|
||||||
deadline = now.Add(node.NodeTimeout)
|
deadline = now.Add(node.NodeTimeout)
|
||||||
|
timeoutInPlay = "node"
|
||||||
}
|
}
|
||||||
if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
|
if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
|
||||||
//we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
|
//we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
|
||||||
if node.NodeTimeout > 0 {
|
if node.NodeTimeout > 0 {
|
||||||
deadline = now.Add(node.NodeTimeout)
|
deadline = now.Add(node.NodeTimeout)
|
||||||
|
timeoutInPlay = "node"
|
||||||
} else {
|
} else {
|
||||||
deadline = now.Add(gracePeriod)
|
deadline = now.Add(gracePeriod)
|
||||||
|
timeoutInPlay = "grace period"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -743,6 +855,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
|
|||||||
}
|
}
|
||||||
|
|
||||||
outcomeFromRun, failureFromRun := suite.failer.Drain()
|
outcomeFromRun, failureFromRun := suite.failer.Drain()
|
||||||
|
failureFromRun.TimelineLocation = suite.generateTimelineLocation()
|
||||||
outcomeC <- outcomeFromRun
|
outcomeC <- outcomeFromRun
|
||||||
failureC <- failureFromRun
|
failureC <- failureFromRun
|
||||||
}()
|
}()
|
||||||
@ -772,23 +885,33 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
|
|||||||
select {
|
select {
|
||||||
case outcomeFromRun := <-outcomeC:
|
case outcomeFromRun := <-outcomeC:
|
||||||
failureFromRun := <-failureC
|
failureFromRun := <-failureC
|
||||||
if outcome == types.SpecStateInterrupted {
|
if outcome.Is(types.SpecStateInterrupted | types.SpecStateTimedout) {
|
||||||
// we've already been interrupted. we just managed to actually exit
|
// we've already been interrupted/timed out. we just managed to actually exit
|
||||||
// before the grace period elapsed
|
// before the grace period elapsed
|
||||||
return outcome, failure
|
// if we have a failure message we attach it as an additional failure
|
||||||
} else if outcome == types.SpecStateTimedout {
|
|
||||||
// we've already timed out. we just managed to actually exit
|
|
||||||
// before the grace period elapsed. if we have a failure message we should include it
|
|
||||||
if outcomeFromRun != types.SpecStatePassed {
|
if outcomeFromRun != types.SpecStatePassed {
|
||||||
failure.Location, failure.ForwardedPanic = failureFromRun.Location, failureFromRun.ForwardedPanic
|
additionalFailure := types.AdditionalFailure{
|
||||||
failure.Message = "This spec timed out and reported the following failure after the timeout:\n\n" + failureFromRun.Message
|
State: outcomeFromRun,
|
||||||
|
Failure: failure, //we make a copy - this will include all the configuration set up above...
|
||||||
|
}
|
||||||
|
//...and then we update the failure with the details from failureFromRun
|
||||||
|
additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
|
||||||
|
additionalFailure.Failure.ProgressReport = types.ProgressReport{}
|
||||||
|
if outcome == types.SpecStateTimedout {
|
||||||
|
additionalFailure.Failure.Message = fmt.Sprintf("A %s timeout occurred and then the following failure was recorded in the timedout node before it exited:\n%s", timeoutInPlay, failureFromRun.Message)
|
||||||
|
} else {
|
||||||
|
additionalFailure.Failure.Message = fmt.Sprintf("An interrupt occurred and then the following failure was recorded in the interrupted node before it exited:\n%s", failureFromRun.Message)
|
||||||
|
}
|
||||||
|
suite.reporter.EmitFailure(additionalFailure.State, additionalFailure.Failure)
|
||||||
|
failure.AdditionalFailure = &additionalFailure
|
||||||
}
|
}
|
||||||
return outcome, failure
|
return outcome, failure
|
||||||
}
|
}
|
||||||
if outcomeFromRun.Is(types.SpecStatePassed) {
|
if outcomeFromRun.Is(types.SpecStatePassed) {
|
||||||
return outcomeFromRun, types.Failure{}
|
return outcomeFromRun, types.Failure{}
|
||||||
} else {
|
} else {
|
||||||
failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic
|
failure.Message, failure.Location, failure.ForwardedPanic, failure.TimelineLocation = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
|
||||||
|
suite.reporter.EmitFailure(outcomeFromRun, failure)
|
||||||
return outcomeFromRun, failure
|
return outcomeFromRun, failure
|
||||||
}
|
}
|
||||||
case <-gracePeriodChannel:
|
case <-gracePeriodChannel:
|
||||||
@ -801,10 +924,12 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
|
|||||||
case <-deadlineChannel:
|
case <-deadlineChannel:
|
||||||
// we're out of time - the outcome is a timeout and we capture the failure and progress report
|
// we're out of time - the outcome is a timeout and we capture the failure and progress report
|
||||||
outcome = types.SpecStateTimedout
|
outcome = types.SpecStateTimedout
|
||||||
failure.Message, failure.Location = "Timedout", node.CodeLocation
|
failure.Message, failure.Location, failure.TimelineLocation = fmt.Sprintf("A %s timeout occurred", timeoutInPlay), node.CodeLocation, suite.generateTimelineLocation()
|
||||||
failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput()
|
failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput()
|
||||||
failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the timeout occurred:{{/}}"
|
failure.ProgressReport.Message = fmt.Sprintf("{{bold}}This is the Progress Report generated when the %s timeout occurred:{{/}}", timeoutInPlay)
|
||||||
deadlineChannel = nil
|
deadlineChannel = nil
|
||||||
|
suite.reporter.EmitFailure(outcome, failure)
|
||||||
|
|
||||||
// tell the spec to stop. it's important we generate the progress report first to make sure we capture where
|
// tell the spec to stop. it's important we generate the progress report first to make sure we capture where
|
||||||
// the spec is actually stuck
|
// the spec is actually stuck
|
||||||
sc.cancel()
|
sc.cancel()
|
||||||
@ -812,38 +937,44 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
|
|||||||
gracePeriodChannel = time.After(gracePeriod)
|
gracePeriodChannel = time.After(gracePeriod)
|
||||||
case <-interruptStatus.Channel:
|
case <-interruptStatus.Channel:
|
||||||
interruptStatus = suite.interruptHandler.Status()
|
interruptStatus = suite.interruptHandler.Status()
|
||||||
|
// ignore interruption from other process if we are cleaning up or reporting
|
||||||
|
if interruptStatus.Cause == interrupt_handler.InterruptCauseAbortByOtherProcess &&
|
||||||
|
node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
deadlineChannel = nil // don't worry about deadlines, time's up now
|
deadlineChannel = nil // don't worry about deadlines, time's up now
|
||||||
|
|
||||||
|
failureTimelineLocation := suite.generateTimelineLocation()
|
||||||
|
progressReport := suite.generateProgressReport(true)
|
||||||
|
|
||||||
if outcome == types.SpecStateInvalid {
|
if outcome == types.SpecStateInvalid {
|
||||||
outcome = types.SpecStateInterrupted
|
outcome = types.SpecStateInterrupted
|
||||||
failure.Message, failure.Location = interruptStatus.Message(), node.CodeLocation
|
failure.Message, failure.Location, failure.TimelineLocation = interruptStatus.Message(), node.CodeLocation, failureTimelineLocation
|
||||||
if interruptStatus.ShouldIncludeProgressReport() {
|
if interruptStatus.ShouldIncludeProgressReport() {
|
||||||
failure.ProgressReport = suite.generateProgressReport(true).WithoutCapturedGinkgoWriterOutput()
|
failure.ProgressReport = progressReport.WithoutCapturedGinkgoWriterOutput()
|
||||||
failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}"
|
failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}"
|
||||||
}
|
}
|
||||||
|
suite.reporter.EmitFailure(outcome, failure)
|
||||||
}
|
}
|
||||||
|
|
||||||
var report types.ProgressReport
|
progressReport = progressReport.WithoutOtherGoroutines()
|
||||||
if interruptStatus.ShouldIncludeProgressReport() {
|
|
||||||
report = suite.generateProgressReport(false)
|
|
||||||
}
|
|
||||||
|
|
||||||
sc.cancel()
|
sc.cancel()
|
||||||
|
|
||||||
if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
|
if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
|
||||||
if interruptStatus.ShouldIncludeProgressReport() {
|
if interruptStatus.ShouldIncludeProgressReport() {
|
||||||
report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
|
progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
|
||||||
suite.emitProgressReport(report)
|
suite.emitProgressReport(progressReport)
|
||||||
}
|
}
|
||||||
return outcome, failure
|
return outcome, failure
|
||||||
}
|
}
|
||||||
if interruptStatus.ShouldIncludeProgressReport() {
|
if interruptStatus.ShouldIncludeProgressReport() {
|
||||||
if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport {
|
if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport {
|
||||||
report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
|
progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
|
||||||
} else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly {
|
} else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly {
|
||||||
report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
|
progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
|
||||||
}
|
}
|
||||||
suite.emitProgressReport(report)
|
suite.emitProgressReport(progressReport)
|
||||||
}
|
}
|
||||||
|
|
||||||
if gracePeriodChannel == nil {
|
if gracePeriodChannel == nil {
|
||||||
@ -864,10 +995,12 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: search for usages and consider if reporter.EmitFailure() is necessary
|
||||||
func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
|
func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
|
||||||
return types.Failure{
|
return types.Failure{
|
||||||
Message: message,
|
Message: message,
|
||||||
Location: node.CodeLocation,
|
Location: node.CodeLocation,
|
||||||
|
TimelineLocation: suite.generateTimelineLocation(),
|
||||||
FailureNodeContext: types.FailureNodeIsLeafNode,
|
FailureNodeContext: types.FailureNodeIsLeafNode,
|
||||||
FailureNodeType: node.NodeType,
|
FailureNodeType: node.NodeType,
|
||||||
FailureNodeLocation: node.CodeLocation,
|
FailureNodeLocation: node.CodeLocation,
|
||||||
|
90
vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
generated
vendored
90
vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
generated
vendored
@ -5,16 +5,28 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2/formatter"
|
||||||
"github.com/onsi/ginkgo/v2/internal"
|
"github.com/onsi/ginkgo/v2/internal"
|
||||||
|
"github.com/onsi/ginkgo/v2/reporters"
|
||||||
"github.com/onsi/ginkgo/v2/types"
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
type failFunc func(message string, callerSkip ...int)
|
type failFunc func(message string, callerSkip ...int)
|
||||||
type skipFunc func(message string, callerSkip ...int)
|
type skipFunc func(message string, callerSkip ...int)
|
||||||
type cleanupFunc func(args ...interface{})
|
type cleanupFunc func(args ...any)
|
||||||
type reportFunc func() types.SpecReport
|
type reportFunc func() types.SpecReport
|
||||||
|
type addReportEntryFunc func(names string, args ...any)
|
||||||
|
type ginkgoWriterInterface interface {
|
||||||
|
io.Writer
|
||||||
|
|
||||||
func New(writer io.Writer, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, offset int) *ginkgoTestingTProxy {
|
Print(a ...interface{})
|
||||||
|
Printf(format string, a ...interface{})
|
||||||
|
Println(a ...interface{})
|
||||||
|
}
|
||||||
|
type ginkgoRecoverFunc func()
|
||||||
|
type attachProgressReporterFunc func(func() string) func()
|
||||||
|
|
||||||
|
func New(writer ginkgoWriterInterface, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, addReportEntry addReportEntryFunc, ginkgoRecover ginkgoRecoverFunc, attachProgressReporter attachProgressReporterFunc, randomSeed int64, parallelProcess int, parallelTotal int, noColor bool, offset int) *ginkgoTestingTProxy {
|
||||||
return &ginkgoTestingTProxy{
|
return &ginkgoTestingTProxy{
|
||||||
fail: fail,
|
fail: fail,
|
||||||
offset: offset,
|
offset: offset,
|
||||||
@ -22,6 +34,13 @@ func New(writer io.Writer, fail failFunc, skip skipFunc, cleanup cleanupFunc, re
|
|||||||
skip: skip,
|
skip: skip,
|
||||||
cleanup: cleanup,
|
cleanup: cleanup,
|
||||||
report: report,
|
report: report,
|
||||||
|
addReportEntry: addReportEntry,
|
||||||
|
ginkgoRecover: ginkgoRecover,
|
||||||
|
attachProgressReporter: attachProgressReporter,
|
||||||
|
randomSeed: randomSeed,
|
||||||
|
parallelProcess: parallelProcess,
|
||||||
|
parallelTotal: parallelTotal,
|
||||||
|
f: formatter.NewWithNoColorBool(noColor),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -31,9 +50,18 @@ type ginkgoTestingTProxy struct {
|
|||||||
cleanup cleanupFunc
|
cleanup cleanupFunc
|
||||||
report reportFunc
|
report reportFunc
|
||||||
offset int
|
offset int
|
||||||
writer io.Writer
|
writer ginkgoWriterInterface
|
||||||
|
addReportEntry addReportEntryFunc
|
||||||
|
ginkgoRecover ginkgoRecoverFunc
|
||||||
|
attachProgressReporter attachProgressReporterFunc
|
||||||
|
randomSeed int64
|
||||||
|
parallelProcess int
|
||||||
|
parallelTotal int
|
||||||
|
f formatter.Formatter
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// basic testing.T support
|
||||||
|
|
||||||
func (t *ginkgoTestingTProxy) Cleanup(f func()) {
|
func (t *ginkgoTestingTProxy) Cleanup(f func()) {
|
||||||
t.cleanup(f, internal.Offset(1))
|
t.cleanup(f, internal.Offset(1))
|
||||||
}
|
}
|
||||||
@ -81,7 +109,7 @@ func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (t *ginkgoTestingTProxy) Helper() {
|
func (t *ginkgoTestingTProxy) Helper() {
|
||||||
// No-op
|
types.MarkAsHelper(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
|
func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
|
||||||
@ -126,3 +154,57 @@ func (t *ginkgoTestingTProxy) TempDir() string {
|
|||||||
|
|
||||||
return tmpDir
|
return tmpDir
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// FullGinkgoTInterface
|
||||||
|
func (t *ginkgoTestingTProxy) AddReportEntryVisibilityAlways(name string, args ...any) {
|
||||||
|
finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityAlways}
|
||||||
|
t.addReportEntry(name, append(finalArgs, args...)...)
|
||||||
|
}
|
||||||
|
func (t *ginkgoTestingTProxy) AddReportEntryVisibilityFailureOrVerbose(name string, args ...any) {
|
||||||
|
finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose}
|
||||||
|
t.addReportEntry(name, append(finalArgs, args...)...)
|
||||||
|
}
|
||||||
|
func (t *ginkgoTestingTProxy) AddReportEntryVisibilityNever(name string, args ...any) {
|
||||||
|
finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityNever}
|
||||||
|
t.addReportEntry(name, append(finalArgs, args...)...)
|
||||||
|
}
|
||||||
|
func (t *ginkgoTestingTProxy) Print(a ...any) {
|
||||||
|
t.writer.Print(a...)
|
||||||
|
}
|
||||||
|
func (t *ginkgoTestingTProxy) Printf(format string, a ...any) {
|
||||||
|
t.writer.Printf(format, a...)
|
||||||
|
}
|
||||||
|
func (t *ginkgoTestingTProxy) Println(a ...any) {
|
||||||
|
t.writer.Println(a...)
|
||||||
|
}
|
||||||
|
func (t *ginkgoTestingTProxy) F(format string, args ...any) string {
|
||||||
|
return t.f.F(format, args...)
|
||||||
|
}
|
||||||
|
func (t *ginkgoTestingTProxy) Fi(indentation uint, format string, args ...any) string {
|
||||||
|
return t.f.Fi(indentation, format, args...)
|
||||||
|
}
|
||||||
|
func (t *ginkgoTestingTProxy) Fiw(indentation uint, maxWidth uint, format string, args ...any) string {
|
||||||
|
return t.f.Fiw(indentation, maxWidth, format, args...)
|
||||||
|
}
|
||||||
|
func (t *ginkgoTestingTProxy) RenderTimeline() string {
|
||||||
|
return reporters.RenderTimeline(t.report(), false)
|
||||||
|
}
|
||||||
|
func (t *ginkgoTestingTProxy) GinkgoRecover() {
|
||||||
|
t.ginkgoRecover()
|
||||||
|
}
|
||||||
|
func (t *ginkgoTestingTProxy) DeferCleanup(args ...any) {
|
||||||
|
finalArgs := []any{internal.Offset(1)}
|
||||||
|
t.cleanup(append(finalArgs, args...)...)
|
||||||
|
}
|
||||||
|
func (t *ginkgoTestingTProxy) RandomSeed() int64 {
|
||||||
|
return t.randomSeed
|
||||||
|
}
|
||||||
|
func (t *ginkgoTestingTProxy) ParallelProcess() int {
|
||||||
|
return t.parallelProcess
|
||||||
|
}
|
||||||
|
func (t *ginkgoTestingTProxy) ParallelTotal() int {
|
||||||
|
return t.parallelTotal
|
||||||
|
}
|
||||||
|
func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() {
|
||||||
|
return t.attachProgressReporter(f)
|
||||||
|
}
|
||||||
|
32
vendor/github.com/onsi/ginkgo/v2/internal/writer.go
generated
vendored
32
vendor/github.com/onsi/ginkgo/v2/internal/writer.go
generated
vendored
@ -22,6 +22,7 @@ type WriterInterface interface {
|
|||||||
|
|
||||||
Truncate()
|
Truncate()
|
||||||
Bytes() []byte
|
Bytes() []byte
|
||||||
|
Len() int
|
||||||
}
|
}
|
||||||
|
|
||||||
// Writer implements WriterInterface and GinkgoWriterInterface
|
// Writer implements WriterInterface and GinkgoWriterInterface
|
||||||
@ -31,6 +32,9 @@ type Writer struct {
|
|||||||
lock *sync.Mutex
|
lock *sync.Mutex
|
||||||
mode WriterMode
|
mode WriterMode
|
||||||
|
|
||||||
|
streamIndent []byte
|
||||||
|
indentNext bool
|
||||||
|
|
||||||
teeWriters []io.Writer
|
teeWriters []io.Writer
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -40,6 +44,8 @@ func NewWriter(outWriter io.Writer) *Writer {
|
|||||||
lock: &sync.Mutex{},
|
lock: &sync.Mutex{},
|
||||||
outWriter: outWriter,
|
outWriter: outWriter,
|
||||||
mode: WriterModeStreamAndBuffer,
|
mode: WriterModeStreamAndBuffer,
|
||||||
|
streamIndent: []byte(" "),
|
||||||
|
indentNext: true,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -49,6 +55,14 @@ func (w *Writer) SetMode(mode WriterMode) {
|
|||||||
w.mode = mode
|
w.mode = mode
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (w *Writer) Len() int {
|
||||||
|
w.lock.Lock()
|
||||||
|
defer w.lock.Unlock()
|
||||||
|
return w.buffer.Len()
|
||||||
|
}
|
||||||
|
|
||||||
|
var newline = []byte("\n")
|
||||||
|
|
||||||
func (w *Writer) Write(b []byte) (n int, err error) {
|
func (w *Writer) Write(b []byte) (n int, err error) {
|
||||||
w.lock.Lock()
|
w.lock.Lock()
|
||||||
defer w.lock.Unlock()
|
defer w.lock.Unlock()
|
||||||
@ -58,7 +72,21 @@ func (w *Writer) Write(b []byte) (n int, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if w.mode == WriterModeStreamAndBuffer {
|
if w.mode == WriterModeStreamAndBuffer {
|
||||||
w.outWriter.Write(b)
|
line, remaining, found := []byte{}, b, false
|
||||||
|
for len(remaining) > 0 {
|
||||||
|
line, remaining, found = bytes.Cut(remaining, newline)
|
||||||
|
if len(line) > 0 {
|
||||||
|
if w.indentNext {
|
||||||
|
w.outWriter.Write(w.streamIndent)
|
||||||
|
w.indentNext = false
|
||||||
|
}
|
||||||
|
w.outWriter.Write(line)
|
||||||
|
}
|
||||||
|
if found {
|
||||||
|
w.outWriter.Write(newline)
|
||||||
|
w.indentNext = true
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return w.buffer.Write(b)
|
return w.buffer.Write(b)
|
||||||
}
|
}
|
||||||
@ -107,6 +135,6 @@ func (w *Writer) Println(a ...interface{}) {
|
|||||||
|
|
||||||
func GinkgoLogrFunc(writer *Writer) logr.Logger {
|
func GinkgoLogrFunc(writer *Writer) logr.Logger {
|
||||||
return funcr.New(func(prefix, args string) {
|
return funcr.New(func(prefix, args string) {
|
||||||
writer.Printf("%s", args)
|
writer.Printf("%s\n", args)
|
||||||
}, funcr.Options{})
|
}, funcr.Options{})
|
||||||
}
|
}
|
||||||
|
675
vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
generated
vendored
675
vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
generated
vendored
@ -12,6 +12,7 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/onsi/ginkgo/v2/formatter"
|
"github.com/onsi/ginkgo/v2/formatter"
|
||||||
@ -23,13 +24,16 @@ type DefaultReporter struct {
|
|||||||
writer io.Writer
|
writer io.Writer
|
||||||
|
|
||||||
// managing the emission stream
|
// managing the emission stream
|
||||||
lastChar string
|
lastCharWasNewline bool
|
||||||
lastEmissionWasDelimiter bool
|
lastEmissionWasDelimiter bool
|
||||||
|
|
||||||
// rendering
|
// rendering
|
||||||
specDenoter string
|
specDenoter string
|
||||||
retryDenoter string
|
retryDenoter string
|
||||||
formatter formatter.Formatter
|
formatter formatter.Formatter
|
||||||
|
|
||||||
|
runningInParallel bool
|
||||||
|
lock *sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
|
func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
|
||||||
@ -44,12 +48,13 @@ func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultRep
|
|||||||
conf: conf,
|
conf: conf,
|
||||||
writer: writer,
|
writer: writer,
|
||||||
|
|
||||||
lastChar: "\n",
|
lastCharWasNewline: true,
|
||||||
lastEmissionWasDelimiter: false,
|
lastEmissionWasDelimiter: false,
|
||||||
|
|
||||||
specDenoter: "•",
|
specDenoter: "•",
|
||||||
retryDenoter: "↺",
|
retryDenoter: "↺",
|
||||||
formatter: formatter.NewWithNoColorBool(conf.NoColor),
|
formatter: formatter.NewWithNoColorBool(conf.NoColor),
|
||||||
|
lock: &sync.Mutex{},
|
||||||
}
|
}
|
||||||
if runtime.GOOS == "windows" {
|
if runtime.GOOS == "windows" {
|
||||||
reporter.specDenoter = "+"
|
reporter.specDenoter = "+"
|
||||||
@ -97,230 +102,10 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *DefaultReporter) WillRun(report types.SpecReport) {
|
|
||||||
if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
r.emitDelimiter()
|
|
||||||
indentation := uint(0)
|
|
||||||
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
|
||||||
r.emitBlock(r.f("{{bold}}[%s] %s{{/}}", report.LeafNodeType.String(), report.LeafNodeText))
|
|
||||||
} else {
|
|
||||||
if len(report.ContainerHierarchyTexts) > 0 {
|
|
||||||
r.emitBlock(r.cycleJoin(report.ContainerHierarchyTexts, " "))
|
|
||||||
indentation = 1
|
|
||||||
}
|
|
||||||
line := r.fi(indentation, "{{bold}}%s{{/}}", report.LeafNodeText)
|
|
||||||
labels := report.Labels()
|
|
||||||
if len(labels) > 0 {
|
|
||||||
line += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels, ", "))
|
|
||||||
}
|
|
||||||
r.emitBlock(line)
|
|
||||||
}
|
|
||||||
r.emitBlock(r.fi(indentation, "{{gray}}%s{{/}}", report.LeafNodeLocation))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *DefaultReporter) DidRun(report types.SpecReport) {
|
|
||||||
v := r.conf.Verbosity()
|
|
||||||
var header, highlightColor string
|
|
||||||
includeRuntime, emitGinkgoWriterOutput, stream, denoter := true, true, false, r.specDenoter
|
|
||||||
succinctLocationBlock := v.Is(types.VerbosityLevelSuccinct)
|
|
||||||
|
|
||||||
hasGW := report.CapturedGinkgoWriterOutput != ""
|
|
||||||
hasStd := report.CapturedStdOutErr != ""
|
|
||||||
hasEmittableReports := report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) || (report.ReportEntries.HasVisibility(types.ReportEntryVisibilityFailureOrVerbose) && (!report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose)))
|
|
||||||
|
|
||||||
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
|
||||||
denoter = fmt.Sprintf("[%s]", report.LeafNodeType)
|
|
||||||
}
|
|
||||||
|
|
||||||
highlightColor = r.highlightColorForState(report.State)
|
|
||||||
|
|
||||||
switch report.State {
|
|
||||||
case types.SpecStatePassed:
|
|
||||||
succinctLocationBlock = v.LT(types.VerbosityLevelVerbose)
|
|
||||||
emitGinkgoWriterOutput = (r.conf.AlwaysEmitGinkgoWriter || v.GTE(types.VerbosityLevelVerbose)) && hasGW
|
|
||||||
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
|
||||||
if v.GTE(types.VerbosityLevelVerbose) || hasStd || hasEmittableReports {
|
|
||||||
header = fmt.Sprintf("%s PASSED", denoter)
|
|
||||||
} else {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
header, stream = denoter, true
|
|
||||||
if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 {
|
|
||||||
header, stream = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), false
|
|
||||||
}
|
|
||||||
if report.RunTime > r.conf.SlowSpecThreshold {
|
|
||||||
header, stream = fmt.Sprintf("%s [SLOW TEST]", header), false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if hasStd || emitGinkgoWriterOutput || hasEmittableReports {
|
|
||||||
stream = false
|
|
||||||
}
|
|
||||||
case types.SpecStatePending:
|
|
||||||
includeRuntime, emitGinkgoWriterOutput = false, false
|
|
||||||
if v.Is(types.VerbosityLevelSuccinct) {
|
|
||||||
header, stream = "P", true
|
|
||||||
} else {
|
|
||||||
header, succinctLocationBlock = "P [PENDING]", v.LT(types.VerbosityLevelVeryVerbose)
|
|
||||||
}
|
|
||||||
case types.SpecStateSkipped:
|
|
||||||
if report.Failure.Message != "" || v.Is(types.VerbosityLevelVeryVerbose) {
|
|
||||||
header = "S [SKIPPED]"
|
|
||||||
} else {
|
|
||||||
header, stream = "S", true
|
|
||||||
}
|
|
||||||
case types.SpecStateFailed:
|
|
||||||
header = fmt.Sprintf("%s [FAILED]", denoter)
|
|
||||||
case types.SpecStateTimedout:
|
|
||||||
header = fmt.Sprintf("%s [TIMEDOUT]", denoter)
|
|
||||||
case types.SpecStatePanicked:
|
|
||||||
header = fmt.Sprintf("%s! [PANICKED]", denoter)
|
|
||||||
case types.SpecStateInterrupted:
|
|
||||||
header = fmt.Sprintf("%s! [INTERRUPTED]", denoter)
|
|
||||||
case types.SpecStateAborted:
|
|
||||||
header = fmt.Sprintf("%s! [ABORTED]", denoter)
|
|
||||||
}
|
|
||||||
|
|
||||||
if report.State.Is(types.SpecStateFailureStates) && report.MaxMustPassRepeatedly > 1 {
|
|
||||||
header, stream = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts), false
|
|
||||||
}
|
|
||||||
// Emit stream and return
|
|
||||||
if stream {
|
|
||||||
r.emit(r.f(highlightColor + header + "{{/}}"))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Emit header
|
|
||||||
r.emitDelimiter()
|
|
||||||
if includeRuntime {
|
|
||||||
header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds())
|
|
||||||
}
|
|
||||||
r.emitBlock(r.f(highlightColor + header + "{{/}}"))
|
|
||||||
|
|
||||||
// Emit Code Location Block
|
|
||||||
r.emitBlock(r.codeLocationBlock(report, highlightColor, succinctLocationBlock, false))
|
|
||||||
|
|
||||||
//Emit Stdout/Stderr Output
|
|
||||||
if hasStd {
|
|
||||||
r.emitBlock("\n")
|
|
||||||
r.emitBlock(r.fi(1, "{{gray}}Begin Captured StdOut/StdErr Output >>{{/}}"))
|
|
||||||
r.emitBlock(r.fi(2, "%s", report.CapturedStdOutErr))
|
|
||||||
r.emitBlock(r.fi(1, "{{gray}}<< End Captured StdOut/StdErr Output{{/}}"))
|
|
||||||
}
|
|
||||||
|
|
||||||
//Emit Captured GinkgoWriter Output
|
|
||||||
if emitGinkgoWriterOutput && hasGW {
|
|
||||||
r.emitBlock("\n")
|
|
||||||
r.emitGinkgoWriterOutput(1, report.CapturedGinkgoWriterOutput, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
if hasEmittableReports {
|
|
||||||
r.emitBlock("\n")
|
|
||||||
r.emitBlock(r.fi(1, "{{gray}}Begin Report Entries >>{{/}}"))
|
|
||||||
reportEntries := report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways)
|
|
||||||
if !report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose) {
|
|
||||||
reportEntries = report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose)
|
|
||||||
}
|
|
||||||
for _, entry := range reportEntries {
|
|
||||||
r.emitBlock(r.fi(2, "{{bold}}"+entry.Name+"{{gray}} - %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT)))
|
|
||||||
if representation := entry.StringRepresentation(); representation != "" {
|
|
||||||
r.emitBlock(r.fi(3, representation))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
r.emitBlock(r.fi(1, "{{gray}}<< End Report Entries{{/}}"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Emit Failure Message
|
|
||||||
if !report.Failure.IsZero() {
|
|
||||||
r.emitBlock("\n")
|
|
||||||
r.EmitFailure(1, report.State, report.Failure, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(report.AdditionalFailures) > 0 {
|
|
||||||
if v.GTE(types.VerbosityLevelVerbose) {
|
|
||||||
r.emitBlock("\n")
|
|
||||||
r.emitBlock(r.fi(1, "{{bold}}There were additional failures detected after the initial failure:{{/}}"))
|
|
||||||
for i, additionalFailure := range report.AdditionalFailures {
|
|
||||||
r.EmitFailure(2, additionalFailure.State, additionalFailure.Failure, true)
|
|
||||||
if i < len(report.AdditionalFailures)-1 {
|
|
||||||
r.emitBlock(r.fi(2, "{{gray}}%s{{/}}", strings.Repeat("-", 10)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
r.emitBlock("\n")
|
|
||||||
r.emitBlock(r.fi(1, "{{bold}}There were additional failures detected after the initial failure. Here's a summary - for full details run Ginkgo in verbose mode:{{/}}"))
|
|
||||||
for _, additionalFailure := range report.AdditionalFailures {
|
|
||||||
r.emitBlock(r.fi(2, r.highlightColorForState(additionalFailure.State)+"[%s]{{/}} in [%s] at %s",
|
|
||||||
r.humanReadableState(additionalFailure.State),
|
|
||||||
additionalFailure.Failure.FailureNodeType,
|
|
||||||
additionalFailure.Failure.Location,
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
r.emitDelimiter()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *DefaultReporter) highlightColorForState(state types.SpecState) string {
|
|
||||||
switch state {
|
|
||||||
case types.SpecStatePassed:
|
|
||||||
return "{{green}}"
|
|
||||||
case types.SpecStatePending:
|
|
||||||
return "{{yellow}}"
|
|
||||||
case types.SpecStateSkipped:
|
|
||||||
return "{{cyan}}"
|
|
||||||
case types.SpecStateFailed:
|
|
||||||
return "{{red}}"
|
|
||||||
case types.SpecStateTimedout:
|
|
||||||
return "{{orange}}"
|
|
||||||
case types.SpecStatePanicked:
|
|
||||||
return "{{magenta}}"
|
|
||||||
case types.SpecStateInterrupted:
|
|
||||||
return "{{orange}}"
|
|
||||||
case types.SpecStateAborted:
|
|
||||||
return "{{coral}}"
|
|
||||||
default:
|
|
||||||
return "{{gray}}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *DefaultReporter) humanReadableState(state types.SpecState) string {
|
|
||||||
return strings.ToUpper(state.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *DefaultReporter) EmitFailure(indent uint, state types.SpecState, failure types.Failure, includeState bool) {
|
|
||||||
highlightColor := r.highlightColorForState(state)
|
|
||||||
if includeState {
|
|
||||||
r.emitBlock(r.fi(indent, highlightColor+"[%s]{{/}}", r.humanReadableState(state)))
|
|
||||||
}
|
|
||||||
r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.Message))
|
|
||||||
r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}}\n", failure.FailureNodeType, failure.Location))
|
|
||||||
if failure.ForwardedPanic != "" {
|
|
||||||
r.emitBlock("\n")
|
|
||||||
r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))
|
|
||||||
}
|
|
||||||
|
|
||||||
if r.conf.FullTrace || failure.ForwardedPanic != "" {
|
|
||||||
r.emitBlock("\n")
|
|
||||||
r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}"))
|
|
||||||
r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace))
|
|
||||||
}
|
|
||||||
|
|
||||||
if !failure.ProgressReport.IsZero() {
|
|
||||||
r.emitBlock("\n")
|
|
||||||
r.emitProgressReport(indent, false, failure.ProgressReport)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *DefaultReporter) SuiteDidEnd(report types.Report) {
|
func (r *DefaultReporter) SuiteDidEnd(report types.Report) {
|
||||||
failures := report.SpecReports.WithState(types.SpecStateFailureStates)
|
failures := report.SpecReports.WithState(types.SpecStateFailureStates)
|
||||||
if len(failures) > 0 {
|
if len(failures) > 0 {
|
||||||
r.emitBlock("\n\n")
|
r.emitBlock("\n")
|
||||||
if len(failures) > 1 {
|
if len(failures) > 1 {
|
||||||
r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures)))
|
r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures)))
|
||||||
} else {
|
} else {
|
||||||
@ -338,7 +123,7 @@ func (r *DefaultReporter) SuiteDidEnd(report types.Report) {
|
|||||||
case types.SpecStateInterrupted:
|
case types.SpecStateInterrupted:
|
||||||
highlightColor, heading = "{{orange}}", "[INTERRUPTED]"
|
highlightColor, heading = "{{orange}}", "[INTERRUPTED]"
|
||||||
}
|
}
|
||||||
locationBlock := r.codeLocationBlock(specReport, highlightColor, true, true)
|
locationBlock := r.codeLocationBlock(specReport, highlightColor, false, true)
|
||||||
r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock))
|
r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -387,14 +172,271 @@ func (r *DefaultReporter) SuiteDidEnd(report types.Report) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) WillRun(report types.SpecReport) {
|
||||||
|
v := r.conf.Verbosity()
|
||||||
|
if v.LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) || report.RunningInParallel {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
r.emitDelimiter(0)
|
||||||
|
r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) DidRun(report types.SpecReport) {
|
||||||
|
v := r.conf.Verbosity()
|
||||||
|
inParallel := report.RunningInParallel
|
||||||
|
|
||||||
|
header := r.specDenoter
|
||||||
|
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
||||||
|
header = fmt.Sprintf("[%s]", report.LeafNodeType)
|
||||||
|
}
|
||||||
|
highlightColor := r.highlightColorForState(report.State)
|
||||||
|
|
||||||
|
// have we already been streaming the timeline?
|
||||||
|
timelineHasBeenStreaming := v.GTE(types.VerbosityLevelVerbose) && !inParallel
|
||||||
|
|
||||||
|
// should we show the timeline?
|
||||||
|
var timeline types.Timeline
|
||||||
|
showTimeline := !timelineHasBeenStreaming && (v.GTE(types.VerbosityLevelVerbose) || report.Failed())
|
||||||
|
if showTimeline {
|
||||||
|
timeline = report.Timeline().WithoutHiddenReportEntries()
|
||||||
|
keepVeryVerboseSpecEvents := v.Is(types.VerbosityLevelVeryVerbose) ||
|
||||||
|
(v.Is(types.VerbosityLevelVerbose) && r.conf.ShowNodeEvents) ||
|
||||||
|
(report.Failed() && r.conf.ShowNodeEvents)
|
||||||
|
if !keepVeryVerboseSpecEvents {
|
||||||
|
timeline = timeline.WithoutVeryVerboseSpecEvents()
|
||||||
|
}
|
||||||
|
if len(timeline) == 0 && report.CapturedGinkgoWriterOutput == "" {
|
||||||
|
// the timeline is completely empty - don't show it
|
||||||
|
showTimeline = false
|
||||||
|
}
|
||||||
|
if v.LT(types.VerbosityLevelVeryVerbose) && report.CapturedGinkgoWriterOutput == "" && len(timeline) > 0 {
|
||||||
|
//if we aren't -vv and the timeline only has a single failure, don't show it as it will appear at the end of the report
|
||||||
|
failure, isFailure := timeline[0].(types.Failure)
|
||||||
|
if isFailure && (len(timeline) == 1 || (len(timeline) == 2 && failure.AdditionalFailure != nil)) {
|
||||||
|
showTimeline = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// should we have a separate section for always-visible reports?
|
||||||
|
showSeparateVisibilityAlwaysReportsSection := !timelineHasBeenStreaming && !showTimeline && report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways)
|
||||||
|
|
||||||
|
// should we have a separate section for captured stdout/stderr
|
||||||
|
showSeparateStdSection := inParallel && (report.CapturedStdOutErr != "")
|
||||||
|
|
||||||
|
// given all that - do we have any actual content to show? or are we a single denoter in a stream?
|
||||||
|
reportHasContent := v.Is(types.VerbosityLevelVeryVerbose) || showTimeline || showSeparateVisibilityAlwaysReportsSection || showSeparateStdSection || report.Failed() || (v.Is(types.VerbosityLevelVerbose) && !report.State.Is(types.SpecStateSkipped))
|
||||||
|
|
||||||
|
// should we show a runtime?
|
||||||
|
includeRuntime := !report.State.Is(types.SpecStateSkipped|types.SpecStatePending) || (report.State.Is(types.SpecStateSkipped) && report.Failure.Message != "")
|
||||||
|
|
||||||
|
// should we show the codelocation block?
|
||||||
|
showCodeLocation := !timelineHasBeenStreaming || !report.State.Is(types.SpecStatePassed)
|
||||||
|
|
||||||
|
switch report.State {
|
||||||
|
case types.SpecStatePassed:
|
||||||
|
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) && !reportHasContent {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
||||||
|
header = fmt.Sprintf("%s PASSED", header)
|
||||||
|
}
|
||||||
|
if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 {
|
||||||
|
header, reportHasContent = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), true
|
||||||
|
}
|
||||||
|
case types.SpecStatePending:
|
||||||
|
header = "P"
|
||||||
|
if v.GT(types.VerbosityLevelSuccinct) {
|
||||||
|
header, reportHasContent = "P [PENDING]", true
|
||||||
|
}
|
||||||
|
case types.SpecStateSkipped:
|
||||||
|
header = "S"
|
||||||
|
if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && report.Failure.Message != "") {
|
||||||
|
header, reportHasContent = "S [SKIPPED]", true
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
header = fmt.Sprintf("%s [%s]", header, r.humanReadableState(report.State))
|
||||||
|
if report.MaxMustPassRepeatedly > 1 {
|
||||||
|
header = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we have no content to show, jsut emit the header and return
|
||||||
|
if !reportHasContent {
|
||||||
|
r.emit(r.f(highlightColor + header + "{{/}}"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if includeRuntime {
|
||||||
|
header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Emit header
|
||||||
|
if !timelineHasBeenStreaming {
|
||||||
|
r.emitDelimiter(0)
|
||||||
|
}
|
||||||
|
r.emitBlock(r.f(highlightColor + header + "{{/}}"))
|
||||||
|
if showCodeLocation {
|
||||||
|
r.emitBlock(r.codeLocationBlock(report, highlightColor, v.Is(types.VerbosityLevelVeryVerbose), false))
|
||||||
|
}
|
||||||
|
|
||||||
|
//Emit Stdout/Stderr Output
|
||||||
|
if showSeparateStdSection {
|
||||||
|
r.emitBlock("\n")
|
||||||
|
r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}"))
|
||||||
|
r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
|
||||||
|
r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}"))
|
||||||
|
}
|
||||||
|
|
||||||
|
if showSeparateVisibilityAlwaysReportsSection {
|
||||||
|
r.emitBlock("\n")
|
||||||
|
r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}"))
|
||||||
|
for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
|
||||||
|
r.emitReportEntry(1, entry)
|
||||||
|
}
|
||||||
|
r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}"))
|
||||||
|
}
|
||||||
|
|
||||||
|
if showTimeline {
|
||||||
|
r.emitBlock("\n")
|
||||||
|
r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}"))
|
||||||
|
r.emitTimeline(1, report, timeline)
|
||||||
|
r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Emit Failure Message
|
||||||
|
if !report.Failure.IsZero() && !v.Is(types.VerbosityLevelVeryVerbose) {
|
||||||
|
r.emitBlock("\n")
|
||||||
|
r.emitFailure(1, report.State, report.Failure, true)
|
||||||
|
if len(report.AdditionalFailures) > 0 {
|
||||||
|
r.emitBlock(r.fi(1, "\nThere were {{bold}}{{red}}additional failures{{/}} detected. To view them in detail run {{bold}}ginkgo -vv{{/}}"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
r.emitDelimiter(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) highlightColorForState(state types.SpecState) string {
|
||||||
|
switch state {
|
||||||
|
case types.SpecStatePassed:
|
||||||
|
return "{{green}}"
|
||||||
|
case types.SpecStatePending:
|
||||||
|
return "{{yellow}}"
|
||||||
|
case types.SpecStateSkipped:
|
||||||
|
return "{{cyan}}"
|
||||||
|
case types.SpecStateFailed:
|
||||||
|
return "{{red}}"
|
||||||
|
case types.SpecStateTimedout:
|
||||||
|
return "{{orange}}"
|
||||||
|
case types.SpecStatePanicked:
|
||||||
|
return "{{magenta}}"
|
||||||
|
case types.SpecStateInterrupted:
|
||||||
|
return "{{orange}}"
|
||||||
|
case types.SpecStateAborted:
|
||||||
|
return "{{coral}}"
|
||||||
|
default:
|
||||||
|
return "{{gray}}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) humanReadableState(state types.SpecState) string {
|
||||||
|
return strings.ToUpper(state.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, timeline types.Timeline) {
|
||||||
|
isVeryVerbose := r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose)
|
||||||
|
gw := report.CapturedGinkgoWriterOutput
|
||||||
|
cursor := 0
|
||||||
|
for _, entry := range timeline {
|
||||||
|
tl := entry.GetTimelineLocation()
|
||||||
|
if tl.Offset < len(gw) {
|
||||||
|
r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset]))
|
||||||
|
cursor = tl.Offset
|
||||||
|
} else if cursor < len(gw) {
|
||||||
|
r.emit(r.fi(indent, "%s", gw[cursor:]))
|
||||||
|
cursor = len(gw)
|
||||||
|
}
|
||||||
|
switch x := entry.(type) {
|
||||||
|
case types.Failure:
|
||||||
|
if isVeryVerbose {
|
||||||
|
r.emitFailure(indent, report.State, x, false)
|
||||||
|
} else {
|
||||||
|
r.emitShortFailure(indent, report.State, x)
|
||||||
|
}
|
||||||
|
case types.AdditionalFailure:
|
||||||
|
if isVeryVerbose {
|
||||||
|
r.emitFailure(indent, x.State, x.Failure, true)
|
||||||
|
} else {
|
||||||
|
r.emitShortFailure(indent, x.State, x.Failure)
|
||||||
|
}
|
||||||
|
case types.ReportEntry:
|
||||||
|
r.emitReportEntry(indent, x)
|
||||||
|
case types.ProgressReport:
|
||||||
|
r.emitProgressReport(indent, false, x)
|
||||||
|
case types.SpecEvent:
|
||||||
|
if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents {
|
||||||
|
r.emitSpecEvent(indent, x, isVeryVerbose)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if cursor < len(gw) {
|
||||||
|
r.emit(r.fi(indent, "%s", gw[cursor:]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) EmitFailure(state types.SpecState, failure types.Failure) {
|
||||||
|
if r.conf.Verbosity().Is(types.VerbosityLevelVerbose) {
|
||||||
|
r.emitShortFailure(1, state, failure)
|
||||||
|
} else if r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) {
|
||||||
|
r.emitFailure(1, state, failure, true)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, failure types.Failure) {
|
||||||
|
r.emitBlock(r.fi(indent, r.highlightColorForState(state)+"[%s]{{/}} in [%s] - %s {{gray}}@ %s{{/}}",
|
||||||
|
r.humanReadableState(state),
|
||||||
|
failure.FailureNodeType,
|
||||||
|
failure.Location,
|
||||||
|
failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) {
|
||||||
|
highlightColor := r.highlightColorForState(state)
|
||||||
|
r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message))
|
||||||
|
r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
|
||||||
|
if failure.ForwardedPanic != "" {
|
||||||
|
r.emitBlock("\n")
|
||||||
|
r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.conf.FullTrace || failure.ForwardedPanic != "" {
|
||||||
|
r.emitBlock("\n")
|
||||||
|
r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}"))
|
||||||
|
r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace))
|
||||||
|
}
|
||||||
|
|
||||||
|
if !failure.ProgressReport.IsZero() {
|
||||||
|
r.emitBlock("\n")
|
||||||
|
r.emitProgressReport(indent, false, failure.ProgressReport)
|
||||||
|
}
|
||||||
|
|
||||||
|
if failure.AdditionalFailure != nil && includeAdditionalFailure {
|
||||||
|
r.emitBlock("\n")
|
||||||
|
r.emitFailure(indent, failure.AdditionalFailure.State, failure.AdditionalFailure.Failure, true)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) {
|
func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) {
|
||||||
r.emitDelimiter()
|
r.emitDelimiter(1)
|
||||||
|
|
||||||
if report.RunningInParallel {
|
if report.RunningInParallel {
|
||||||
r.emit(r.f("{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess))
|
r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess))
|
||||||
}
|
}
|
||||||
r.emitProgressReport(0, true, report)
|
shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)
|
||||||
r.emitDelimiter()
|
r.emitProgressReport(1, shouldEmitGW, report)
|
||||||
|
r.emitDelimiter(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) {
|
func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) {
|
||||||
@ -409,7 +451,7 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput
|
|||||||
r.emit(" ")
|
r.emit(" ")
|
||||||
subjectIndent = 0
|
subjectIndent = 0
|
||||||
}
|
}
|
||||||
r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time.Sub(report.SpecStartTime).Round(time.Millisecond)))
|
r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time().Sub(report.SpecStartTime).Round(time.Millisecond)))
|
||||||
r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation))
|
r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation))
|
||||||
indent += 1
|
indent += 1
|
||||||
}
|
}
|
||||||
@ -419,12 +461,12 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput
|
|||||||
r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText))
|
r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText))
|
||||||
}
|
}
|
||||||
|
|
||||||
r.emit(r.f(" (Node Runtime: %s)\n", report.Time.Sub(report.CurrentNodeStartTime).Round(time.Millisecond)))
|
r.emit(r.f(" (Node Runtime: %s)\n", report.Time().Sub(report.CurrentNodeStartTime).Round(time.Millisecond)))
|
||||||
r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation))
|
r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation))
|
||||||
indent += 1
|
indent += 1
|
||||||
}
|
}
|
||||||
if report.CurrentStepText != "" {
|
if report.CurrentStepText != "" {
|
||||||
r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time.Sub(report.CurrentStepStartTime).Round(time.Millisecond)))
|
r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time().Sub(report.CurrentStepStartTime).Round(time.Millisecond)))
|
||||||
r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation))
|
r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation))
|
||||||
indent += 1
|
indent += 1
|
||||||
}
|
}
|
||||||
@ -433,9 +475,19 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput
|
|||||||
indent -= 1
|
indent -= 1
|
||||||
}
|
}
|
||||||
|
|
||||||
if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" && (report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)) {
|
if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" {
|
||||||
r.emit("\n")
|
r.emit("\n")
|
||||||
r.emitGinkgoWriterOutput(indent, report.CapturedGinkgoWriterOutput, 10)
|
r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}"))
|
||||||
|
limit, lines := 10, strings.Split(report.CapturedGinkgoWriterOutput, "\n")
|
||||||
|
if len(lines) <= limit {
|
||||||
|
r.emitBlock(r.fi(indent+1, "%s", report.CapturedGinkgoWriterOutput))
|
||||||
|
} else {
|
||||||
|
r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}"))
|
||||||
|
for _, line := range lines[len(lines)-limit-1:] {
|
||||||
|
r.emitBlock(r.fi(indent+1, "%s", line))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}"))
|
||||||
}
|
}
|
||||||
|
|
||||||
if !report.SpecGoroutine().IsZero() {
|
if !report.SpecGoroutine().IsZero() {
|
||||||
@ -471,22 +523,48 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *DefaultReporter) emitGinkgoWriterOutput(indent uint, output string, limit int) {
|
func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) {
|
||||||
r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}"))
|
if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || entry.Visibility == types.ReportEntryVisibilityNever {
|
||||||
if limit == 0 {
|
return
|
||||||
r.emitBlock(r.fi(indent+1, "%s", output))
|
}
|
||||||
} else {
|
r.emitReportEntry(1, entry)
|
||||||
lines := strings.Split(output, "\n")
|
}
|
||||||
if len(lines) <= limit {
|
|
||||||
r.emitBlock(r.fi(indent+1, "%s", output))
|
func (r *DefaultReporter) emitReportEntry(indent uint, entry types.ReportEntry) {
|
||||||
} else {
|
r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} "+fmt.Sprintf("- %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT))))
|
||||||
r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}"))
|
if representation := entry.StringRepresentation(); representation != "" {
|
||||||
for _, line := range lines[len(lines)-limit-1:] {
|
r.emitBlock(r.fi(indent+1, representation))
|
||||||
r.emitBlock(r.fi(indent+1, "%s", line))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) EmitSpecEvent(event types.SpecEvent) {
|
||||||
|
v := r.conf.Verbosity()
|
||||||
|
if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && (r.conf.ShowNodeEvents || !event.IsOnlyVisibleAtVeryVerbose())) {
|
||||||
|
r.emitSpecEvent(1, event, r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultReporter) emitSpecEvent(indent uint, event types.SpecEvent, includeLocation bool) {
|
||||||
|
location := ""
|
||||||
|
if includeLocation {
|
||||||
|
location = fmt.Sprintf("- %s ", event.CodeLocation.String())
|
||||||
|
}
|
||||||
|
switch event.SpecEventType {
|
||||||
|
case types.SpecEventInvalid:
|
||||||
|
return
|
||||||
|
case types.SpecEventByStart:
|
||||||
|
r.emitBlock(r.fi(indent, "{{bold}}STEP:{{/}} %s {{gray}}%s@ %s{{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
|
||||||
|
case types.SpecEventByEnd:
|
||||||
|
r.emitBlock(r.fi(indent, "{{bold}}END STEP:{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond)))
|
||||||
|
case types.SpecEventNodeStart:
|
||||||
|
r.emitBlock(r.fi(indent, "> Enter {{bold}}[%s]{{/}} %s {{gray}}%s@ %s{{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
|
||||||
|
case types.SpecEventNodeEnd:
|
||||||
|
r.emitBlock(r.fi(indent, "< Exit {{bold}}[%s]{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond)))
|
||||||
|
case types.SpecEventSpecRepeat:
|
||||||
|
r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{green}}Passed{{/}}{{bold}}. Repeating %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
|
||||||
|
case types.SpecEventSpecRetry:
|
||||||
|
r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{red}}Failed{{/}}{{bold}}. Retrying %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
|
||||||
}
|
}
|
||||||
r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}"))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) {
|
func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) {
|
||||||
@ -544,31 +622,37 @@ func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) {
|
|||||||
|
|
||||||
/* Emitting to the writer */
|
/* Emitting to the writer */
|
||||||
func (r *DefaultReporter) emit(s string) {
|
func (r *DefaultReporter) emit(s string) {
|
||||||
if len(s) > 0 {
|
r._emit(s, false, false)
|
||||||
r.lastChar = s[len(s)-1:]
|
|
||||||
r.lastEmissionWasDelimiter = false
|
|
||||||
r.writer.Write([]byte(s))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *DefaultReporter) emitBlock(s string) {
|
func (r *DefaultReporter) emitBlock(s string) {
|
||||||
if len(s) > 0 {
|
r._emit(s, true, false)
|
||||||
if r.lastChar != "\n" {
|
|
||||||
r.emit("\n")
|
|
||||||
}
|
|
||||||
r.emit(s)
|
|
||||||
if r.lastChar != "\n" {
|
|
||||||
r.emit("\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *DefaultReporter) emitDelimiter() {
|
func (r *DefaultReporter) emitDelimiter(indent uint) {
|
||||||
if r.lastEmissionWasDelimiter {
|
r._emit(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30)), true, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// a bit ugly - but we're trying to minimize locking on this hot codepath
|
||||||
|
func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) {
|
||||||
|
if len(s) == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
r.emitBlock(r.f("{{gray}}%s{{/}}", strings.Repeat("-", 30)))
|
r.lock.Lock()
|
||||||
r.lastEmissionWasDelimiter = true
|
defer r.lock.Unlock()
|
||||||
|
if isDelimiter && r.lastEmissionWasDelimiter {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if block && !r.lastCharWasNewline {
|
||||||
|
r.writer.Write([]byte("\n"))
|
||||||
|
}
|
||||||
|
r.lastCharWasNewline = (s[len(s)-1:] == "\n")
|
||||||
|
r.writer.Write([]byte(s))
|
||||||
|
if block && !r.lastCharWasNewline {
|
||||||
|
r.writer.Write([]byte("\n"))
|
||||||
|
r.lastCharWasNewline = true
|
||||||
|
}
|
||||||
|
r.lastEmissionWasDelimiter = isDelimiter
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Rendering text */
|
/* Rendering text */
|
||||||
@ -584,13 +668,14 @@ func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string {
|
|||||||
return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"})
|
return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, succinct bool, usePreciseFailureLocation bool) string {
|
func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string {
|
||||||
texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{}
|
texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{}
|
||||||
texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...)
|
texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...)
|
||||||
|
|
||||||
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
||||||
texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText))
|
texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText))
|
||||||
} else {
|
} else {
|
||||||
texts = append(texts, report.LeafNodeText)
|
texts = append(texts, r.f(report.LeafNodeText))
|
||||||
}
|
}
|
||||||
labels = append(labels, report.LeafNodeLabels)
|
labels = append(labels, report.LeafNodeLabels)
|
||||||
locations = append(locations, report.LeafNodeLocation)
|
locations = append(locations, report.LeafNodeLocation)
|
||||||
@ -600,24 +685,58 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
|
|||||||
failureLocation = report.Failure.Location
|
failureLocation = report.Failure.Location
|
||||||
}
|
}
|
||||||
|
|
||||||
|
highlightIndex := -1
|
||||||
switch report.Failure.FailureNodeContext {
|
switch report.Failure.FailureNodeContext {
|
||||||
case types.FailureNodeAtTopLevel:
|
case types.FailureNodeAtTopLevel:
|
||||||
texts = append([]string{r.f(highlightColor+"{{bold}}TOP-LEVEL [%s]{{/}}", report.Failure.FailureNodeType)}, texts...)
|
texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...)
|
||||||
locations = append([]types.CodeLocation{failureLocation}, locations...)
|
locations = append([]types.CodeLocation{failureLocation}, locations...)
|
||||||
labels = append([][]string{{}}, labels...)
|
labels = append([][]string{{}}, labels...)
|
||||||
|
highlightIndex = 0
|
||||||
case types.FailureNodeInContainer:
|
case types.FailureNodeInContainer:
|
||||||
i := report.Failure.FailureNodeContainerIndex
|
i := report.Failure.FailureNodeContainerIndex
|
||||||
texts[i] = r.f(highlightColor+"{{bold}}%s [%s]{{/}}", texts[i], report.Failure.FailureNodeType)
|
texts[i] = fmt.Sprintf("%s [%s]", texts[i], report.Failure.FailureNodeType)
|
||||||
locations[i] = failureLocation
|
locations[i] = failureLocation
|
||||||
|
highlightIndex = i
|
||||||
case types.FailureNodeIsLeafNode:
|
case types.FailureNodeIsLeafNode:
|
||||||
i := len(texts) - 1
|
i := len(texts) - 1
|
||||||
texts[i] = r.f(highlightColor+"{{bold}}[%s] %s{{/}}", report.LeafNodeType, report.LeafNodeText)
|
texts[i] = fmt.Sprintf("[%s] %s", report.LeafNodeType, report.LeafNodeText)
|
||||||
locations[i] = failureLocation
|
locations[i] = failureLocation
|
||||||
|
highlightIndex = i
|
||||||
|
default:
|
||||||
|
//there is no failure, so we highlight the leaf ndoe
|
||||||
|
highlightIndex = len(texts) - 1
|
||||||
}
|
}
|
||||||
|
|
||||||
out := ""
|
out := ""
|
||||||
if succinct {
|
if veryVerbose {
|
||||||
out += r.f("%s", r.cycleJoin(texts, " "))
|
for i := range texts {
|
||||||
|
if i == highlightIndex {
|
||||||
|
out += r.fi(uint(i), highlightColor+"{{bold}}%s{{/}}", texts[i])
|
||||||
|
} else {
|
||||||
|
out += r.fi(uint(i), "%s", texts[i])
|
||||||
|
}
|
||||||
|
if len(labels[i]) > 0 {
|
||||||
|
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
|
||||||
|
}
|
||||||
|
out += "\n"
|
||||||
|
out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for i := range texts {
|
||||||
|
style := "{{/}}"
|
||||||
|
if i%2 == 1 {
|
||||||
|
style = "{{gray}}"
|
||||||
|
}
|
||||||
|
if i == highlightIndex {
|
||||||
|
style = highlightColor + "{{bold}}"
|
||||||
|
}
|
||||||
|
out += r.f(style+"%s", texts[i])
|
||||||
|
if i < len(texts)-1 {
|
||||||
|
out += " "
|
||||||
|
} else {
|
||||||
|
out += r.f("{{/}}")
|
||||||
|
}
|
||||||
|
}
|
||||||
flattenedLabels := report.Labels()
|
flattenedLabels := report.Labels()
|
||||||
if len(flattenedLabels) > 0 {
|
if len(flattenedLabels) > 0 {
|
||||||
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", "))
|
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", "))
|
||||||
@ -626,17 +745,15 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
|
|||||||
if usePreciseFailureLocation {
|
if usePreciseFailureLocation {
|
||||||
out += r.f("{{gray}}%s{{/}}", failureLocation)
|
out += r.f("{{gray}}%s{{/}}", failureLocation)
|
||||||
} else {
|
} else {
|
||||||
out += r.f("{{gray}}%s{{/}}", locations[len(locations)-1])
|
leafLocation := locations[len(locations)-1]
|
||||||
}
|
if (report.Failure.FailureNodeLocation != types.CodeLocation{}) && (report.Failure.FailureNodeLocation != leafLocation) {
|
||||||
|
out += r.fi(1, highlightColor+"[%s]{{/}} {{gray}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.FailureNodeLocation)
|
||||||
|
out += r.fi(1, "{{gray}}[%s] %s{{/}}", report.LeafNodeType, leafLocation)
|
||||||
} else {
|
} else {
|
||||||
for i := range texts {
|
out += r.f("{{gray}}%s{{/}}", leafLocation)
|
||||||
out += r.fi(uint(i), "%s", texts[i])
|
|
||||||
if len(labels[i]) > 0 {
|
|
||||||
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
|
|
||||||
}
|
}
|
||||||
out += "\n"
|
|
||||||
out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
2
vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
generated
vendored
2
vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
generated
vendored
@ -35,7 +35,7 @@ func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Repor
|
|||||||
FailOnPending: report.SuiteConfig.FailOnPending,
|
FailOnPending: report.SuiteConfig.FailOnPending,
|
||||||
FailFast: report.SuiteConfig.FailFast,
|
FailFast: report.SuiteConfig.FailFast,
|
||||||
FlakeAttempts: report.SuiteConfig.FlakeAttempts,
|
FlakeAttempts: report.SuiteConfig.FlakeAttempts,
|
||||||
EmitSpecProgress: report.SuiteConfig.EmitSpecProgress,
|
EmitSpecProgress: false,
|
||||||
DryRun: report.SuiteConfig.DryRun,
|
DryRun: report.SuiteConfig.DryRun,
|
||||||
ParallelNode: report.SuiteConfig.ParallelProcess,
|
ParallelNode: report.SuiteConfig.ParallelProcess,
|
||||||
ParallelTotal: report.SuiteConfig.ParallelTotal,
|
ParallelTotal: report.SuiteConfig.ParallelTotal,
|
||||||
|
113
vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
generated
vendored
113
vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
generated
vendored
@ -15,12 +15,32 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/onsi/ginkgo/v2/config"
|
"github.com/onsi/ginkgo/v2/config"
|
||||||
"github.com/onsi/ginkgo/v2/types"
|
"github.com/onsi/ginkgo/v2/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type JunitReportConfig struct {
|
||||||
|
// Spec States for which no timeline should be emitted for system-err
|
||||||
|
// set this to types.SpecStatePassed|types.SpecStateSkipped|types.SpecStatePending to only match failing specs
|
||||||
|
OmitTimelinesForSpecState types.SpecState
|
||||||
|
|
||||||
|
// Enable OmitFailureMessageAttr to prevent failure messages appearing in the "message" attribute of the Failure and Error tags
|
||||||
|
OmitFailureMessageAttr bool
|
||||||
|
|
||||||
|
//Enable OmitCapturedStdOutErr to prevent captured stdout/stderr appearing in system-out
|
||||||
|
OmitCapturedStdOutErr bool
|
||||||
|
|
||||||
|
// Enable OmitSpecLabels to prevent labels from appearing in the spec name
|
||||||
|
OmitSpecLabels bool
|
||||||
|
|
||||||
|
// Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name
|
||||||
|
OmitLeafNodeType bool
|
||||||
|
|
||||||
|
// Enable OmitSuiteSetupNodes to prevent the creation of testcase entries for setup nodes
|
||||||
|
OmitSuiteSetupNodes bool
|
||||||
|
}
|
||||||
|
|
||||||
type JUnitTestSuites struct {
|
type JUnitTestSuites struct {
|
||||||
XMLName xml.Name `xml:"testsuites"`
|
XMLName xml.Name `xml:"testsuites"`
|
||||||
// Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite)
|
// Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite)
|
||||||
@ -128,6 +148,10 @@ type JUnitFailure struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func GenerateJUnitReport(report types.Report, dst string) error {
|
func GenerateJUnitReport(report types.Report, dst string) error {
|
||||||
|
return GenerateJUnitReportWithConfig(report, dst, JunitReportConfig{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig) error {
|
||||||
suite := JUnitTestSuite{
|
suite := JUnitTestSuite{
|
||||||
Name: report.SuiteDescription,
|
Name: report.SuiteDescription,
|
||||||
Package: report.SuitePath,
|
Package: report.SuitePath,
|
||||||
@ -149,7 +173,6 @@ func GenerateJUnitReport(report types.Report, dst string) error {
|
|||||||
{"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)},
|
{"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)},
|
||||||
{"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)},
|
{"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)},
|
||||||
{"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)},
|
{"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)},
|
||||||
{"EmitSpecProgress", fmt.Sprintf("%t", report.SuiteConfig.EmitSpecProgress)},
|
|
||||||
{"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)},
|
{"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)},
|
||||||
{"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)},
|
{"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)},
|
||||||
{"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode},
|
{"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode},
|
||||||
@ -157,22 +180,33 @@ func GenerateJUnitReport(report types.Report, dst string) error {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
for _, spec := range report.SpecReports {
|
for _, spec := range report.SpecReports {
|
||||||
|
if config.OmitSuiteSetupNodes && spec.LeafNodeType != types.NodeTypeIt {
|
||||||
|
continue
|
||||||
|
}
|
||||||
name := fmt.Sprintf("[%s]", spec.LeafNodeType)
|
name := fmt.Sprintf("[%s]", spec.LeafNodeType)
|
||||||
|
if config.OmitLeafNodeType {
|
||||||
|
name = ""
|
||||||
|
}
|
||||||
if spec.FullText() != "" {
|
if spec.FullText() != "" {
|
||||||
name = name + " " + spec.FullText()
|
name = name + " " + spec.FullText()
|
||||||
}
|
}
|
||||||
labels := spec.Labels()
|
labels := spec.Labels()
|
||||||
if len(labels) > 0 {
|
if len(labels) > 0 && !config.OmitSpecLabels {
|
||||||
name = name + " [" + strings.Join(labels, ", ") + "]"
|
name = name + " [" + strings.Join(labels, ", ") + "]"
|
||||||
}
|
}
|
||||||
|
name = strings.TrimSpace(name)
|
||||||
|
|
||||||
test := JUnitTestCase{
|
test := JUnitTestCase{
|
||||||
Name: name,
|
Name: name,
|
||||||
Classname: report.SuiteDescription,
|
Classname: report.SuiteDescription,
|
||||||
Status: spec.State.String(),
|
Status: spec.State.String(),
|
||||||
Time: spec.RunTime.Seconds(),
|
Time: spec.RunTime.Seconds(),
|
||||||
SystemOut: systemOutForUnstructuredReporters(spec),
|
}
|
||||||
SystemErr: systemErrForUnstructuredReporters(spec),
|
if !spec.State.Is(config.OmitTimelinesForSpecState) {
|
||||||
|
test.SystemErr = systemErrForUnstructuredReporters(spec)
|
||||||
|
}
|
||||||
|
if !config.OmitCapturedStdOutErr {
|
||||||
|
test.SystemOut = systemOutForUnstructuredReporters(spec)
|
||||||
}
|
}
|
||||||
suite.Tests += 1
|
suite.Tests += 1
|
||||||
|
|
||||||
@ -193,6 +227,9 @@ func GenerateJUnitReport(report types.Report, dst string) error {
|
|||||||
Type: "failed",
|
Type: "failed",
|
||||||
Description: failureDescriptionForUnstructuredReporters(spec),
|
Description: failureDescriptionForUnstructuredReporters(spec),
|
||||||
}
|
}
|
||||||
|
if config.OmitFailureMessageAttr {
|
||||||
|
test.Failure.Message = ""
|
||||||
|
}
|
||||||
suite.Failures += 1
|
suite.Failures += 1
|
||||||
case types.SpecStateTimedout:
|
case types.SpecStateTimedout:
|
||||||
test.Failure = &JUnitFailure{
|
test.Failure = &JUnitFailure{
|
||||||
@ -200,6 +237,9 @@ func GenerateJUnitReport(report types.Report, dst string) error {
|
|||||||
Type: "timedout",
|
Type: "timedout",
|
||||||
Description: failureDescriptionForUnstructuredReporters(spec),
|
Description: failureDescriptionForUnstructuredReporters(spec),
|
||||||
}
|
}
|
||||||
|
if config.OmitFailureMessageAttr {
|
||||||
|
test.Failure.Message = ""
|
||||||
|
}
|
||||||
suite.Failures += 1
|
suite.Failures += 1
|
||||||
case types.SpecStateInterrupted:
|
case types.SpecStateInterrupted:
|
||||||
test.Error = &JUnitError{
|
test.Error = &JUnitError{
|
||||||
@ -207,6 +247,9 @@ func GenerateJUnitReport(report types.Report, dst string) error {
|
|||||||
Type: "interrupted",
|
Type: "interrupted",
|
||||||
Description: failureDescriptionForUnstructuredReporters(spec),
|
Description: failureDescriptionForUnstructuredReporters(spec),
|
||||||
}
|
}
|
||||||
|
if config.OmitFailureMessageAttr {
|
||||||
|
test.Error.Message = ""
|
||||||
|
}
|
||||||
suite.Errors += 1
|
suite.Errors += 1
|
||||||
case types.SpecStateAborted:
|
case types.SpecStateAborted:
|
||||||
test.Failure = &JUnitFailure{
|
test.Failure = &JUnitFailure{
|
||||||
@ -214,6 +257,9 @@ func GenerateJUnitReport(report types.Report, dst string) error {
|
|||||||
Type: "aborted",
|
Type: "aborted",
|
||||||
Description: failureDescriptionForUnstructuredReporters(spec),
|
Description: failureDescriptionForUnstructuredReporters(spec),
|
||||||
}
|
}
|
||||||
|
if config.OmitFailureMessageAttr {
|
||||||
|
test.Failure.Message = ""
|
||||||
|
}
|
||||||
suite.Errors += 1
|
suite.Errors += 1
|
||||||
case types.SpecStatePanicked:
|
case types.SpecStatePanicked:
|
||||||
test.Error = &JUnitError{
|
test.Error = &JUnitError{
|
||||||
@ -221,6 +267,9 @@ func GenerateJUnitReport(report types.Report, dst string) error {
|
|||||||
Type: "panicked",
|
Type: "panicked",
|
||||||
Description: failureDescriptionForUnstructuredReporters(spec),
|
Description: failureDescriptionForUnstructuredReporters(spec),
|
||||||
}
|
}
|
||||||
|
if config.OmitFailureMessageAttr {
|
||||||
|
test.Error.Message = ""
|
||||||
|
}
|
||||||
suite.Errors += 1
|
suite.Errors += 1
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -287,63 +336,25 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error)
|
|||||||
|
|
||||||
func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string {
|
func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string {
|
||||||
out := &strings.Builder{}
|
out := &strings.Builder{}
|
||||||
out.WriteString(spec.Failure.Location.String() + "\n")
|
NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitFailure(0, spec.State, spec.Failure, true)
|
||||||
out.WriteString(spec.Failure.Location.FullStackTrace)
|
|
||||||
if !spec.Failure.ProgressReport.IsZero() {
|
|
||||||
out.WriteString("\n")
|
|
||||||
NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitProgressReport(spec.Failure.ProgressReport)
|
|
||||||
}
|
|
||||||
if len(spec.AdditionalFailures) > 0 {
|
if len(spec.AdditionalFailures) > 0 {
|
||||||
out.WriteString("\nThere were additional failures detected after the initial failure:\n")
|
out.WriteString("\nThere were additional failures detected after the initial failure. These are visible in the timeline\n")
|
||||||
for i, additionalFailure := range spec.AdditionalFailures {
|
|
||||||
NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitFailure(0, additionalFailure.State, additionalFailure.Failure, true)
|
|
||||||
if i < len(spec.AdditionalFailures)-1 {
|
|
||||||
out.WriteString("----------\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return out.String()
|
return out.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
func systemErrForUnstructuredReporters(spec types.SpecReport) string {
|
func systemErrForUnstructuredReporters(spec types.SpecReport) string {
|
||||||
|
return RenderTimeline(spec, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func RenderTimeline(spec types.SpecReport, noColor bool) string {
|
||||||
out := &strings.Builder{}
|
out := &strings.Builder{}
|
||||||
gw := spec.CapturedGinkgoWriterOutput
|
NewDefaultReporter(types.ReporterConfig{NoColor: noColor, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline())
|
||||||
cursor := 0
|
|
||||||
for _, pr := range spec.ProgressReports {
|
|
||||||
if cursor < pr.GinkgoWriterOffset {
|
|
||||||
if pr.GinkgoWriterOffset < len(gw) {
|
|
||||||
out.WriteString(gw[cursor:pr.GinkgoWriterOffset])
|
|
||||||
cursor = pr.GinkgoWriterOffset
|
|
||||||
} else if cursor < len(gw) {
|
|
||||||
out.WriteString(gw[cursor:])
|
|
||||||
cursor = len(gw)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitProgressReport(pr)
|
|
||||||
}
|
|
||||||
|
|
||||||
if cursor < len(gw) {
|
|
||||||
out.WriteString(gw[cursor:])
|
|
||||||
}
|
|
||||||
|
|
||||||
return out.String()
|
return out.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
func systemOutForUnstructuredReporters(spec types.SpecReport) string {
|
func systemOutForUnstructuredReporters(spec types.SpecReport) string {
|
||||||
systemOut := spec.CapturedStdOutErr
|
return spec.CapturedStdOutErr
|
||||||
if len(spec.ReportEntries) > 0 {
|
|
||||||
systemOut += "\nReport Entries:\n"
|
|
||||||
for i, entry := range spec.ReportEntries {
|
|
||||||
systemOut += fmt.Sprintf("%s\n%s\n%s\n", entry.Name, entry.Location, entry.Time.Format(time.RFC3339Nano))
|
|
||||||
if representation := entry.StringRepresentation(); representation != "" {
|
|
||||||
systemOut += representation + "\n"
|
|
||||||
}
|
|
||||||
if i+1 < len(spec.ReportEntries) {
|
|
||||||
systemOut += "--\n"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return systemOut
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deprecated JUnitReporter (so folks can still compile their suites)
|
// Deprecated JUnitReporter (so folks can still compile their suites)
|
||||||
|
8
vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
generated
vendored
8
vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
generated
vendored
@ -9,7 +9,12 @@ type Reporter interface {
|
|||||||
WillRun(report types.SpecReport)
|
WillRun(report types.SpecReport)
|
||||||
DidRun(report types.SpecReport)
|
DidRun(report types.SpecReport)
|
||||||
SuiteDidEnd(report types.Report)
|
SuiteDidEnd(report types.Report)
|
||||||
|
|
||||||
|
//Timeline emission
|
||||||
|
EmitFailure(state types.SpecState, failure types.Failure)
|
||||||
EmitProgressReport(progressReport types.ProgressReport)
|
EmitProgressReport(progressReport types.ProgressReport)
|
||||||
|
EmitReportEntry(entry types.ReportEntry)
|
||||||
|
EmitSpecEvent(event types.SpecEvent)
|
||||||
}
|
}
|
||||||
|
|
||||||
type NoopReporter struct{}
|
type NoopReporter struct{}
|
||||||
@ -18,4 +23,7 @@ func (n NoopReporter) SuiteWillBegin(report types.Report) {}
|
|||||||
func (n NoopReporter) WillRun(report types.SpecReport) {}
|
func (n NoopReporter) WillRun(report types.SpecReport) {}
|
||||||
func (n NoopReporter) DidRun(report types.SpecReport) {}
|
func (n NoopReporter) DidRun(report types.SpecReport) {}
|
||||||
func (n NoopReporter) SuiteDidEnd(report types.Report) {}
|
func (n NoopReporter) SuiteDidEnd(report types.Report) {}
|
||||||
|
func (n NoopReporter) EmitFailure(state types.SpecState, failure types.Failure) {}
|
||||||
func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {}
|
func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {}
|
||||||
|
func (n NoopReporter) EmitReportEntry(entry types.ReportEntry) {}
|
||||||
|
func (n NoopReporter) EmitSpecEvent(event types.SpecEvent) {}
|
||||||
|
20
vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
generated
vendored
20
vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
generated
vendored
@ -100,6 +100,25 @@ func ReportAfterEach(body func(SpecReport), args ...interface{}) bool {
|
|||||||
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...))
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report.
|
||||||
|
|
||||||
|
They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite.
|
||||||
|
ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
|
||||||
|
|
||||||
|
# When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportBeforeSuite
|
||||||
|
|
||||||
|
You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
|
||||||
|
You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
|
||||||
|
|
||||||
|
You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
|
||||||
|
*/
|
||||||
|
func ReportBeforeSuite(body func(Report), args ...interface{}) bool {
|
||||||
|
combinedArgs := []interface{}{body}
|
||||||
|
combinedArgs = append(combinedArgs, args...)
|
||||||
|
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report.
|
ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report.
|
||||||
|
|
||||||
@ -113,6 +132,7 @@ In addition to using ReportAfterSuite to programmatically generate suite reports
|
|||||||
|
|
||||||
You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
|
You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
|
||||||
You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
|
You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
|
||||||
|
|
||||||
You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
|
You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
|
||||||
*/
|
*/
|
||||||
func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool {
|
func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool {
|
||||||
|
15
vendor/github.com/onsi/ginkgo/v2/table_dsl.go
generated
vendored
15
vendor/github.com/onsi/ginkgo/v2/table_dsl.go
generated
vendored
@ -45,6 +45,7 @@ You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#tabl
|
|||||||
And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
|
And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
|
||||||
*/
|
*/
|
||||||
func DescribeTable(description string, args ...interface{}) bool {
|
func DescribeTable(description string, args ...interface{}) bool {
|
||||||
|
GinkgoHelper()
|
||||||
generateTable(description, args...)
|
generateTable(description, args...)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@ -53,6 +54,7 @@ func DescribeTable(description string, args ...interface{}) bool {
|
|||||||
You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
|
You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
|
||||||
*/
|
*/
|
||||||
func FDescribeTable(description string, args ...interface{}) bool {
|
func FDescribeTable(description string, args ...interface{}) bool {
|
||||||
|
GinkgoHelper()
|
||||||
args = append(args, internal.Focus)
|
args = append(args, internal.Focus)
|
||||||
generateTable(description, args...)
|
generateTable(description, args...)
|
||||||
return true
|
return true
|
||||||
@ -62,6 +64,7 @@ func FDescribeTable(description string, args ...interface{}) bool {
|
|||||||
You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
|
You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
|
||||||
*/
|
*/
|
||||||
func PDescribeTable(description string, args ...interface{}) bool {
|
func PDescribeTable(description string, args ...interface{}) bool {
|
||||||
|
GinkgoHelper()
|
||||||
args = append(args, internal.Pending)
|
args = append(args, internal.Pending)
|
||||||
generateTable(description, args...)
|
generateTable(description, args...)
|
||||||
return true
|
return true
|
||||||
@ -95,26 +98,29 @@ If you want to generate interruptible specs simply write a Table function that a
|
|||||||
You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs
|
You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs
|
||||||
*/
|
*/
|
||||||
func Entry(description interface{}, args ...interface{}) TableEntry {
|
func Entry(description interface{}, args ...interface{}) TableEntry {
|
||||||
|
GinkgoHelper()
|
||||||
decorations, parameters := internal.PartitionDecorations(args...)
|
decorations, parameters := internal.PartitionDecorations(args...)
|
||||||
return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)}
|
return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
You can focus a particular entry with FEntry. This is equivalent to FIt.
|
You can focus a particular entry with FEntry. This is equivalent to FIt.
|
||||||
*/
|
*/
|
||||||
func FEntry(description interface{}, args ...interface{}) TableEntry {
|
func FEntry(description interface{}, args ...interface{}) TableEntry {
|
||||||
|
GinkgoHelper()
|
||||||
decorations, parameters := internal.PartitionDecorations(args...)
|
decorations, parameters := internal.PartitionDecorations(args...)
|
||||||
decorations = append(decorations, internal.Focus)
|
decorations = append(decorations, internal.Focus)
|
||||||
return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)}
|
return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
|
You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
|
||||||
*/
|
*/
|
||||||
func PEntry(description interface{}, args ...interface{}) TableEntry {
|
func PEntry(description interface{}, args ...interface{}) TableEntry {
|
||||||
|
GinkgoHelper()
|
||||||
decorations, parameters := internal.PartitionDecorations(args...)
|
decorations, parameters := internal.PartitionDecorations(args...)
|
||||||
decorations = append(decorations, internal.Pending)
|
decorations = append(decorations, internal.Pending)
|
||||||
return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)}
|
return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -126,7 +132,8 @@ var contextType = reflect.TypeOf(new(context.Context)).Elem()
|
|||||||
var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
|
var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
|
||||||
|
|
||||||
func generateTable(description string, args ...interface{}) {
|
func generateTable(description string, args ...interface{}) {
|
||||||
cl := types.NewCodeLocation(2)
|
GinkgoHelper()
|
||||||
|
cl := types.NewCodeLocation(0)
|
||||||
containerNodeArgs := []interface{}{cl}
|
containerNodeArgs := []interface{}{cl}
|
||||||
|
|
||||||
entries := []TableEntry{}
|
entries := []TableEntry{}
|
||||||
|
78
vendor/github.com/onsi/ginkgo/v2/types/code_location.go
generated
vendored
78
vendor/github.com/onsi/ginkgo/v2/types/code_location.go
generated
vendored
@ -1,4 +1,5 @@
|
|||||||
package types
|
package types
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
@ -6,6 +7,7 @@ import (
|
|||||||
"runtime"
|
"runtime"
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
type CodeLocation struct {
|
type CodeLocation struct {
|
||||||
@ -37,6 +39,73 @@ func (codeLocation CodeLocation) ContentsOfLine() string {
|
|||||||
return lines[codeLocation.LineNumber-1]
|
return lines[codeLocation.LineNumber-1]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type codeLocationLocator struct {
|
||||||
|
pcs map[uintptr]bool
|
||||||
|
helpers map[string]bool
|
||||||
|
lock *sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *codeLocationLocator) addHelper(pc uintptr) {
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
|
||||||
|
if c.pcs[pc] {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.lock.Unlock()
|
||||||
|
f := runtime.FuncForPC(pc)
|
||||||
|
c.lock.Lock()
|
||||||
|
if f == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.helpers[f.Name()] = true
|
||||||
|
c.pcs[pc] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *codeLocationLocator) hasHelper(name string) bool {
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
return c.helpers[name]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *codeLocationLocator) getCodeLocation(skip int) CodeLocation {
|
||||||
|
pc := make([]uintptr, 40)
|
||||||
|
n := runtime.Callers(skip+2, pc)
|
||||||
|
if n == 0 {
|
||||||
|
return CodeLocation{}
|
||||||
|
}
|
||||||
|
pc = pc[:n]
|
||||||
|
frames := runtime.CallersFrames(pc)
|
||||||
|
for {
|
||||||
|
frame, more := frames.Next()
|
||||||
|
if !c.hasHelper(frame.Function) {
|
||||||
|
return CodeLocation{FileName: frame.File, LineNumber: frame.Line}
|
||||||
|
}
|
||||||
|
if !more {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return CodeLocation{}
|
||||||
|
}
|
||||||
|
|
||||||
|
var clLocator = &codeLocationLocator{
|
||||||
|
pcs: map[uintptr]bool{},
|
||||||
|
helpers: map[string]bool{},
|
||||||
|
lock: &sync.Mutex{},
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarkAsHelper is used by GinkgoHelper to mark the caller (appropriately offset by skip)as a helper. You can use this directly if you need to provide an optional `skip` to mark functions further up the call stack as helpers.
|
||||||
|
func MarkAsHelper(optionalSkip ...int) {
|
||||||
|
skip := 1
|
||||||
|
if len(optionalSkip) > 0 {
|
||||||
|
skip += optionalSkip[0]
|
||||||
|
}
|
||||||
|
pc, _, _, ok := runtime.Caller(skip)
|
||||||
|
if ok {
|
||||||
|
clLocator.addHelper(pc)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func NewCustomCodeLocation(message string) CodeLocation {
|
func NewCustomCodeLocation(message string) CodeLocation {
|
||||||
return CodeLocation{
|
return CodeLocation{
|
||||||
CustomMessage: message,
|
CustomMessage: message,
|
||||||
@ -44,14 +113,13 @@ func NewCustomCodeLocation(message string) CodeLocation {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func NewCodeLocation(skip int) CodeLocation {
|
func NewCodeLocation(skip int) CodeLocation {
|
||||||
_, file, line, _ := runtime.Caller(skip + 1)
|
return clLocator.getCodeLocation(skip + 1)
|
||||||
return CodeLocation{FileName: file, LineNumber: line}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewCodeLocationWithStackTrace(skip int) CodeLocation {
|
func NewCodeLocationWithStackTrace(skip int) CodeLocation {
|
||||||
_, file, line, _ := runtime.Caller(skip + 1)
|
cl := clLocator.getCodeLocation(skip + 1)
|
||||||
stackTrace := PruneStack(string(debug.Stack()), skip+1)
|
cl.FullStackTrace = PruneStack(string(debug.Stack()), skip+1)
|
||||||
return CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
|
return cl
|
||||||
}
|
}
|
||||||
|
|
||||||
// PruneStack removes references to functions that are internal to Ginkgo
|
// PruneStack removes references to functions that are internal to Ginkgo
|
||||||
|
43
vendor/github.com/onsi/ginkgo/v2/types/config.go
generated
vendored
43
vendor/github.com/onsi/ginkgo/v2/types/config.go
generated
vendored
@ -8,6 +8,7 @@ package types
|
|||||||
import (
|
import (
|
||||||
"flag"
|
"flag"
|
||||||
"os"
|
"os"
|
||||||
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@ -26,11 +27,11 @@ type SuiteConfig struct {
|
|||||||
FailOnPending bool
|
FailOnPending bool
|
||||||
FailFast bool
|
FailFast bool
|
||||||
FlakeAttempts int
|
FlakeAttempts int
|
||||||
EmitSpecProgress bool
|
|
||||||
DryRun bool
|
DryRun bool
|
||||||
PollProgressAfter time.Duration
|
PollProgressAfter time.Duration
|
||||||
PollProgressInterval time.Duration
|
PollProgressInterval time.Duration
|
||||||
Timeout time.Duration
|
Timeout time.Duration
|
||||||
|
EmitSpecProgress bool // this is deprecated but its removal is causing compile issue for some users that were setting it manually
|
||||||
OutputInterceptorMode string
|
OutputInterceptorMode string
|
||||||
SourceRoots []string
|
SourceRoots []string
|
||||||
GracePeriod time.Duration
|
GracePeriod time.Duration
|
||||||
@ -82,12 +83,11 @@ func (vl VerbosityLevel) LT(comp VerbosityLevel) bool {
|
|||||||
// Configuration for Ginkgo's reporter
|
// Configuration for Ginkgo's reporter
|
||||||
type ReporterConfig struct {
|
type ReporterConfig struct {
|
||||||
NoColor bool
|
NoColor bool
|
||||||
SlowSpecThreshold time.Duration
|
|
||||||
Succinct bool
|
Succinct bool
|
||||||
Verbose bool
|
Verbose bool
|
||||||
VeryVerbose bool
|
VeryVerbose bool
|
||||||
FullTrace bool
|
FullTrace bool
|
||||||
AlwaysEmitGinkgoWriter bool
|
ShowNodeEvents bool
|
||||||
|
|
||||||
JSONReport string
|
JSONReport string
|
||||||
JUnitReport string
|
JUnitReport string
|
||||||
@ -110,9 +110,7 @@ func (rc ReporterConfig) WillGenerateReport() bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func NewDefaultReporterConfig() ReporterConfig {
|
func NewDefaultReporterConfig() ReporterConfig {
|
||||||
return ReporterConfig{
|
return ReporterConfig{}
|
||||||
SlowSpecThreshold: 5 * time.Second,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Configuration for the Ginkgo CLI
|
// Configuration for the Ginkgo CLI
|
||||||
@ -235,6 +233,9 @@ type deprecatedConfig struct {
|
|||||||
SlowSpecThresholdWithFLoatUnits float64
|
SlowSpecThresholdWithFLoatUnits float64
|
||||||
Stream bool
|
Stream bool
|
||||||
Notify bool
|
Notify bool
|
||||||
|
EmitSpecProgress bool
|
||||||
|
SlowSpecThreshold time.Duration
|
||||||
|
AlwaysEmitGinkgoWriter bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Flags
|
// Flags
|
||||||
@ -275,8 +276,6 @@ var SuiteConfigFlags = GinkgoFlags{
|
|||||||
|
|
||||||
{KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags",
|
{KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags",
|
||||||
Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."},
|
Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."},
|
||||||
{KeyPath: "S.EmitSpecProgress", Name: "progress", SectionKey: "debug",
|
|
||||||
Usage: "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter."},
|
|
||||||
{KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0",
|
{KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0",
|
||||||
Usage: "Emit node progress reports periodically if node hasn't completed after this duration."},
|
Usage: "Emit node progress reports periodically if node hasn't completed after this duration."},
|
||||||
{KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s",
|
{KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s",
|
||||||
@ -303,6 +302,8 @@ var SuiteConfigFlags = GinkgoFlags{
|
|||||||
|
|
||||||
{KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"},
|
{KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"},
|
||||||
{KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"},
|
{KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"},
|
||||||
|
{KeyPath: "D.EmitSpecProgress", DeprecatedName: "progress", SectionKey: "debug",
|
||||||
|
DeprecatedVersion: "2.5.0", Usage: ". The functionality provided by --progress was confusing and is no longer needed. Use --show-node-events instead to see node entry and exit events included in the timeline of failed and verbose specs. Or you can run with -vv to always see all node events. Lastly, --poll-progress-after and the PollProgressAfter decorator now provide a better mechanism for debugging specs that tend to get stuck."},
|
||||||
}
|
}
|
||||||
|
|
||||||
// ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI)
|
// ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI)
|
||||||
@ -319,8 +320,6 @@ var ParallelConfigFlags = GinkgoFlags{
|
|||||||
var ReporterConfigFlags = GinkgoFlags{
|
var ReporterConfigFlags = GinkgoFlags{
|
||||||
{KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags",
|
{KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags",
|
||||||
Usage: "If set, suppress color output in default reporter."},
|
Usage: "If set, suppress color output in default reporter."},
|
||||||
{KeyPath: "R.SlowSpecThreshold", Name: "slow-spec-threshold", SectionKey: "output", UsageArgument: "duration", UsageDefaultValue: "5s",
|
|
||||||
Usage: "Specs that take longer to run than this threshold are flagged as slow by the default reporter."},
|
|
||||||
{KeyPath: "R.Verbose", Name: "v", SectionKey: "output",
|
{KeyPath: "R.Verbose", Name: "v", SectionKey: "output",
|
||||||
Usage: "If set, emits more output including GinkgoWriter contents."},
|
Usage: "If set, emits more output including GinkgoWriter contents."},
|
||||||
{KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output",
|
{KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output",
|
||||||
@ -329,8 +328,8 @@ var ReporterConfigFlags = GinkgoFlags{
|
|||||||
Usage: "If set, default reporter prints out a very succinct report"},
|
Usage: "If set, default reporter prints out a very succinct report"},
|
||||||
{KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output",
|
{KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output",
|
||||||
Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
|
Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
|
||||||
{KeyPath: "R.AlwaysEmitGinkgoWriter", Name: "always-emit-ginkgo-writer", SectionKey: "output", DeprecatedName: "reportPassed", DeprecatedDocLink: "renamed--reportpassed",
|
{KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output",
|
||||||
Usage: "If set, default reporter prints out captured output of passed tests."},
|
Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"},
|
||||||
|
|
||||||
{KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
|
{KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
|
||||||
Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
|
Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
|
||||||
@ -343,6 +342,8 @@ var ReporterConfigFlags = GinkgoFlags{
|
|||||||
Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"},
|
Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"},
|
||||||
{KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
|
{KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
|
||||||
{KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
|
{KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
|
||||||
|
{KeyPath: "D.SlowSpecThreshold", DeprecatedName: "slow-spec-threshold", SectionKey: "output", Usage: "--slow-spec-threshold has been deprecated and will be removed in a future version of Ginkgo. This feature has proved to be more noisy than useful. You can use --poll-progress-after, instead, to get more actionable feedback about potentially slow specs and understand where they might be getting stuck.", DeprecatedVersion: "2.5.0"},
|
||||||
|
{KeyPath: "D.AlwaysEmitGinkgoWriter", DeprecatedName: "always-emit-ginkgo-writer", SectionKey: "output", Usage: " - use -v instead, or one of Ginkgo's machine-readable report formats to get GinkgoWriter output for passing specs."},
|
||||||
}
|
}
|
||||||
|
|
||||||
// BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process
|
// BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process
|
||||||
@ -600,13 +601,29 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test
|
// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test
|
||||||
func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string) ([]string, error) {
|
func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) {
|
||||||
// if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure
|
// if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure
|
||||||
// the built test binary can generate a coverprofile
|
// the built test binary can generate a coverprofile
|
||||||
if goFlagsConfig.CoverProfile != "" {
|
if goFlagsConfig.CoverProfile != "" {
|
||||||
goFlagsConfig.Cover = true
|
goFlagsConfig.Cover = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if goFlagsConfig.CoverPkg != "" {
|
||||||
|
coverPkgs := strings.Split(goFlagsConfig.CoverPkg, ",")
|
||||||
|
adjustedCoverPkgs := make([]string, len(coverPkgs))
|
||||||
|
for i, coverPkg := range coverPkgs {
|
||||||
|
coverPkg = strings.Trim(coverPkg, " ")
|
||||||
|
if strings.HasPrefix(coverPkg, "./") {
|
||||||
|
// this is a relative coverPkg - we need to reroot it
|
||||||
|
adjustedCoverPkgs[i] = "./" + filepath.Join(pathToInvocationPath, strings.TrimPrefix(coverPkg, "./"))
|
||||||
|
} else {
|
||||||
|
// this is a package name - don't touch it
|
||||||
|
adjustedCoverPkgs[i] = coverPkg
|
||||||
|
}
|
||||||
|
}
|
||||||
|
goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",")
|
||||||
|
}
|
||||||
|
|
||||||
args := []string{"test", "-c", "-o", destination, packageToBuild}
|
args := []string{"test", "-c", "-o", destination, packageToBuild}
|
||||||
goArgs, err := GenerateFlagArgs(
|
goArgs, err := GenerateFlagArgs(
|
||||||
GoBuildFlags,
|
GoBuildFlags,
|
||||||
|
9
vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
generated
vendored
9
vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
generated
vendored
@ -38,7 +38,7 @@ func (d deprecations) Async() Deprecation {
|
|||||||
|
|
||||||
func (d deprecations) Measure() Deprecation {
|
func (d deprecations) Measure() Deprecation {
|
||||||
return Deprecation{
|
return Deprecation{
|
||||||
Message: "Measure is deprecated and will be removed in Ginkgo V2. Please migrate to gomega/gmeasure.",
|
Message: "Measure is deprecated and has been removed from Ginkgo V2. Any Measure tests in your spec will not run. Please migrate to gomega/gmeasure.",
|
||||||
DocLink: "removed-measure",
|
DocLink: "removed-measure",
|
||||||
Version: "1.16.3",
|
Version: "1.16.3",
|
||||||
}
|
}
|
||||||
@ -83,6 +83,13 @@ func (d deprecations) Nodot() Deprecation {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d deprecations) SuppressProgressReporting() Deprecation {
|
||||||
|
return Deprecation{
|
||||||
|
Message: "Improvements to how reporters emit timeline information means that SuppressProgressReporting is no longer necessary and has been deprecated.",
|
||||||
|
Version: "2.5.0",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
type DeprecationTracker struct {
|
type DeprecationTracker struct {
|
||||||
deprecations map[Deprecation][]CodeLocation
|
deprecations map[Deprecation][]CodeLocation
|
||||||
lock *sync.Mutex
|
lock *sync.Mutex
|
||||||
|
19
vendor/github.com/onsi/ginkgo/v2/types/errors.go
generated
vendored
19
vendor/github.com/onsi/ginkgo/v2/types/errors.go
generated
vendored
@ -108,8 +108,8 @@ Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/
|
|||||||
|
|
||||||
func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error {
|
func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error {
|
||||||
docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
|
docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
|
||||||
if nodeType.Is(NodeTypeReportAfterSuite) {
|
if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) {
|
||||||
docLink = "reporting-nodes---reportaftersuite"
|
docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite"
|
||||||
}
|
}
|
||||||
|
|
||||||
return GinkgoError{
|
return GinkgoError{
|
||||||
@ -125,8 +125,8 @@ func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocatio
|
|||||||
|
|
||||||
func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error {
|
func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error {
|
||||||
docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
|
docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
|
||||||
if nodeType.Is(NodeTypeReportAfterSuite) {
|
if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) {
|
||||||
docLink = "reporting-nodes---reportaftersuite"
|
docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite"
|
||||||
}
|
}
|
||||||
|
|
||||||
return GinkgoError{
|
return GinkgoError{
|
||||||
@ -298,6 +298,15 @@ func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType N
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (g ginkgoErrors) InvalidContinueOnFailureDecoration(cl CodeLocation) error {
|
||||||
|
return GinkgoError{
|
||||||
|
Heading: "ContinueOnFailure not decorating an outermost Ordered Container",
|
||||||
|
Message: "ContinueOnFailure can only decorate an Ordered container, and this Ordered container must be the outermost Ordered container.",
|
||||||
|
CodeLocation: cl,
|
||||||
|
DocLink: "ordered-containers",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/* DeferCleanup errors */
|
/* DeferCleanup errors */
|
||||||
func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error {
|
func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error {
|
||||||
return GinkgoError{
|
return GinkgoError{
|
||||||
@ -320,7 +329,7 @@ func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation)
|
|||||||
func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error {
|
func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error {
|
||||||
return GinkgoError{
|
return GinkgoError{
|
||||||
Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType),
|
Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType),
|
||||||
Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a ReportAfterEach or ReportAfterSuite.",
|
Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a Reporting node.",
|
||||||
CodeLocation: cl,
|
CodeLocation: cl,
|
||||||
DocLink: "cleaning-up-our-cleanup-code-defercleanup",
|
DocLink: "cleaning-up-our-cleanup-code-defercleanup",
|
||||||
}
|
}
|
||||||
|
11
vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
generated
vendored
11
vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
generated
vendored
@ -272,12 +272,23 @@ func tokenize(input string) func() (*treeNode, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func MustParseLabelFilter(input string) LabelFilter {
|
||||||
|
filter, err := ParseLabelFilter(input)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return filter
|
||||||
|
}
|
||||||
|
|
||||||
func ParseLabelFilter(input string) (LabelFilter, error) {
|
func ParseLabelFilter(input string) (LabelFilter, error) {
|
||||||
if DEBUG_LABEL_FILTER_PARSING {
|
if DEBUG_LABEL_FILTER_PARSING {
|
||||||
fmt.Println("\n==============")
|
fmt.Println("\n==============")
|
||||||
fmt.Println("Input: ", input)
|
fmt.Println("Input: ", input)
|
||||||
fmt.Print("Tokens: ")
|
fmt.Print("Tokens: ")
|
||||||
}
|
}
|
||||||
|
if input == "" {
|
||||||
|
return func(_ []string) bool { return true }, nil
|
||||||
|
}
|
||||||
nextToken := tokenize(input)
|
nextToken := tokenize(input)
|
||||||
|
|
||||||
root := &treeNode{token: lfTokenRoot}
|
root := &treeNode{token: lfTokenRoot}
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user