Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)

rebase: update github.com/onsi/ginkgo to v2.9.5

There was a `replace` statement in `go.mod` that prevented Ginkgo from updating. Kubernetes 1.27 requires a new Ginkgo version.

Signed-off-by: Niels de Vos <ndevos@ibm.com>
Committed by: mergify[bot]
Parent: b1a4590967
Commit: 6709cdd1d0

2  vendor/github.com/onsi/ginkgo/v2/.gitignore  generated  vendored
@@ -1,5 +1,5 @@
.DS_Store
TODO.md
TODO
tmp/**/*
*.coverprofile
.vscode

256  vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md  generated  vendored
@@ -1,3 +1,257 @@
## 2.9.5

### Fixes
- ensure the correct deterministic sort order is produced when ordered specs are generated by a helper function [7fa0b6b]

### Maintenance
- fix generators link (#1200) [9f9d8b9]
- Bump golang.org/x/tools from 0.8.0 to 0.9.1 (#1196) [150e3f2]
- fix spelling err in docs (#1199) [0013b1a]
- Bump golang.org/x/sys from 0.7.0 to 0.8.0 (#1193) [9e9e3e5]

## 2.9.4

### Fixes
- fix hang with ginkgo -p (#1192) [15d4bdc] - this addresses a _long_ standing issue related to Ginkgo hanging when a child process spawned by the test does not exit (see the sketch below).

- fix: fail fast may cause Serial spec or cleanup Node interrupted (#1178) [8dea88b] - prior to this there was a small gap in which specs on other processes might start even if one process has tried to abort the suite.
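The `ginkgo -p` hang fixed above typically surfaced when a spec started a long-lived child process that outlived the spec and held the test binary's output pipes open. A minimal sketch of the pattern under discussion, with `my-server` as a hypothetical binary:

```go
import (
    "os/exec"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

var _ = It("starts a background server", func() {
    // hypothetical long-lived child process; before this fix, a child that
    // did not exit could keep `ginkgo -p` waiting after all specs finished
    cmd := exec.Command("my-server")
    cmd.Stdout = GinkgoWriter
    cmd.Stderr = GinkgoWriter
    Expect(cmd.Start()).To(Succeed())
    DeferCleanup(func() { _ = cmd.Process.Kill() })
})
```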

### Maintenance
- Document run order when multiple setup nodes are at the same nesting level [903be81]

## 2.9.3

### Features
- Add RenderTimeline to GinkgoT() [c0c77b6]

### Fixes
- update Measure deprecation message. fixes #1176 [227c662]
- add newlines to GinkgoLogr (#1170) (#1171) [0de0e7c]

### Maintenance
- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#1183) [8b925ab]
- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#1184) [e3795a4]
- Bump golang.org/x/tools from 0.7.0 to 0.8.0 (#1182) [b453793]
- Bump actions/setup-go from 3 to 4 (#1164) [73ed75b]
- Bump github.com/onsi/gomega from 1.27.4 to 1.27.6 (#1173) [0a2bc64]
- Bump github.com/go-logr/logr from 1.2.3 to 1.2.4 (#1174) [f41c557]
- Bump golang.org/x/sys from 0.6.0 to 0.7.0 (#1179) [8e423e5]

## 2.9.2

### Maintenance
- Bump github.com/go-task/slim-sprig (#1167) [3fcc5bf]
- Bump github.com/onsi/gomega from 1.27.3 to 1.27.4 (#1163) [6143ffe]

## 2.9.1

### Fixes
This release fixes a longstanding issue where `ginkgo -coverpkg=./...` would not work. This is now resolved and fixes [#1161](https://github.com/onsi/ginkgo/issues/1161) and [#995](https://github.com/onsi/ginkgo/issues/995).
- Support -coverpkg=./... [26ca1b5]
- document coverpkg a bit more clearly [fc44c3b]

### Maintenance
- bump various dependencies
- Improve Documentation and fix typo (#1158) [93de676]

## 2.9.0

### Features
- AttachProgressReporter is an experimental feature that allows users to provide arbitrary information when a ProgressReport is requested [28801fe] (see the sketch below)

- GinkgoT() has been expanded to include several Ginkgo-specific methods [2bd5a3b]

  The intent is to enable the development of third-party libraries that integrate deeply with Ginkgo using `GinkgoT()` to access Ginkgo's functionality.
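A minimal sketch of the experimental AttachProgressReporter (its signature, `func AttachProgressReporter(reporter func() string) func()`, appears in the core_dsl.go diff below); `pendingJobs` is a hypothetical piece of suite state:

```go
import (
    "fmt"

    . "github.com/onsi/ginkgo/v2"
)

var pendingJobs int // hypothetical suite state worth surfacing in Progress Reports

var _ = BeforeSuite(func() {
    // the reporter is polled whenever Ginkgo generates a Progress Report
    detach := AttachProgressReporter(func() string {
        return fmt.Sprintf("pending jobs: %d", pendingJobs)
    })
    DeferCleanup(detach) // the returned function unregisters the reporter
})
```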

## 2.8.4

### Features
- Add OmitSuiteSetupNodes to JunitReportConfig (#1147) [979fbc2]
- Add a reference to ginkgolinter in docs.index.md (#1143) [8432589]

### Fixes
- rename tools hack to see if it fixes things for downstream users [a8bb39a]

### Maintenance
- Bump golang.org/x/text (#1144) [41b2a8a]
- Bump github.com/onsi/gomega from 1.27.0 to 1.27.1 (#1142) [7c4f583]

## 2.8.3

Released to fix security issue in golang.org/x/net dependency

### Maintenance

- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#1141) [fc1a02e]
- remove tools.go hack from documentation [0718693]

## 2.8.2

Ginkgo now includes a `tools.go` file in the root directory of the `ginkgo` package. This should allow modules that simply `go get github.com/onsi/ginkgo/v2` to also pull in the CLI dependencies. This obviates the need for consumers of Ginkgo to have their own `tools.go` file and makes it simpler to ensure that the version of the `ginkgo` CLI being used matches the version of the library. You can simply run `go run github.com/onsi/ginkgo/v2/ginkgo` to run the version of the CLI associated with your package's go.mod.
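For reference, the consumer-side workaround that this change obviates is the conventional `tools.go` pattern sketched below (the generic Go idiom, not a file from this repository):

```go
//go:build tools

// Before Ginkgo 2.8.2, consumers pinned the ginkgo CLI in their own go.mod
// with a blank-import file like this; the CLI dependency now comes along
// automatically with `go get github.com/onsi/ginkgo/v2`.
package tools

import (
    _ "github.com/onsi/ginkgo/v2/ginkgo"
)
```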

### Maintenance

- Bump github.com/onsi/gomega from 1.26.0 to 1.27.0 (#1139) [5767b0a]
- Fix minor typos (#1138) [e1e9723]
- Fix link in V2 Migration Guide (#1137) [a588f60]

## 2.8.1

### Fixes
- lock around default report output to avoid triggering the race detector when calling By from goroutines [2d5075a]
- don't run ReportEntries through sprintf [febbe38]

### Maintenance
- Bump golang.org/x/tools from 0.5.0 to 0.6.0 (#1135) [11a4860]
- test: update matrix for Go 1.20 (#1130) [4890a62]
- Bump golang.org/x/sys from 0.4.0 to 0.5.0 (#1133) [a774638]
- Bump github.com/onsi/gomega from 1.25.0 to 1.26.0 (#1120) [3f233bd]
- Bump github-pages from 227 to 228 in /docs (#1131) [f9b8649]
- Bump activesupport from 6.0.6 to 6.0.6.1 in /docs (#1127) [6f8c042]
- Update index.md with instructions on how to upgrade Ginkgo [833a75e]

## 2.8.0

### Features

- Introduce GinkgoHelper() to track and exclude helper functions from potential CodeLocations [e19f556]

  Modeled after `testing.T.Helper()`. Now, rather than write code like:

  ```go
  func helper(model Model) {
      Expect(model).WithOffset(1).To(BeValid())
      Expect(model.SerialNumber).WithOffset(1).To(MatchRegexp("[a-f0-9]*"))
  }
  ```

  you can stop tracking offsets (which makes nesting and composing helpers nearly impossible) and simply write:

  ```go
  func helper(model Model) {
      GinkgoHelper()
      Expect(model).To(BeValid())
      Expect(model.SerialNumber).To(MatchRegexp("[a-f0-9]*"))
  }
  ```

- Introduce GinkgoLabelFilter() and Label().MatchesLabelFilter() to make it possible to programmatically match filters (fixes #1119) [2f6597c]

  You can now write code like this:

  ```go
  BeforeSuite(func() {
      if Label("slow").MatchesLabelFilter(GinkgoLabelFilter()) {
          // do slow setup
      }

      if Label("fast").MatchesLabelFilter(GinkgoLabelFilter()) {
          // do fast setup
      }
  })
  ```

  to programmatically check whether a given set of labels will match the configured `--label-filter`.

### Maintenance

- Bump webrick from 1.7.0 to 1.8.1 in /docs (#1125) [ea4966e]
- cdeql: add ruby language (#1124) [9dd275b]
- dependabot: add bundler package-ecosystem for docs (#1123) [14e7bdd]

## 2.7.1

### Fixes
- Bring back SuiteConfig.EmitSpecProgress to avoid compilation issue for consumers that set it manually [d2a1cb0]

### Maintenance
- Bump github.com/onsi/gomega from 1.24.2 to 1.25.0 (#1118) [cafece6]
- Bump golang.org/x/tools from 0.4.0 to 0.5.0 (#1111) [eda66c2]
- Bump golang.org/x/sys from 0.3.0 to 0.4.0 (#1112) [ac5ccaa]
- Bump github.com/onsi/gomega from 1.24.1 to 1.24.2 (#1097) [eee6480]

## 2.7.0

### Features
- Introduce ContinueOnFailure for Ordered containers [e0123ca] - Ordered containers that are also decorated with ContinueOnFailure will not stop running specs after the first spec fails (see the sketch below).
- Support for bootstrap commands to use custom data for templates (#1110) [7a2b242]
- Support for labels and pending decorator in ginkgo outline output (#1113) [e6e3b98]
- Color aliases for custom color support (#1101) [49fab7a]
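A minimal sketch of ContinueOnFailure on an Ordered container (the specs are hypothetical):

```go
import . "github.com/onsi/ginkgo/v2"

var _ = Describe("data migration", Ordered, ContinueOnFailure, func() {
    It("migrates the schema", func() {
        // if this spec fails, the remaining specs still run...
    })
    It("migrates the rows", func() {
        // ...which a plain Ordered container would have skipped
    })
})
```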

### Fixes
- correctly ensure deterministic spec order, even if specs are generated by iterating over a map [89dda20]
- Fix a bug where timed-out specs were not correctly treated as failures when determining whether or not to run AfterAlls in an Ordered container.
- Ensure go test coverprofile outputs to the expected location (#1105) [b0bd77b]

## 2.6.1

### Features
- Override formatter colors from envvars - this is a new feature but an alternative approach involving config files might be taken in the future (#1095) [60240d1]
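The mechanism, per the formatter.go diff later in this commit, reads `GINKGO_CLI_COLOR_<NAME>` environment variables whose value may be a color alias or an 8-bit 256-color code; a sketch:

```go
import "os"

func init() {
    // alias form and numeric (0-255) form, matching the getColor helper in
    // the formatter.go diff below; "dark-green" maps to GINKGO_CLI_COLOR_DARK_GREEN
    os.Setenv("GINKGO_CLI_COLOR_RED", "magenta")
    os.Setenv("GINKGO_CLI_COLOR_DARK_GREEN", "28")
}
```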

### Fixes
- GinkgoRecover now supports ignoring panics that match a specific, hidden, interface [301f3e2]

### Maintenance
- Bump github.com/onsi/gomega from 1.24.0 to 1.24.1 (#1077) [3643823]
- Bump golang.org/x/tools from 0.2.0 to 0.4.0 (#1090) [f9f856e]
- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#1091) [0d7087e]

## 2.6.0

### Features
- `ReportBeforeSuite` provides access to the suite report before the suite begins (see the sketch below).
- Add junit config option for omitting leafnodetype (#1088) [956e6d2]
- Add support to customize junit report config to omit spec labels (#1087) [de44005]
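A minimal sketch of `ReportBeforeSuite` as described in the first bullet above; the `PreRunStats` field names are an assumption based on the types.Report structure:

```go
import (
    "fmt"

    . "github.com/onsi/ginkgo/v2"
)

var _ = ReportBeforeSuite(func(report Report) {
    // runs once, before any specs; the report carries only pre-run state
    fmt.Fprintf(GinkgoWriter, "about to run %d of %d specs\n",
        report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)
})
```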

### Fixes
- Fix stack trace pruning so that it has a chance of working on windows [2165648]

## 2.5.1

### Fixes
- skipped tests only show as 'S' when running with -v [3ab38ae]
- Fix typo in docs/index.md (#1082) [55fc58d]
- Fix typo in docs/index.md (#1081) [8a14f1f]
- Fix link notation in docs/index.md (#1080) [2669612]
- Fix typo in `--progress` deprecation message (#1076) [b4b7edc]

### Maintenance
- chore: Included githubactions in the dependabot config (#976) [baea341]
- Bump golang.org/x/sys from 0.1.0 to 0.2.0 (#1075) [9646297]

## 2.5.0

### Ginkgo output now includes a timeline-view of the spec

This commit changes Ginkgo's default output. Spec details are now presented as a **timeline** that includes events that occur during the spec lifecycle interleaved with any GinkgoWriter content. This makes it much easier to understand the flow of a spec and where a given failure occurs.

The --progress, --slow-spec-threshold, --always-emit-ginkgo-writer flags and the SuppressProgressReporting decorator have all been deprecated. Instead the existing -v and -vv flags better capture the level of verbosity to display. However, a new --show-node-events flag is added to include node `> Enter` and `< Exit` events in the spec timeline.

In addition, JUnit reports now include the timeline (rendered with -vv) and custom JUnit reports can be configured and generated using `GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig)` (see the sketch below).

Code should continue to work unchanged with this version of Ginkgo - however if you have tooling that was relying on the specific output format of Ginkgo you _may_ run into issues. Ginkgo's console output is not guaranteed to be stable for tooling and automation purposes. You should, instead, use Ginkgo's JSON format to build tooling on top of as it has stronger guarantees to be stable from version to version.
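A sketch of that custom-report hook; the `JunitReportConfig` field names are taken from the entries above (`OmitLeafNodeType` from 2.6.0) and should be treated as illustrative:

```go
import (
    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"

    "github.com/onsi/ginkgo/v2/reporters"
    "github.com/onsi/ginkgo/v2/types"
)

var _ = ReportAfterSuite("custom junit", func(report Report) {
    err := reporters.GenerateJUnitReportWithConfig(report, "junit.custom.xml",
        reporters.JunitReportConfig{
            OmitTimelinesForSpecState: types.SpecStatePassed, // keep timelines only for non-passing specs
            OmitLeafNodeType:          true,
        })
    Expect(err).NotTo(HaveOccurred())
})
```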

### Features
- Provide details about which timeout expired [0f2fa27]

### Fixes
- Add Support Policy to docs [c70867a]

### Maintenance
- Bump github.com/onsi/gomega from 1.22.1 to 1.23.0 (#1070) [bb3b4e2]

## 2.4.0

### Features
@@ -8,7 +262,7 @@

### Fixes
- correcting some typos (#1064) [1403d3c]
- fix flaky internal_integration interupt specs [2105ba3]
- fix flaky internal_integration interrupt specs [2105ba3]
- Correct busted link in README [be6b5b9]

### Maintenance

56  vendor/github.com/onsi/ginkgo/v2/README.md  generated  vendored
@@ -4,11 +4,7 @@

---

# Ginkgo 2.0 is now Generally Available!

You can learn more about 2.0 in the [Migration Guide](https://onsi.github.io/ginkgo/MIGRATING_TO_V2)!

---
# Ginkgo

Ginkgo is a mature testing framework for Go designed to help you write expressive specs. Ginkgo builds on top of Go's `testing` foundation and is complemented by the [Gomega](https://github.com/onsi/gomega) matcher library. Together, Ginkgo and Gomega let you express the intent behind your specs clearly:

@@ -33,53 +29,53 @@ Describe("Checking books out of the library", Label("library"), func() {
    })

    When("the library has the book in question", func() {
        BeforeEach(func() {
            Expect(library.Store(book)).To(Succeed())
        BeforeEach(func(ctx SpecContext) {
            Expect(library.Store(ctx, book)).To(Succeed())
        })

        Context("and the book is available", func() {
            It("lends it to the reader", func() {
                Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed())
            It("lends it to the reader", func(ctx SpecContext) {
                Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
                Expect(valjean.Books()).To(ContainElement(book))
                Expect(library.UserWithBook(book)).To(Equal(valjean))
            })
                Expect(library.UserWithBook(ctx, book)).To(Equal(valjean))
            }, SpecTimeout(time.Second * 5))
        })

        Context("but the book has already been checked out", func() {
            var javert *users.User
            BeforeEach(func() {
            BeforeEach(func(ctx SpecContext) {
                javert = users.NewUser("Javert")
                Expect(javert.Checkout(library, "Les Miserables")).To(Succeed())
                Expect(javert.Checkout(ctx, library, "Les Miserables")).To(Succeed())
            })

            It("tells the user", func() {
                err := valjean.Checkout(library, "Les Miserables")
            It("tells the user", func(ctx SpecContext) {
                err := valjean.Checkout(ctx, library, "Les Miserables")
                Expect(err).To(MatchError("Les Miserables is currently checked out"))
            })
            }, SpecTimeout(time.Second * 5))

            It("lets the user place a hold and get notified later", func() {
                Expect(valjean.Hold(library, "Les Miserables")).To(Succeed())
                Expect(valjean.Holds()).To(ContainElement(book))
            It("lets the user place a hold and get notified later", func(ctx SpecContext) {
                Expect(valjean.Hold(ctx, library, "Les Miserables")).To(Succeed())
                Expect(valjean.Holds(ctx)).To(ContainElement(book))

                By("when Javert returns the book")
                Expect(javert.Return(library, book)).To(Succeed())
                Expect(javert.Return(ctx, library, book)).To(Succeed())

                By("it eventually informs Valjean")
                notification := "Les Miserables is ready for pick up"
                Eventually(valjean.Notifications).Should(ContainElement(notification))
                Eventually(ctx, valjean.Notifications).Should(ContainElement(notification))

                Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed())
                Expect(valjean.Books()).To(ContainElement(book))
                Expect(valjean.Holds()).To(BeEmpty())
            })
                Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
                Expect(valjean.Books(ctx)).To(ContainElement(book))
                Expect(valjean.Holds(ctx)).To(BeEmpty())
            }, SpecTimeout(time.Second * 10))
        })
    })

    When("the library does not have the book in question", func() {
        It("tells the reader the book is unavailable", func() {
            err := valjean.Checkout(library, "Les Miserables")
        It("tells the reader the book is unavailable", func(ctx SpecContext) {
            err := valjean.Checkout(ctx, library, "Les Miserables")
            Expect(err).To(MatchError("Les Miserables is not in the library catalog"))
        })
        }, SpecTimeout(time.Second * 5))
    })
})
```

@@ -92,7 +88,7 @@ If you have a question, comment, bug report, feature request, etc. please open a

Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://lunarmodules.github.io/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing.

With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs)
With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs).

At runtime, Ginkgo can run your specs in reproducibly [random order](https://onsi.github.io/ginkgo/#spec-randomization) and has sophisticated support for [spec parallelization](https://onsi.github.io/ginkgo/#spec-parallelization). In fact, running specs in parallel is as easy as

@@ -100,7 +96,7 @@ At runtime, Ginkgo can run your specs in reproducibly [random order](https://ons
ginkgo -p
```

By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly.
By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. And you don't have to worry about your spec suite hanging or leaving a mess behind - Ginkgo provides a per-node `context.Context` and the capability to interrupt the spec after a set period of time - and then clean up.
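The per-node `context.Context` mentioned above pairs a `SpecContext` argument with a timeout decorator; a minimal sketch in the style of the example above (`slowOperation` is a hypothetical context-aware helper):

```go
It("finishes or is interrupted", func(ctx SpecContext) {
    // Ginkgo cancels ctx when the node exceeds its timeout, so the spec
    // cannot hang and registered cleanup still runs
    Expect(slowOperation(ctx)).To(Succeed())
}, NodeTimeout(30*time.Second))
```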

As your suites grow Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programmatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically).

90  vendor/github.com/onsi/ginkgo/v2/core_dsl.go  generated  vendored
@@ -21,7 +21,6 @@ import (
    "os"
    "path/filepath"
    "strings"
    "time"

    "github.com/go-logr/logr"
    "github.com/onsi/ginkgo/v2/formatter"
@@ -93,11 +92,11 @@ type GinkgoWriterInterface interface {
}

/*
SpecContext is the context object passed into nodes that are subject to a timeout or need to be notified of an interrupt. It implements the standard context.Context interface but also contains additional helpers to provide an extensibility point for Ginkgo. (As an example, Gomega's Eventually can use the methods defined on SpecContext to provide deeper integratoin with Ginkgo).
SpecContext is the context object passed into nodes that are subject to a timeout or need to be notified of an interrupt. It implements the standard context.Context interface but also contains additional helpers to provide an extensibility point for Ginkgo. (As an example, Gomega's Eventually can use the methods defined on SpecContext to provide deeper integration with Ginkgo).

You can do anything with SpecContext that you do with a typical context.Context including wrapping it with any of the context.With* methods.

Ginkgo will cancel the SpecContext when a node is interrupted (e.g. by the user sending an interupt signal) or when a node has exceeded it's allowed run-time. Note, however, that even in cases where a node has a deadline, SpecContext will not return a deadline via .Deadline(). This is because Ginkgo does not use a WithDeadline() context to model node deadlines as Ginkgo needs control over the precise timing of the context cancellation to ensure it can provide an accurate progress report at the moment of cancellation.
Ginkgo will cancel the SpecContext when a node is interrupted (e.g. by the user sending an interrupt signal) or when a node has exceeded its allowed run-time. Note, however, that even in cases where a node has a deadline, SpecContext will not return a deadline via .Deadline(). This is because Ginkgo does not use a WithDeadline() context to model node deadlines as Ginkgo needs control over the precise timing of the context cancellation to ensure it can provide an accurate progress report at the moment of cancellation.
*/
type SpecContext = internal.SpecContext

@@ -164,6 +163,29 @@ func GinkgoParallelProcess() int {
    return suiteConfig.ParallelProcess
}

/*
GinkgoHelper marks the function it's called in as a test helper. When a failure occurs inside a helper function, Ginkgo will skip the helper when analyzing the stack trace to identify where the failure occurred.

This is an alternative, simpler, mechanism to passing in a skip offset when calling Fail or using Gomega.
*/
func GinkgoHelper() {
    types.MarkAsHelper(1)
}

/*
GinkgoLabelFilter() returns the label filter configured for this suite via `--label-filter`.

You can use this to manually check if a set of labels would satisfy the filter via:

    if (Label("cat", "dog").MatchesLabelFilter(GinkgoLabelFilter())) {
        //...
    }
*/
func GinkgoLabelFilter() string {
    suiteConfig, _ := GinkgoConfiguration()
    return suiteConfig.LabelFilter
}

/*
PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant
when running in parallel and output to stdout/stderr is being intercepted. You generally
@@ -276,7 +298,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
    }

    writer := GinkgoWriter.(*internal.Writer)
    if reporterConfig.Verbose && suiteConfig.ParallelTotal == 1 {
    if reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) && suiteConfig.ParallelTotal == 1 {
        writer.SetMode(internal.WriterModeStreamAndBuffer)
    } else {
        writer.SetMode(internal.WriterModeBufferOnly)
@@ -370,6 +392,12 @@ func AbortSuite(message string, callerSkip ...int) {
    panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
}

/*
ignorablePanic is used by Gomega to signal to GinkgoRecover that Gomega is handling
the error associated with this panic. It is used when Eventually/Consistently are passed a func(g Gomega) and the resulting function launches a goroutine that makes a failed assertion. That failed assertion is registered by Gomega and then panics. Ordinarily the panic is captured by Gomega. In the case of a goroutine Gomega can't capture the panic - so we piggy back on GinkgoRecover so users have a single defer GinkgoRecover() pattern to follow. To do that we need to tell Ginkgo to ignore this panic and not register it as a panic on the global Failer.
*/
type ignorablePanic interface{ GinkgoRecoverShouldIgnoreThisPanic() }

/*
GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
@@ -385,6 +413,9 @@ You can learn more about how Ginkgo manages failures here: https://onsi.github.i
func GinkgoRecover() {
    e := recover()
    if e != nil {
        if _, ok := e.(ignorablePanic); ok {
            return
        }
        global.Failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
    }
}
@@ -509,35 +540,11 @@ and will simply log the passed in text to the GinkgoWriter. If By is handed a f

By will also generate and attach a ReportEntry to the spec. This will ensure that By annotations appear in Ginkgo's machine-readable reports.

Note that By does not generate a new Ginkgo node - rather it is simply synctactic sugar around GinkgoWriter and AddReportEntry
Note that By does not generate a new Ginkgo node - rather it is simply syntactic sugar around GinkgoWriter and AddReportEntry
You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by
*/
func By(text string, callback ...func()) {
    if !global.Suite.InRunPhase() {
        exitIfErr(types.GinkgoErrors.ByNotDuringRunPhase(types.NewCodeLocation(1)))
    }
    value := struct {
        Text string
        Duration time.Duration
    }{
        Text: text,
    }
    t := time.Now()
    global.Suite.SetProgressStepCursor(internal.ProgressStepCursor{
        Text: text,
        CodeLocation: types.NewCodeLocation(1),
        StartTime: t,
    })
    AddReportEntry("By Step", ReportEntryVisibilityNever, Offset(1), &value, t)
    formatter := formatter.NewWithNoColorBool(reporterConfig.NoColor)
    GinkgoWriter.Println(formatter.F("{{bold}}STEP:{{/}} %s {{gray}}%s{{/}}", text, t.Format(types.GINKGO_TIME_FORMAT)))
    if len(callback) == 1 {
        callback[0]()
        value.Duration = time.Since(t)
    }
    if len(callback) > 1 {
        panic("just one callback per By, please")
    }
    exitIfErr(global.Suite.By(text, callback...))
}

/*
@@ -736,7 +743,7 @@ For example:
    os.SetEnv("FOO", "BAR")
})

will register a cleanup handler that will set the environment variable "FOO" to it's current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec.
will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec.

Similarly:

@@ -764,3 +771,24 @@ func DeferCleanup(args ...interface{}) {
    }
    pushNode(internal.NewCleanupNode(deprecationTracker, fail, args...))
}

/*
AttachProgressReporter allows you to register a function that will be called whenever Ginkgo generates a Progress Report. The contents returned by the function will be included in the report.

**This is an experimental feature and the public-facing interface may change in a future minor version of Ginkgo**

Progress Reports are generated:
- whenever the user explicitly requests one (via `SIGINFO` or `SIGUSR1`)
- on nodes decorated with PollProgressAfter
- on suites run with --poll-progress-after
- whenever a test times out

Ginkgo uses Progress Reports to convey the current state of the test suite, including any running goroutines. By attaching a progress reporter you are able to supplement these reports with additional information.

# AttachProgressReporter returns a function that can be called to detach the progress reporter

You can learn more about AttachProgressReporter here: https://onsi.github.io/ginkgo/#attaching-additional-information-to-progress-reports
*/
func AttachProgressReporter(reporter func() string) func() {
    return global.Suite.AttachProgressReporter(reporter)
}

14  vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go  generated  vendored
@@ -46,7 +46,7 @@ const Pending = internal.Pending

/*
Serial is a decorator that allows you to mark a spec or container as serial. These specs will never run in parallel with other specs.
Tests in ordered containers cannot be marked as serial - mark the ordered container instead.
Specs in ordered containers cannot be marked as serial - mark the ordered container instead.

You can learn more here: https://onsi.github.io/ginkgo/#serial-specs
You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
@@ -54,7 +54,7 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat
const Serial = internal.Serial

/*
Ordered is a decorator that allows you to mark a container as ordered. Tests in the container will always run in the order they appear.
Ordered is a decorator that allows you to mark a container as ordered. Specs in the container will always run in the order they appear.
They will never be randomized and they will never run in parallel with one another, though they may run in parallel with other specs.

You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
@@ -62,6 +62,16 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat
*/
const Ordered = internal.Ordered

/*
ContinueOnFailure is a decorator that allows you to mark an Ordered container to continue running specs even if failures occur. Ordinarily an ordered container will stop running specs after the first failure occurs. Note that if a BeforeAll or a BeforeEach/JustBeforeEach annotated with OncePerOrdered fails then no specs will run, as the precondition for the Ordered container will be considered failed.

ContinueOnFailure only applies to the outermost Ordered container. Attempting to place ContinueOnFailure in a nested container will result in an error.

You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
*/
const ContinueOnFailure = internal.ContinueOnFailure

/*
OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEach, JustBeforeEach, and JustAfterEach setup nodes to run once
per ordered context. Normally these setup nodes run around each individual spec, with OncePerOrdered they will run once around the set of specs in an ordered container.

61  vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go  generated  vendored
@@ -4,6 +4,7 @@ import (
    "fmt"
    "os"
    "regexp"
    "strconv"
    "strings"
)

@@ -50,6 +51,37 @@ func NewWithNoColorBool(noColor bool) Formatter {
}

func New(colorMode ColorMode) Formatter {
    colorAliases := map[string]int{
        "black": 0,
        "red": 1,
        "green": 2,
        "yellow": 3,
        "blue": 4,
        "magenta": 5,
        "cyan": 6,
        "white": 7,
    }
    for colorAlias, n := range colorAliases {
        colorAliases[fmt.Sprintf("bright-%s", colorAlias)] = n + 8
    }

    getColor := func(color, defaultEscapeCode string) string {
        color = strings.ToUpper(strings.ReplaceAll(color, "-", "_"))
        envVar := fmt.Sprintf("GINKGO_CLI_COLOR_%s", color)
        envVarColor := os.Getenv(envVar)
        if envVarColor == "" {
            return defaultEscapeCode
        }
        if colorCode, ok := colorAliases[envVarColor]; ok {
            return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
        }
        colorCode, err := strconv.Atoi(envVarColor)
        if err != nil || colorCode < 0 || colorCode > 255 {
            return defaultEscapeCode
        }
        return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
    }

    f := Formatter{
        ColorMode: colorMode,
        colors: map[string]string{
@@ -57,18 +89,18 @@ func New(colorMode ColorMode) Formatter {
            "bold": "\x1b[1m",
            "underline": "\x1b[4m",

            "red": "\x1b[38;5;9m",
            "orange": "\x1b[38;5;214m",
            "coral": "\x1b[38;5;204m",
            "magenta": "\x1b[38;5;13m",
            "green": "\x1b[38;5;10m",
            "dark-green": "\x1b[38;5;28m",
            "yellow": "\x1b[38;5;11m",
            "light-yellow": "\x1b[38;5;228m",
            "cyan": "\x1b[38;5;14m",
            "gray": "\x1b[38;5;243m",
            "light-gray": "\x1b[38;5;246m",
            "blue": "\x1b[38;5;12m",
            "red": getColor("red", "\x1b[38;5;9m"),
            "orange": getColor("orange", "\x1b[38;5;214m"),
            "coral": getColor("coral", "\x1b[38;5;204m"),
            "magenta": getColor("magenta", "\x1b[38;5;13m"),
            "green": getColor("green", "\x1b[38;5;10m"),
            "dark-green": getColor("dark-green", "\x1b[38;5;28m"),
            "yellow": getColor("yellow", "\x1b[38;5;11m"),
            "light-yellow": getColor("light-yellow", "\x1b[38;5;228m"),
            "cyan": getColor("cyan", "\x1b[38;5;14m"),
            "gray": getColor("gray", "\x1b[38;5;243m"),
            "light-gray": getColor("light-gray", "\x1b[38;5;246m"),
            "blue": getColor("blue", "\x1b[38;5;12m"),
        },
    }
    colors := []string{}
@@ -88,7 +120,10 @@ func (f Formatter) Fi(indentation uint, format string, args ...interface{}) stri
}

func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
    out := fmt.Sprintf(f.style(format), args...)
    out := f.style(format)
    if len(args) > 0 {
        out = fmt.Sprintf(out, args...)
    }

    if indentation == 0 && maxWidth == 0 {
        return out

63  vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go  generated  vendored  new file
@@ -0,0 +1,63 @@
package build

import (
    "fmt"

    "github.com/onsi/ginkgo/v2/ginkgo/command"
    "github.com/onsi/ginkgo/v2/ginkgo/internal"
    "github.com/onsi/ginkgo/v2/types"
)

func BuildBuildCommand() command.Command {
    var cliConfig = types.NewDefaultCLIConfig()
    var goFlagsConfig = types.NewDefaultGoFlagsConfig()

    flags, err := types.BuildBuildCommandFlagSet(&cliConfig, &goFlagsConfig)
    if err != nil {
        panic(err)
    }

    return command.Command{
        Name: "build",
        Flags: flags,
        Usage: "ginkgo build <FLAGS> <PACKAGES>",
        ShortDoc: "Build the passed in <PACKAGES> (or the package in the current directory if left blank).",
        DocLink: "precompiling-suites",
        Command: func(args []string, _ []string) {
            var errors []error
            cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
            command.AbortIfErrors("Ginkgo detected configuration issues:", errors)

            buildSpecs(args, cliConfig, goFlagsConfig)
        },
    }
}

func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) {
    suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
    if len(suites) == 0 {
        command.AbortWith("Found no test suites")
    }

    internal.VerifyCLIAndFrameworkVersion(suites)

    opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers())
    opc.StartCompiling(suites, goFlagsConfig)

    for {
        suiteIdx, suite := opc.Next()
        if suiteIdx >= len(suites) {
            break
        }
        suites[suiteIdx] = suite
        if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
            fmt.Println(suite.CompilationError.Error())
        } else {
            fmt.Printf("Compiled %s.test\n", suite.PackageName)
        }
    }

    if suites.CountWithState(internal.TestSuiteStateFailedToCompile) > 0 {
        command.AbortWith("Failed to compile all tests")
    }
}

61  vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go  generated  vendored  new file
@@ -0,0 +1,61 @@
package command

import "fmt"

type AbortDetails struct {
    ExitCode int
    Error error
    EmitUsage bool
}

func Abort(details AbortDetails) {
    panic(details)
}

func AbortGracefullyWith(format string, args ...interface{}) {
    Abort(AbortDetails{
        ExitCode: 0,
        Error: fmt.Errorf(format, args...),
        EmitUsage: false,
    })
}

func AbortWith(format string, args ...interface{}) {
    Abort(AbortDetails{
        ExitCode: 1,
        Error: fmt.Errorf(format, args...),
        EmitUsage: false,
    })
}

func AbortWithUsage(format string, args ...interface{}) {
    Abort(AbortDetails{
        ExitCode: 1,
        Error: fmt.Errorf(format, args...),
        EmitUsage: true,
    })
}

func AbortIfError(preamble string, err error) {
    if err != nil {
        Abort(AbortDetails{
            ExitCode: 1,
            Error: fmt.Errorf("%s\n%s", preamble, err.Error()),
            EmitUsage: false,
        })
    }
}

func AbortIfErrors(preamble string, errors []error) {
    if len(errors) > 0 {
        out := ""
        for _, err := range errors {
            out += err.Error()
        }
        Abort(AbortDetails{
            ExitCode: 1,
            Error: fmt.Errorf("%s\n%s", preamble, out),
            EmitUsage: false,
        })
    }
}

50  vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go  generated  vendored  new file
@@ -0,0 +1,50 @@
package command

import (
    "fmt"
    "io"
    "strings"

    "github.com/onsi/ginkgo/v2/formatter"
    "github.com/onsi/ginkgo/v2/types"
)

type Command struct {
    Name string
    Flags types.GinkgoFlagSet
    Usage string
    ShortDoc string
    Documentation string
    DocLink string
    Command func(args []string, additionalArgs []string)
}

func (c Command) Run(args []string, additionalArgs []string) {
    args, err := c.Flags.Parse(args)
    if err != nil {
        AbortWithUsage(err.Error())
    }

    c.Command(args, additionalArgs)
}

func (c Command) EmitUsage(writer io.Writer) {
    fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}"))
    fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage))))
    if c.ShortDoc != "" {
        fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc))
        fmt.Fprintln(writer, "")
    }
    if c.Documentation != "" {
        fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation))
        fmt.Fprintln(writer, "")
    }
    if c.DocLink != "" {
        fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink))
        fmt.Fprintln(writer, "")
    }
    flagUsage := c.Flags.Usage()
    if flagUsage != "" {
        fmt.Fprintf(writer, formatter.F(flagUsage))
    }
}

182  vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go  generated  vendored  new file
@@ -0,0 +1,182 @@
package command

import (
    "fmt"
    "io"
    "os"
    "strings"

    "github.com/onsi/ginkgo/v2/formatter"
    "github.com/onsi/ginkgo/v2/types"
)

type Program struct {
    Name string
    Heading string
    Commands []Command
    DefaultCommand Command
    DeprecatedCommands []DeprecatedCommand

    //For testing - leave as nil in production
    OutWriter io.Writer
    ErrWriter io.Writer
    Exiter func(code int)
}

type DeprecatedCommand struct {
    Name string
    Deprecation types.Deprecation
}

func (p Program) RunAndExit(osArgs []string) {
    var command Command
    deprecationTracker := types.NewDeprecationTracker()
    if p.Exiter == nil {
        p.Exiter = os.Exit
    }
    if p.OutWriter == nil {
        p.OutWriter = formatter.ColorableStdOut
    }
    if p.ErrWriter == nil {
        p.ErrWriter = formatter.ColorableStdErr
    }

    defer func() {
        exitCode := 0

        if r := recover(); r != nil {
            details, ok := r.(AbortDetails)
            if !ok {
                panic(r)
            }

            if details.Error != nil {
                fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name))
                fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error()))
            }
            if details.EmitUsage {
                if details.Error != nil {
                    fmt.Fprintln(p.ErrWriter, "")
                }
                command.EmitUsage(p.ErrWriter)
            }
            exitCode = details.ExitCode
        }

        command.Flags.ValidateDeprecations(deprecationTracker)
        if deprecationTracker.DidTrackDeprecations() {
            fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport())
        }
        p.Exiter(exitCode)
        return
    }()

    args, additionalArgs := []string{}, []string{}

    foundDelimiter := false
    for _, arg := range osArgs[1:] {
        if !foundDelimiter {
            if arg == "--" {
                foundDelimiter = true
                continue
            }
        }

        if foundDelimiter {
            additionalArgs = append(additionalArgs, arg)
        } else {
            args = append(args, arg)
        }
    }

    command = p.DefaultCommand
    if len(args) > 0 {
        p.handleHelpRequestsAndExit(p.OutWriter, args)
        if command.Name == args[0] {
            args = args[1:]
        } else {
            for _, deprecatedCommand := range p.DeprecatedCommands {
                if deprecatedCommand.Name == args[0] {
                    deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation)
                    return
                }
            }
            for _, tryCommand := range p.Commands {
                if tryCommand.Name == args[0] {
                    command, args = tryCommand, args[1:]
                    break
                }
            }
        }
    }

    command.Run(args, additionalArgs)
}

func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) {
    if len(args) == 0 {
        return
    }

    matchesHelpFlag := func(args ...string) bool {
        for _, arg := range args {
            if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" {
                return true
            }
        }
        return false
    }
    if len(args) == 1 {
        if args[0] == "help" || matchesHelpFlag(args[0]) {
            p.EmitUsage(writer)
            Abort(AbortDetails{})
        }
    } else {
        var name string
        if args[0] == "help" || matchesHelpFlag(args[0]) {
            name = args[1]
        } else if matchesHelpFlag(args[1:]...) {
            name = args[0]
        } else {
            return
        }

        if p.DefaultCommand.Name == name || p.Name == name {
            p.DefaultCommand.EmitUsage(writer)
            Abort(AbortDetails{})
        }
        for _, command := range p.Commands {
            if command.Name == name {
                command.EmitUsage(writer)
                Abort(AbortDetails{})
            }
        }

        fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name))
        fmt.Fprintln(writer, "")
        p.EmitUsage(writer)
        Abort(AbortDetails{ExitCode: 1})
    }
    return
}

func (p Program) EmitUsage(writer io.Writer) {
    fmt.Fprintln(writer, formatter.F(p.Heading))
    fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading))))
    fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name))
    fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name))
    fmt.Fprintln(writer, "")
    fmt.Fprintln(writer, formatter.F("The following commands are available:"))

    fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage))
    if p.DefaultCommand.ShortDoc != "" {
        fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc))
    }

    for _, command := range p.Commands {
        fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage))
        if command.ShortDoc != "" {
            fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc))
        }
    }
}

48  vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go  generated  vendored  new file
@@ -0,0 +1,48 @@
package generators

var bootstrapText = `package {{.Package}}

import (
    "testing"

    {{.GinkgoImport}}
    {{.GomegaImport}}
)

func Test{{.FormattedName}}(t *testing.T) {
    {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail)
    {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite")
}
`

var agoutiBootstrapText = `package {{.Package}}

import (
    "testing"

    {{.GinkgoImport}}
    {{.GomegaImport}}
    "github.com/sclevine/agouti"
)

func Test{{.FormattedName}}(t *testing.T) {
    {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail)
    {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite")
}

var agoutiDriver *agouti.WebDriver

var _ = {{.GinkgoPackage}}BeforeSuite(func() {
    // Choose a WebDriver:

    agoutiDriver = agouti.PhantomJS()
    // agoutiDriver = agouti.Selenium()
    // agoutiDriver = agouti.ChromeDriver()

    {{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed())
})

var _ = {{.GinkgoPackage}}AfterSuite(func() {
    {{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed())
})
`

133  vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go  generated  vendored  new file
@@ -0,0 +1,133 @@
package generators

import (
    "bytes"
    "encoding/json"
    "fmt"
    "os"
    "text/template"

    sprig "github.com/go-task/slim-sprig"
    "github.com/onsi/ginkgo/v2/ginkgo/command"
    "github.com/onsi/ginkgo/v2/ginkgo/internal"
    "github.com/onsi/ginkgo/v2/types"
)

func BuildBootstrapCommand() command.Command {
    conf := GeneratorsConfig{}
    flags, err := types.NewGinkgoFlagSet(
        types.GinkgoFlags{
            {Name: "agouti", KeyPath: "Agouti",
                Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"},
            {Name: "nodot", KeyPath: "NoDot",
                Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"},
            {Name: "internal", KeyPath: "Internal",
                Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"},
            {Name: "template", KeyPath: "CustomTemplate",
                UsageArgument: "template-file",
                Usage: "If specified, generate will use the contents of the file passed as the bootstrap template"},
            {Name: "template-data", KeyPath: "CustomTemplateData",
                UsageArgument: "template-data-file",
                Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the bootstrap template"},
        },
        &conf,
        types.GinkgoFlagSections{},
    )

    if err != nil {
        panic(err)
    }

    return command.Command{
        Name: "bootstrap",
        Usage: "ginkgo bootstrap",
        ShortDoc: "Bootstrap a test suite for the current package",
        Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure.

{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`,
        DocLink: "generators",
        Flags: flags,
        Command: func(_ []string, _ []string) {
            generateBootstrap(conf)
        },
    }
}

type bootstrapData struct {
    Package string
    FormattedName string

    GinkgoImport string
    GomegaImport string
    GinkgoPackage string
    GomegaPackage string
    CustomData map[string]any
}

func generateBootstrap(conf GeneratorsConfig) {
    packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()

    data := bootstrapData{
        Package: determinePackageName(packageName, conf.Internal),
        FormattedName: formattedName,

        GinkgoImport: `. "github.com/onsi/ginkgo/v2"`,
        GomegaImport: `. "github.com/onsi/gomega"`,
        GinkgoPackage: "",
        GomegaPackage: "",
    }

    if conf.NoDot {
        data.GinkgoImport = `"github.com/onsi/ginkgo/v2"`
        data.GomegaImport = `"github.com/onsi/gomega"`
        data.GinkgoPackage = `ginkgo.`
        data.GomegaPackage = `gomega.`
    }

    targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
    if internal.FileExists(targetFile) {
        command.AbortWith("{{bold}}%s{{/}} already exists", targetFile)
    } else {
        fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
    }

    f, err := os.Create(targetFile)
    command.AbortIfError("Failed to create file:", err)
    defer f.Close()

    var templateText string
    if conf.CustomTemplate != "" {
        tpl, err := os.ReadFile(conf.CustomTemplate)
        command.AbortIfError("Failed to read custom bootstrap file:", err)
        templateText = string(tpl)
        if conf.CustomTemplateData != "" {
            var tplCustomDataMap map[string]any
            tplCustomData, err := os.ReadFile(conf.CustomTemplateData)
            command.AbortIfError("Failed to read custom boostrap data file:", err)
            if !json.Valid([]byte(tplCustomData)) {
                command.AbortWith("Invalid JSON object in custom data file.")
            }
            //create map from the custom template data
            json.Unmarshal(tplCustomData, &tplCustomDataMap)
            data.CustomData = tplCustomDataMap
        }
    } else if conf.Agouti {
        templateText = agoutiBootstrapText
    } else {
        templateText = bootstrapText
    }

    //Setting the option to explicitly fail if template is rendered trying to access missing key
    bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText)
    command.AbortIfError("Failed to parse bootstrap template:", err)

    buf := &bytes.Buffer{}
    //Being explicit about failing sooner during template rendering
    //when accessing custom data rather than during the go fmt command
    err = bootstrapTemplate.Execute(buf, data)
    command.AbortIfError("Failed to render bootstrap template:", err)

    buf.WriteTo(f)

    internal.GoFmt(targetFile)
}

259  vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go  generated  vendored  new file
@ -0,0 +1,259 @@
|
||||
package generators

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"text/template"

	sprig "github.com/go-task/slim-sprig"
	"github.com/onsi/ginkgo/v2/ginkgo/command"
	"github.com/onsi/ginkgo/v2/ginkgo/internal"
	"github.com/onsi/ginkgo/v2/types"
)

func BuildGenerateCommand() command.Command {
	conf := GeneratorsConfig{}
	flags, err := types.NewGinkgoFlagSet(
		types.GinkgoFlags{
			{Name: "agouti", KeyPath: "Agouti",
				Usage: "If set, generate will create a test file for writing Agouti tests"},
			{Name: "nodot", KeyPath: "NoDot",
				Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"},
			{Name: "internal", KeyPath: "Internal",
				Usage: "If set, generate will create a test file that uses the regular package name (i.e. `package X`, not `package X_test`)"},
			{Name: "template", KeyPath: "CustomTemplate",
				UsageArgument: "template-file",
				Usage:         "If specified, generate will use the contents of the file passed as the test file template"},
			{Name: "template-data", KeyPath: "CustomTemplateData",
				UsageArgument: "template-data-file",
				Usage:         "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"},
		},
		&conf,
		types.GinkgoFlagSections{},
	)

	if err != nil {
		panic(err)
	}

	return command.Command{
		Name:     "generate",
		Usage:    "ginkgo generate <filename(s)>",
		ShortDoc: "Generate a test file named <filename>_test.go",
		Documentation: `If the optional <filename> argument is omitted, a file named after the package in the current directory will be created.

You can pass multiple <filename(s)> to generate multiple files simultaneously. The resulting files are named <filename>_test.go.

You can also pass a <filename> of the form "file.go" and generate will emit "file_test.go".`,
		DocLink: "generators",
		Flags:   flags,
		Command: func(args []string, _ []string) {
			generateTestFiles(conf, args)
		},
	}
}

type specData struct {
	Package           string
	Subject           string
	PackageImportPath string
	ImportPackage     bool

	GinkgoImport  string
	GomegaImport  string
	GinkgoPackage string
	GomegaPackage string
	CustomData    map[string]any
}

func generateTestFiles(conf GeneratorsConfig, args []string) {
	subjects := args
	if len(subjects) == 0 {
		subjects = []string{""}
	}
	for _, subject := range subjects {
		generateTestFileForSubject(subject, conf)
	}
}

func generateTestFileForSubject(subject string, conf GeneratorsConfig) {
	packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
	if subject != "" {
		specFilePrefix = formatSubject(subject)
		formattedName = prettifyName(specFilePrefix)
	}

	if conf.Internal {
		specFilePrefix = specFilePrefix + "_internal"
	}

	data := specData{
		Package:           determinePackageName(packageName, conf.Internal),
		Subject:           formattedName,
		PackageImportPath: getPackageImportPath(),
		ImportPackage:     !conf.Internal,

		GinkgoImport:  `. "github.com/onsi/ginkgo/v2"`,
		GomegaImport:  `. "github.com/onsi/gomega"`,
		GinkgoPackage: "",
		GomegaPackage: "",
	}

	if conf.NoDot {
		data.GinkgoImport = `"github.com/onsi/ginkgo/v2"`
		data.GomegaImport = `"github.com/onsi/gomega"`
		data.GinkgoPackage = `ginkgo.`
		data.GomegaPackage = `gomega.`
	}

	targetFile := fmt.Sprintf("%s_test.go", specFilePrefix)
	if internal.FileExists(targetFile) {
		command.AbortWith("{{bold}}%s{{/}} already exists", targetFile)
	} else {
		fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile)
	}

	f, err := os.Create(targetFile)
	command.AbortIfError("Failed to create test file:", err)
	defer f.Close()

	var templateText string
	if conf.CustomTemplate != "" {
		tpl, err := os.ReadFile(conf.CustomTemplate)
		command.AbortIfError("Failed to read custom template file:", err)
		templateText = string(tpl)
		if conf.CustomTemplateData != "" {
			var tplCustomDataMap map[string]any
			tplCustomData, err := os.ReadFile(conf.CustomTemplateData)
			command.AbortIfError("Failed to read custom template data file:", err)
			if !json.Valid([]byte(tplCustomData)) {
				command.AbortWith("Invalid JSON object in custom data file.")
			}
			// create map from the custom template data
			json.Unmarshal(tplCustomData, &tplCustomDataMap)
			data.CustomData = tplCustomDataMap
		}
	} else if conf.Agouti {
		templateText = agoutiSpecText
	} else {
		templateText = specText
	}

	// Set the option to explicitly fail if the template is rendered trying to access a missing key
	specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText)
	command.AbortIfError("Failed to parse test template:", err)

	// Be explicit about failing sooner during template rendering
	// when accessing custom data rather than during the go fmt command
	err = specTemplate.Execute(f, data)
	command.AbortIfError("Failed to render test template:", err)
	internal.GoFmt(targetFile)
}

func formatSubject(name string) string {
	name = strings.ReplaceAll(name, "-", "_")
	name = strings.ReplaceAll(name, " ", "_")
	name = strings.Split(name, ".go")[0]
	name = strings.Split(name, "_test")[0]
	return name
}

// moduleName returns the module name from the go.mod file in the given module root directory
func moduleName(modRoot string) string {
	modFile, err := os.Open(filepath.Join(modRoot, "go.mod"))
	if err != nil {
		return ""
	}

	mod := make([]byte, 128)
	_, err = modFile.Read(mod)
	if err != nil {
		return ""
	}

	slashSlash := []byte("//")
	moduleStr := []byte("module")

	for len(mod) > 0 {
		line := mod
		mod = nil
		if i := bytes.IndexByte(line, '\n'); i >= 0 {
			line, mod = line[:i], line[i+1:]
		}
		if i := bytes.Index(line, slashSlash); i >= 0 {
			line = line[:i]
		}
		line = bytes.TrimSpace(line)
		if !bytes.HasPrefix(line, moduleStr) {
			continue
		}
		line = line[len(moduleStr):]
		n := len(line)
		line = bytes.TrimSpace(line)
		if len(line) == n || len(line) == 0 {
			continue
		}

		if line[0] == '"' || line[0] == '`' {
			p, err := strconv.Unquote(string(line))
			if err != nil {
				return "" // malformed quoted string or multiline module path
			}
			return p
		}

		return string(line)
	}

	return "" // missing module path
}

func findModuleRoot(dir string) (root string) {
	dir = filepath.Clean(dir)

	// Look for an enclosing go.mod.
	for {
		if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
			return dir
		}
		d := filepath.Dir(dir)
		if d == dir {
			break
		}
		dir = d
	}
	return ""
}

func getPackageImportPath() string {
	workingDir, err := os.Getwd()
	if err != nil {
		panic(err.Error())
	}

	sep := string(filepath.Separator)

	// Try the go.mod file first
	modRoot := findModuleRoot(workingDir)
	if modRoot != "" {
		modName := moduleName(modRoot)
		if modName != "" {
			cd := strings.ReplaceAll(workingDir, modRoot, "")
			cd = strings.ReplaceAll(cd, sep, "/")
			return modName + cd
		}
	}

	// Fall back to the GOPATH structure
	paths := strings.Split(workingDir, sep+"src"+sep)
	if len(paths) == 1 {
		fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n")
		return "UNKNOWN_PACKAGE_PATH"
	}
	return filepath.ToSlash(paths[len(paths)-1])
}
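The --template/--template-data flags above funnel an arbitrary JSON object into the template's .CustomData map, after validating it with json.Valid. A hedged sketch of that same flow outside the CLI (the JSON payload and field names are invented):

package main

import (
	"encoding/json"
	"os"
	"text/template"
)

func main() {
	// Mirrors generateTestFileForSubject: validate the JSON first, then
	// unmarshal it into the map the template sees as .CustomData.
	raw := []byte(`{"Owner": "storage-team", "Tier": 2}`)
	if !json.Valid(raw) {
		panic("invalid JSON object in custom data")
	}
	var custom map[string]any
	json.Unmarshal(raw, &custom)

	tpl := template.Must(template.New("spec").
		Option("missingkey=error").
		Parse(`// Owned by {{.CustomData.Owner}} (tier {{.CustomData.Tier}})`))
	tpl.Execute(os.Stdout, map[string]any{"CustomData": custom})
}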
41
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
package generators

var specText = `package {{.Package}}

import (
	{{.GinkgoImport}}
	{{.GomegaImport}}

	{{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
)

var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {

})
`

var agoutiSpecText = `package {{.Package}}

import (
	{{.GinkgoImport}}
	{{.GomegaImport}}
	"github.com/sclevine/agouti"
	. "github.com/sclevine/agouti/matchers"

	{{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
)

var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {
	var page *agouti.Page

	{{.GinkgoPackage}}BeforeEach(func() {
		var err error
		page, err = agoutiDriver.NewPage()
		{{.GomegaPackage}}Expect(err).NotTo({{.GomegaPackage}}HaveOccurred())
	})

	{{.GinkgoPackage}}AfterEach(func() {
		{{.GomegaPackage}}Expect(page.Destroy()).To({{.GomegaPackage}}Succeed())
	})
})
`
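To make the placeholders concrete: with the default dot-import configuration and a hypothetical "books" package whose import path resolves to example.com/books, specText expands to roughly the following (go fmt then normalizes whitespace):

package books_test

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"example.com/books"
)

var _ = Describe("Books", func() {

})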
64
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
package generators

import (
	"go/build"
	"os"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/onsi/ginkgo/v2/ginkgo/command"
)

type GeneratorsConfig struct {
	Agouti, NoDot, Internal bool
	CustomTemplate          string
	CustomTemplateData      string
}

func getPackageAndFormattedName() (string, string, string) {
	path, err := os.Getwd()
	command.AbortIfError("Could not get current working directory:", err)

	dirName := strings.ReplaceAll(filepath.Base(path), "-", "_")
	dirName = strings.ReplaceAll(dirName, " ", "_")

	pkg, err := build.ImportDir(path, 0)
	packageName := pkg.Name
	if err != nil {
		packageName = ensureLegalPackageName(dirName)
	}

	formattedName := prettifyName(filepath.Base(path))
	return packageName, dirName, formattedName
}

func ensureLegalPackageName(name string) string {
	if name == "_" {
		return "underscore"
	}
	if len(name) == 0 {
		return "empty"
	}
	n, isDigitErr := strconv.Atoi(string(name[0]))
	if isDigitErr == nil {
		return []string{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}[n] + name[1:]
	}
	return name
}

func prettifyName(name string) string {
	name = strings.ReplaceAll(name, "-", " ")
	name = strings.ReplaceAll(name, "_", " ")
	name = strings.Title(name)
	name = strings.ReplaceAll(name, " ", "")
	return name
}

func determinePackageName(name string, internal bool) string {
	if internal {
		return name
	}

	return name + "_test"
}
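These helpers turn a directory name into a legal package name and a readable spec subject. A small standalone sketch of the same transformations (re-implemented here for illustration, since the originals are unexported):

package main

import (
	"fmt"
	"strings"
)

// prettify mirrors prettifyName: "my-cool_package" -> "MyCoolPackage".
func prettify(name string) string {
	name = strings.ReplaceAll(name, "-", " ")
	name = strings.ReplaceAll(name, "_", " ")
	name = strings.Title(name) // deprecated in the stdlib, kept for parity with the vendored code
	return strings.ReplaceAll(name, " ", "")
}

func main() {
	fmt.Println(prettify("my-cool_package")) // MyCoolPackage
	// ensureLegalPackageName("3dmodels") would yield "threedmodels",
	// since a Go package name may not start with a digit.
}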
161
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
generated
vendored
Normal file
@ -0,0 +1,161 @@
package internal

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"sync"

	"github.com/onsi/ginkgo/v2/types"
)

func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite {
	if suite.PathToCompiledTest != "" {
		return suite
	}

	suite.CompilationError = nil

	path, err := filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test"))
	if err != nil {
		suite.State = TestSuiteStateFailedToCompile
		suite.CompilationError = fmt.Errorf("Failed to compute compilation target path:\n%s", err.Error())
		return suite
	}

	ginkgoInvocationPath, _ := os.Getwd()
	ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath)
	packagePath := suite.AbsPath()
	pathToInvocationPath, err := filepath.Rel(packagePath, ginkgoInvocationPath)
	if err != nil {
		suite.State = TestSuiteStateFailedToCompile
		suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error())
		return suite
	}
	args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath)
	if err != nil {
		suite.State = TestSuiteStateFailedToCompile
		suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error())
		return suite
	}

	cmd := exec.Command("go", args...)
	cmd.Dir = suite.Path
	output, err := cmd.CombinedOutput()
	if err != nil {
		if len(output) > 0 {
			suite.State = TestSuiteStateFailedToCompile
			suite.CompilationError = fmt.Errorf("Failed to compile %s:\n\n%s", suite.PackageName, output)
		} else {
			suite.State = TestSuiteStateFailedToCompile
			suite.CompilationError = fmt.Errorf("Failed to compile %s\n%s", suite.PackageName, err.Error())
		}
		return suite
	}

	if strings.Contains(string(output), "[no test files]") {
		suite.State = TestSuiteStateSkippedDueToEmptyCompilation
		return suite
	}

	if len(output) > 0 {
		fmt.Println(string(output))
	}

	if !FileExists(path) {
		suite.State = TestSuiteStateFailedToCompile
		suite.CompilationError = fmt.Errorf("Failed to compile %s:\nOutput file %s could not be found", suite.PackageName, path)
		return suite
	}

	suite.State = TestSuiteStateCompiled
	suite.PathToCompiledTest = path
	return suite
}

func Cleanup(goFlagsConfig types.GoFlagsConfig, suites ...TestSuite) {
	if goFlagsConfig.BinaryMustBePreserved() {
		return
	}
	for _, suite := range suites {
		if !suite.Precompiled {
			os.Remove(suite.PathToCompiledTest)
		}
	}
}

type parallelSuiteBundle struct {
	suite    TestSuite
	compiled chan TestSuite
}

type OrderedParallelCompiler struct {
	mutex        *sync.Mutex
	stopped      bool
	numCompilers int

	idx                int
	numSuites          int
	completionChannels []chan TestSuite
}

func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler {
	return &OrderedParallelCompiler{
		mutex:        &sync.Mutex{},
		numCompilers: numCompilers,
	}
}

func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) {
	opc.stopped = false
	opc.idx = 0
	opc.numSuites = len(suites)
	opc.completionChannels = make([]chan TestSuite, opc.numSuites)

	toCompile := make(chan parallelSuiteBundle, opc.numCompilers)
	for compiler := 0; compiler < opc.numCompilers; compiler++ {
		go func() {
			for bundle := range toCompile {
				c, suite := bundle.compiled, bundle.suite
				opc.mutex.Lock()
				stopped := opc.stopped
				opc.mutex.Unlock()
				if !stopped {
					suite = CompileSuite(suite, goFlagsConfig)
				}
				c <- suite
			}
		}()
	}

	for idx, suite := range suites {
		opc.completionChannels[idx] = make(chan TestSuite, 1)
		toCompile <- parallelSuiteBundle{suite, opc.completionChannels[idx]}
		if idx == 0 { // compile the first suite serially
			suite = <-opc.completionChannels[0]
			opc.completionChannels[0] <- suite
		}
	}

	close(toCompile)
}

func (opc *OrderedParallelCompiler) Next() (int, TestSuite) {
	if opc.idx >= opc.numSuites {
		return opc.numSuites, TestSuite{}
	}

	idx := opc.idx
	suite := <-opc.completionChannels[idx]
	opc.idx = opc.idx + 1

	return idx, suite
}

func (opc *OrderedParallelCompiler) StopAndDrain() {
	opc.mutex.Lock()
	opc.stopped = true
	opc.mutex.Unlock()
}
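A sketch of how a caller inside this package might drive OrderedParallelCompiler; the function name is hypothetical and abort/error handling is elided. The point is the protocol: compilation fans out across numCompilers goroutines, but Next hands results back strictly in submission order:

package internal

import "github.com/onsi/ginkgo/v2/types"

// compileAndRunInOrder is a hypothetical caller, not part of the CLI: it
// sketches the intended usage of OrderedParallelCompiler.
func compileAndRunInOrder(suites TestSuites, goFlagsConfig types.GoFlagsConfig) {
	opc := NewOrderedParallelCompiler(4) // e.g. four concurrent compilers
	opc.StartCompiling(suites, goFlagsConfig)
	for {
		idx, suite := opc.Next()
		if idx >= len(suites) {
			break // every suite has been handed back, in submission order
		}
		if suite.State.Is(TestSuiteStateFailedToCompile) {
			opc.StopAndDrain() // tell the workers to skip the remaining compiles
			break
		}
		// ... run the compiled suite here ...
	}
}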
237
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
generated
vendored
Normal file
@ -0,0 +1,237 @@
package internal

import (
	"bytes"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"strconv"

	"github.com/google/pprof/profile"
	"github.com/onsi/ginkgo/v2/reporters"
	"github.com/onsi/ginkgo/v2/types"
)

func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string {
	suffix := ""
	if process != 0 {
		suffix = fmt.Sprintf(".%d", process)
	}
	if cliConfig.OutputDir == "" {
		return filepath.Join(suite.AbsPath(), assetName+suffix)
	}
	outputDir, _ := filepath.Abs(cliConfig.OutputDir)
	return filepath.Join(outputDir, suite.NamespacedName()+"_"+assetName+suffix)
}

func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIConfig, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig, goFlagsConfig types.GoFlagsConfig) ([]string, error) {
	messages := []string{}
	suitesWithProfiles := suites.WithState(TestSuiteStatePassed, TestSuiteStateFailed) // anything else won't have actually run and generated a profile

	// merge cover profiles if need be
	if goFlagsConfig.Cover && !cliConfig.KeepSeparateCoverprofiles {
		coverProfiles := []string{}
		for _, suite := range suitesWithProfiles {
			if !suite.HasProgrammaticFocus {
				coverProfiles = append(coverProfiles, AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0))
			}
		}

		if len(coverProfiles) > 0 {
			dst := goFlagsConfig.CoverProfile
			if cliConfig.OutputDir != "" {
				dst = filepath.Join(cliConfig.OutputDir, goFlagsConfig.CoverProfile)
			}
			err := MergeAndCleanupCoverProfiles(coverProfiles, dst)
			if err != nil {
				return messages, err
			}
			coverage, err := GetCoverageFromCoverProfile(dst)
			if err != nil {
				return messages, err
			}
			if coverage == 0 {
				messages = append(messages, "composite coverage: [no statements]")
			} else if suitesWithProfiles.AnyHaveProgrammaticFocus() {
				messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements however some suites did not contribute because they included programmatically focused specs", coverage))
			} else {
				messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements", coverage))
			}
		} else {
			messages = append(messages, "no composite coverage computed: all suites included programmatically focused specs")
		}
	}

	// copy binaries if need be
	for _, suite := range suitesWithProfiles {
		if goFlagsConfig.BinaryMustBePreserved() && cliConfig.OutputDir != "" {
			src := suite.PathToCompiledTest
			dst := filepath.Join(cliConfig.OutputDir, suite.NamespacedName()+".test")
			if suite.Precompiled {
				if err := CopyFile(src, dst); err != nil {
					return messages, err
				}
			} else {
				if err := os.Rename(src, dst); err != nil {
					return messages, err
				}
			}
		}
	}

	type reportFormat struct {
		ReportName   string
		GenerateFunc func(types.Report, string) error
		MergeFunc    func([]string, string) ([]string, error)
	}
	reportFormats := []reportFormat{}
	if reporterConfig.JSONReport != "" {
		reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports})
	}
	if reporterConfig.JUnitReport != "" {
		reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports})
	}
	if reporterConfig.TeamcityReport != "" {
		reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.TeamcityReport, GenerateFunc: reporters.GenerateTeamcityReport, MergeFunc: reporters.MergeAndCleanupTeamcityReports})
	}

	// Generate reports for suites that failed to run
	reportableSuites := suites.ThatAreGinkgoSuites()
	for _, suite := range reportableSuites.WithState(TestSuiteStateFailedToCompile, TestSuiteStateFailedDueToTimeout, TestSuiteStateSkippedDueToPriorFailures, TestSuiteStateSkippedDueToEmptyCompilation) {
		report := types.Report{
			SuitePath:      suite.AbsPath(),
			SuiteConfig:    suiteConfig,
			SuiteSucceeded: false,
		}
		switch suite.State {
		case TestSuiteStateFailedToCompile:
			report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, suite.CompilationError.Error())
		case TestSuiteStateFailedDueToTimeout:
			report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, TIMEOUT_ELAPSED_FAILURE_REASON)
		case TestSuiteStateSkippedDueToPriorFailures:
			report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, PRIOR_FAILURES_FAILURE_REASON)
		case TestSuiteStateSkippedDueToEmptyCompilation:
			report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, EMPTY_SKIP_FAILURE_REASON)
			report.SuiteSucceeded = true
		}

		for _, format := range reportFormats {
			format.GenerateFunc(report, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0))
		}
	}

	// Merge reports unless we've been asked to keep them separate
	if !cliConfig.KeepSeparateReports {
		for _, format := range reportFormats {
			reports := []string{}
			for _, suite := range reportableSuites {
				reports = append(reports, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0))
			}
			dst := format.ReportName
			if cliConfig.OutputDir != "" {
				dst = filepath.Join(cliConfig.OutputDir, format.ReportName)
			}
			mergeMessages, err := format.MergeFunc(reports, dst)
			messages = append(messages, mergeMessages...)
			if err != nil {
				return messages, err
			}
		}
	}

	return messages, nil
}

// MergeAndCleanupCoverProfiles loads each profile, combines them, deletes the originals, and stores the result in destination.
func MergeAndCleanupCoverProfiles(profiles []string, destination string) error {
	combined := &bytes.Buffer{}
	modeRegex := regexp.MustCompile(`^mode: .*\n`)
	for i, profile := range profiles {
		contents, err := os.ReadFile(profile)
		if err != nil {
			return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error())
		}
		os.Remove(profile)

		// remove the cover mode line from every file
		// except the first one
		if i > 0 {
			contents = modeRegex.ReplaceAll(contents, []byte{})
		}

		_, err = combined.Write(contents)

		// Add a newline to the end of every file if missing.
		if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
			_, err = combined.Write([]byte("\n"))
		}

		if err != nil {
			return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error())
		}
	}

	err := os.WriteFile(destination, combined.Bytes(), 0666)
	if err != nil {
		return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error())
	}
	return nil
}

func GetCoverageFromCoverProfile(profile string) (float64, error) {
	cmd := exec.Command("go", "tool", "cover", "-func", profile)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error())
	}
	re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`)
	matches := re.FindStringSubmatch(string(output))
	if matches == nil {
		return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage")
	}
	coverageString := matches[1]
	coverage, err := strconv.ParseFloat(coverageString, 64)
	if err != nil {
		return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage: %s", err.Error())
	}

	return coverage, nil
}

func MergeProfiles(profilePaths []string, destination string) error {
	profiles := []*profile.Profile{}
	for _, profilePath := range profilePaths {
		proFile, err := os.Open(profilePath)
		if err != nil {
			return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error())
		}
		prof, err := profile.Parse(proFile)
		if err != nil {
			return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error())
		}
		profiles = append(profiles, prof)
		os.Remove(profilePath)
	}

	mergedProfile, err := profile.Merge(profiles)
	if err != nil {
		return fmt.Errorf("Could not merge profiles:\n%s", err.Error())
	}

	outFile, err := os.Create(destination)
	if err != nil {
		return fmt.Errorf("Could not create merged profile %s:\n%s", destination, err.Error())
	}
	err = mergedProfile.Write(outFile)
	if err != nil {
		return fmt.Errorf("Could not write merged profile %s:\n%s", destination, err.Error())
	}
	err = outFile.Close()
	if err != nil {
		return fmt.Errorf("Could not close merged profile %s:\n%s", destination, err.Error())
	}

	return nil
}
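The mode-line handling in MergeAndCleanupCoverProfiles is easiest to see with inline data: every cover profile starts with a mode: line, and only the first one may survive, otherwise go tool cover rejects the merged file. A minimal sketch with invented profile contents (no files involved):

package main

import (
	"bytes"
	"fmt"
	"regexp"
)

func main() {
	profiles := [][]byte{
		[]byte("mode: atomic\nexample.com/pkg/a.go:1.1,2.2 1 1\n"),
		[]byte("mode: atomic\nexample.com/pkg/b.go:3.3,4.4 1 0\n"),
	}
	modeRegex := regexp.MustCompile(`^mode: .*\n`)

	combined := &bytes.Buffer{}
	for i, contents := range profiles {
		if i > 0 { // strip the mode line from every profile but the first
			contents = modeRegex.ReplaceAll(contents, []byte{})
		}
		combined.Write(contents)
	}
	fmt.Print(combined.String())
	// mode: atomic
	// example.com/pkg/a.go:1.1,2.2 1 1
	// example.com/pkg/b.go:3.3,4.4 1 0
}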
355
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
generated
vendored
Normal file
@ -0,0 +1,355 @@
package internal

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"strings"
	"syscall"
	"time"

	"github.com/onsi/ginkgo/v2/formatter"
	"github.com/onsi/ginkgo/v2/ginkgo/command"
	"github.com/onsi/ginkgo/v2/internal/parallel_support"
	"github.com/onsi/ginkgo/v2/reporters"
	"github.com/onsi/ginkgo/v2/types"
)

func RunCompiledSuite(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
	suite.State = TestSuiteStateFailed
	suite.HasProgrammaticFocus = false

	if suite.PathToCompiledTest == "" {
		return suite
	}

	if suite.IsGinkgo && cliConfig.ComputedProcs() > 1 {
		suite = runParallel(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs)
	} else if suite.IsGinkgo {
		suite = runSerial(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs)
	} else {
		suite = runGoTest(suite, cliConfig, goFlagsConfig)
	}
	runAfterRunHook(cliConfig.AfterRunHook, reporterConfig.NoColor, suite)
	return suite
}

func buildAndStartCommand(suite TestSuite, args []string, pipeToStdout bool) (*exec.Cmd, *bytes.Buffer) {
	buf := &bytes.Buffer{}
	cmd := exec.Command(suite.PathToCompiledTest, args...)
	cmd.Dir = suite.Path
	if pipeToStdout {
		cmd.Stderr = io.MultiWriter(os.Stdout, buf)
		cmd.Stdout = os.Stdout
	} else {
		cmd.Stderr = buf
		cmd.Stdout = buf
	}
	err := cmd.Start()
	command.AbortIfError("Failed to start test suite", err)

	return cmd, buf
}

func checkForNoTestsWarning(buf *bytes.Buffer) bool {
	if strings.Contains(buf.String(), "warning: no tests to run") {
		fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`)
		return true
	}
	return false
}

func runGoTest(suite TestSuite, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) TestSuite {
	// As we run go test from the suite directory, make sure the cover profile is absolute
	// and placed into the expected output directory when one is configured.
	if goFlagsConfig.Cover && !filepath.IsAbs(goFlagsConfig.CoverProfile) {
		goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
	}

	args, err := types.GenerateGoTestRunArgs(goFlagsConfig)
	command.AbortIfError("Failed to generate test run arguments", err)
	cmd, buf := buildAndStartCommand(suite, args, true)

	cmd.Wait()

	exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
	passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
	passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed
	if passed {
		suite.State = TestSuiteStatePassed
	} else {
		suite.State = TestSuiteStateFailed
	}

	return suite
}

func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
	if goFlagsConfig.Cover {
		goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
	}
	if goFlagsConfig.BlockProfile != "" {
		goFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0)
	}
	if goFlagsConfig.CPUProfile != "" {
		goFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0)
	}
	if goFlagsConfig.MemProfile != "" {
		goFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0)
	}
	if goFlagsConfig.MutexProfile != "" {
		goFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0)
	}
	if reporterConfig.JSONReport != "" {
		reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
	}
	if reporterConfig.JUnitReport != "" {
		reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
	}
	if reporterConfig.TeamcityReport != "" {
		reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0)
	}

	args, err := types.GenerateGinkgoTestRunArgs(ginkgoConfig, reporterConfig, goFlagsConfig)
	command.AbortIfError("Failed to generate test run arguments", err)
	args = append([]string{"--test.timeout=0"}, args...)
	args = append(args, additionalArgs...)

	cmd, buf := buildAndStartCommand(suite, args, true)

	cmd.Wait()

	exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
	suite.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
	passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
	passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed
	if passed {
		suite.State = TestSuiteStatePassed
	} else {
		suite.State = TestSuiteStateFailed
	}

	if suite.HasProgrammaticFocus {
		if goFlagsConfig.Cover {
			fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused")
		}
		if goFlagsConfig.BlockProfile != "" {
			fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused")
		}
		if goFlagsConfig.CPUProfile != "" {
			fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused")
		}
		if goFlagsConfig.MemProfile != "" {
			fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused")
		}
		if goFlagsConfig.MutexProfile != "" {
			fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused")
		}
	}

	return suite
}

func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
	type procResult struct {
		passed               bool
		hasProgrammaticFocus bool
	}

	numProcs := cliConfig.ComputedProcs()
	procOutput := make([]*bytes.Buffer, numProcs)
	coverProfiles := []string{}

	blockProfiles := []string{}
	cpuProfiles := []string{}
	memProfiles := []string{}
	mutexProfiles := []string{}

	procResults := make(chan procResult)

	server, err := parallel_support.NewServer(numProcs, reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut))
	command.AbortIfError("Failed to start parallel spec server", err)
	server.Start()
	defer server.Close()

	if reporterConfig.JSONReport != "" {
		reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
	}
	if reporterConfig.JUnitReport != "" {
		reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
	}
	if reporterConfig.TeamcityReport != "" {
		reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0)
	}

	for proc := 1; proc <= numProcs; proc++ {
		procGinkgoConfig := ginkgoConfig
		procGinkgoConfig.ParallelProcess, procGinkgoConfig.ParallelTotal, procGinkgoConfig.ParallelHost = proc, numProcs, server.Address()

		procGoFlagsConfig := goFlagsConfig
		if goFlagsConfig.Cover {
			procGoFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, proc)
			coverProfiles = append(coverProfiles, procGoFlagsConfig.CoverProfile)
		}
		if goFlagsConfig.BlockProfile != "" {
			procGoFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, proc)
			blockProfiles = append(blockProfiles, procGoFlagsConfig.BlockProfile)
		}
		if goFlagsConfig.CPUProfile != "" {
			procGoFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, proc)
			cpuProfiles = append(cpuProfiles, procGoFlagsConfig.CPUProfile)
		}
		if goFlagsConfig.MemProfile != "" {
			procGoFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, proc)
			memProfiles = append(memProfiles, procGoFlagsConfig.MemProfile)
		}
		if goFlagsConfig.MutexProfile != "" {
			procGoFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, proc)
			mutexProfiles = append(mutexProfiles, procGoFlagsConfig.MutexProfile)
		}

		args, err := types.GenerateGinkgoTestRunArgs(procGinkgoConfig, reporterConfig, procGoFlagsConfig)
		command.AbortIfError("Failed to generate test run arguments", err)
		args = append([]string{"--test.timeout=0"}, args...)
		args = append(args, additionalArgs...)

		cmd, buf := buildAndStartCommand(suite, args, false)
		procOutput[proc-1] = buf
		server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() })

		go func() {
			cmd.Wait()
			exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
			procResults <- procResult{
				passed:               (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE),
				hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE,
			}
		}()
	}

	passed := true
	for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
		result := <-procResults
		passed = passed && result.passed
		suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus
	}
	if passed {
		suite.State = TestSuiteStatePassed
	} else {
		suite.State = TestSuiteStateFailed
	}

	select {
	case <-server.GetSuiteDone():
		fmt.Println("")
	case <-time.After(time.Second):
		// one of the nodes never finished reporting to the server.  Something must have gone wrong.
		fmt.Fprint(formatter.ColorableStdErr, formatter.F("\n{{bold}}{{red}}Ginkgo timed out waiting for all parallel procs to report back{{/}}\n"))
		fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{gray}}Test suite:{{/}} %s (%s)\n\n", suite.PackageName, suite.Path))
		fmt.Fprint(formatter.ColorableStdErr, formatter.Fiw(0, formatter.COLS, "This occurs if a parallel process exits before it reports its results to the Ginkgo CLI. The CLI will now print out all the stdout/stderr output it's collected from the running processes. However you may not see anything useful in these logs because the individual test processes usually intercept output to stdout/stderr in order to capture it in the spec reports.\n\nYou may want to try rerunning your test suite with {{light-gray}}--output-interceptor-mode=none{{/}} to see additional output here and debug your suite.\n"))
		fmt.Fprintln(formatter.ColorableStdErr, " ")
		for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
			fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc))
			fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String()))
		}
		fmt.Fprintf(os.Stderr, "** End **")
	}

	for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
		output := procOutput[proc-1].String()
		if proc == 1 && checkForNoTestsWarning(procOutput[0]) && cliConfig.RequireSuite {
			suite.State = TestSuiteStateFailed
		}
		if strings.Contains(output, "deprecated Ginkgo functionality") {
			fmt.Fprintln(os.Stderr, output)
		}
	}

	if len(coverProfiles) > 0 {
		if suite.HasProgrammaticFocus {
			fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused")
		} else {
			coverProfile := AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
			err := MergeAndCleanupCoverProfiles(coverProfiles, coverProfile)
			command.AbortIfError("Failed to combine cover profiles", err)

			coverage, err := GetCoverageFromCoverProfile(coverProfile)
			command.AbortIfError("Failed to compute coverage", err)
			if coverage == 0 {
				fmt.Fprintln(os.Stdout, "coverage: [no statements]")
			} else {
				fmt.Fprintf(os.Stdout, "coverage: %.1f%% of statements\n", coverage)
			}
		}
	}
	if len(blockProfiles) > 0 {
		if suite.HasProgrammaticFocus {
			fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused")
		} else {
			blockProfile := AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0)
			err := MergeProfiles(blockProfiles, blockProfile)
			command.AbortIfError("Failed to combine blockprofiles", err)
		}
	}
	if len(cpuProfiles) > 0 {
		if suite.HasProgrammaticFocus {
			fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused")
		} else {
			cpuProfile := AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0)
			err := MergeProfiles(cpuProfiles, cpuProfile)
			command.AbortIfError("Failed to combine cpuprofiles", err)
		}
	}
	if len(memProfiles) > 0 {
		if suite.HasProgrammaticFocus {
			fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused")
		} else {
			memProfile := AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0)
			err := MergeProfiles(memProfiles, memProfile)
			command.AbortIfError("Failed to combine memprofiles", err)
		}
	}
	if len(mutexProfiles) > 0 {
		if suite.HasProgrammaticFocus {
			fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused")
		} else {
			mutexProfile := AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0)
			err := MergeProfiles(mutexProfiles, mutexProfile)
			command.AbortIfError("Failed to combine mutexprofiles", err)
		}
	}

	return suite
}

func runAfterRunHook(command string, noColor bool, suite TestSuite) {
	if command == "" {
		return
	}
	f := formatter.NewWithNoColorBool(noColor)

	// Allow for string replacement to pass input to the command
	passed := "[FAIL]"
	if suite.State.Is(TestSuiteStatePassed) {
		passed = "[PASS]"
	}
	command = strings.ReplaceAll(command, "(ginkgo-suite-passed)", passed)
	command = strings.ReplaceAll(command, "(ginkgo-suite-name)", suite.PackageName)

	// Must break the command into parts
	splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
	parts := splitArgs.FindAllString(command, -1)

	output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
	if err != nil {
		fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{red}}{{bold}}After-run-hook failed:{{/}}"))
		fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{red}}%s{{/}}", output))
	} else {
		fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{green}}{{bold}}After-run-hook succeeded:{{/}}"))
		fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{green}}%s{{/}}", output))
	}
}
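runAfterRunHook's placeholder substitution and tokenization are easiest to see on a concrete command string. A standalone sketch of just those two steps (the hook command and suite name are invented):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	hook := "echo (ginkgo-suite-name) (ginkgo-suite-passed)"
	hook = strings.ReplaceAll(hook, "(ginkgo-suite-passed)", "[PASS]")
	hook = strings.ReplaceAll(hook, "(ginkgo-suite-name)", "books")

	// Same tokenizer as runAfterRunHook: quoted chunks stay single arguments.
	splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
	fmt.Printf("%q\n", splitArgs.FindAllString(hook, -1))
	// ["echo" "books" "[PASS]"]
}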
283
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
generated
vendored
Normal file
@ -0,0 +1,283 @@
package internal

import (
	"errors"
	"math/rand"
	"os"
	"path"
	"path/filepath"
	"regexp"
	"strings"

	"github.com/onsi/ginkgo/v2/types"
)

const TIMEOUT_ELAPSED_FAILURE_REASON = "Suite did not run because the timeout elapsed"
const PRIOR_FAILURES_FAILURE_REASON = "Suite did not run because prior suites failed and --keep-going is not set"
const EMPTY_SKIP_FAILURE_REASON = "Suite did not run: go test reported that no test files were found"

type TestSuiteState uint

const (
	TestSuiteStateInvalid TestSuiteState = iota

	TestSuiteStateUncompiled
	TestSuiteStateCompiled

	TestSuiteStatePassed

	TestSuiteStateSkippedDueToEmptyCompilation
	TestSuiteStateSkippedByFilter
	TestSuiteStateSkippedDueToPriorFailures

	TestSuiteStateFailed
	TestSuiteStateFailedDueToTimeout
	TestSuiteStateFailedToCompile
)

var TestSuiteStateFailureStates = []TestSuiteState{TestSuiteStateFailed, TestSuiteStateFailedDueToTimeout, TestSuiteStateFailedToCompile}

func (state TestSuiteState) Is(states ...TestSuiteState) bool {
	for _, suiteState := range states {
		if suiteState == state {
			return true
		}
	}

	return false
}

type TestSuite struct {
	Path        string
	PackageName string
	IsGinkgo    bool

	Precompiled        bool
	PathToCompiledTest string
	CompilationError   error

	HasProgrammaticFocus bool
	State                TestSuiteState
}

func (ts TestSuite) AbsPath() string {
	path, _ := filepath.Abs(ts.Path)
	return path
}

func (ts TestSuite) NamespacedName() string {
	name := relPath(ts.Path)
	name = strings.TrimLeft(name, "."+string(filepath.Separator))
	name = strings.ReplaceAll(name, string(filepath.Separator), "_")
	name = strings.ReplaceAll(name, " ", "_")
	if name == "" {
		return ts.PackageName
	}
	return name
}

type TestSuites []TestSuite

func (ts TestSuites) AnyHaveProgrammaticFocus() bool {
	for _, suite := range ts {
		if suite.HasProgrammaticFocus {
			return true
		}
	}

	return false
}

func (ts TestSuites) ThatAreGinkgoSuites() TestSuites {
	out := TestSuites{}
	for _, suite := range ts {
		if suite.IsGinkgo {
			out = append(out, suite)
		}
	}
	return out
}

func (ts TestSuites) CountWithState(states ...TestSuiteState) int {
	n := 0
	for _, suite := range ts {
		if suite.State.Is(states...) {
			n += 1
		}
	}

	return n
}

func (ts TestSuites) WithState(states ...TestSuiteState) TestSuites {
	out := TestSuites{}
	for _, suite := range ts {
		if suite.State.Is(states...) {
			out = append(out, suite)
		}
	}

	return out
}

func (ts TestSuites) WithoutState(states ...TestSuiteState) TestSuites {
	out := TestSuites{}
	for _, suite := range ts {
		if !suite.State.Is(states...) {
			out = append(out, suite)
		}
	}

	return out
}

func (ts TestSuites) ShuffledCopy(seed int64) TestSuites {
	out := make(TestSuites, len(ts))
	permutation := rand.New(rand.NewSource(seed)).Perm(len(ts))
	for i, j := range permutation {
		out[i] = ts[j]
	}
	return out
}

func FindSuites(args []string, cliConfig types.CLIConfig, allowPrecompiled bool) TestSuites {
	suites := TestSuites{}

	if len(args) > 0 {
		for _, arg := range args {
			if allowPrecompiled {
				suite, err := precompiledTestSuite(arg)
				if err == nil {
					suites = append(suites, suite)
					continue
				}
			}
			recurseForSuite := cliConfig.Recurse
			if strings.HasSuffix(arg, "/...") && arg != "/..." {
				arg = arg[:len(arg)-4]
				recurseForSuite = true
			}
			suites = append(suites, suitesInDir(arg, recurseForSuite)...)
		}
	} else {
		suites = suitesInDir(".", cliConfig.Recurse)
	}

	if cliConfig.SkipPackage != "" {
		skipFilters := strings.Split(cliConfig.SkipPackage, ",")
		for idx := range suites {
			for _, skipFilter := range skipFilters {
				if strings.Contains(suites[idx].Path, skipFilter) {
					suites[idx].State = TestSuiteStateSkippedByFilter
					break
				}
			}
		}
	}

	return suites
}

func precompiledTestSuite(path string) (TestSuite, error) {
	info, err := os.Stat(path)
	if err != nil {
		return TestSuite{}, err
	}

	if info.IsDir() {
		return TestSuite{}, errors.New("this is a directory, not a file")
	}

	if filepath.Ext(path) != ".test" && filepath.Ext(path) != ".exe" {
		return TestSuite{}, errors.New("this is not a .test binary")
	}

	if filepath.Ext(path) == ".test" && info.Mode()&0111 == 0 {
		return TestSuite{}, errors.New("this is not executable")
	}

	dir := relPath(filepath.Dir(path))
	packageName := strings.TrimSuffix(filepath.Base(path), ".exe")
	packageName = strings.TrimSuffix(packageName, ".test")

	path, err = filepath.Abs(path)
	if err != nil {
		return TestSuite{}, err
	}

	return TestSuite{
		Path:               dir,
		PackageName:        packageName,
		IsGinkgo:           true,
		Precompiled:        true,
		PathToCompiledTest: path,
		State:              TestSuiteStateCompiled,
	}, nil
}

func suitesInDir(dir string, recurse bool) TestSuites {
	suites := TestSuites{}

	if path.Base(dir) == "vendor" {
		return suites
	}

	files, _ := os.ReadDir(dir)
	re := regexp.MustCompile(`^[^._].*_test\.go$`)
	for _, file := range files {
		if !file.IsDir() && re.Match([]byte(file.Name())) {
			suite := TestSuite{
				Path:        relPath(dir),
				PackageName: packageNameForSuite(dir),
				IsGinkgo:    filesHaveGinkgoSuite(dir, files),
				State:       TestSuiteStateUncompiled,
			}
			suites = append(suites, suite)
			break
		}
	}

	if recurse {
		re = regexp.MustCompile(`^[._]`)
		for _, file := range files {
			if file.IsDir() && !re.Match([]byte(file.Name())) {
				suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...)
			}
		}
	}

	return suites
}

func relPath(dir string) string {
	dir, _ = filepath.Abs(dir)
	cwd, _ := os.Getwd()
	dir, _ = filepath.Rel(cwd, filepath.Clean(dir))

	if string(dir[0]) != "." {
		dir = "." + string(filepath.Separator) + dir
	}

	return dir
}

func packageNameForSuite(dir string) string {
	path, _ := filepath.Abs(dir)
	return filepath.Base(path)
}

func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool {
	reTestFile := regexp.MustCompile(`_test\.go$`)
	reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`)

	for _, file := range files {
		if !file.IsDir() && reTestFile.Match([]byte(file.Name())) {
			contents, _ := os.ReadFile(dir + "/" + file.Name())
			if reGinkgo.Match(contents) {
				return true
			}
		}
	}

	return false
}
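Most consumers read these states through the TestSuites helpers rather than comparing enum values directly. A short in-package sketch (the summarize function is hypothetical, purely to show the API):

package internal

import "fmt"

// summarize is a hypothetical helper showing the TestSuites state API.
func summarize(suites TestSuites) {
	failed := suites.WithState(TestSuiteStateFailureStates...)
	skipped := suites.CountWithState(TestSuiteStateSkippedByFilter)
	fmt.Printf("%d suites failed, %d skipped by --skip-package\n", len(failed), skipped)
	for _, s := range failed {
		fmt.Printf("  %s (%s)\n", s.PackageName, s.Path)
	}
}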
86
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
package internal

import (
	"fmt"
	"io"
	"os"
	"os/exec"

	"github.com/onsi/ginkgo/v2/formatter"
	"github.com/onsi/ginkgo/v2/ginkgo/command"
)

func FileExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}

func CopyFile(src string, dest string) error {
	srcFile, err := os.Open(src)
	if err != nil {
		return err
	}

	srcStat, err := srcFile.Stat()
	if err != nil {
		return err
	}

	if _, err := os.Stat(dest); err == nil {
		os.Remove(dest)
	}

	destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, srcStat.Mode())
	if err != nil {
		return err
	}

	_, err = io.Copy(destFile, srcFile)
	if err != nil {
		return err
	}

	if err := srcFile.Close(); err != nil {
		return err
	}
	return destFile.Close()
}

func GoFmt(path string) {
	out, err := exec.Command("go", "fmt", path).CombinedOutput()
	if err != nil {
		command.AbortIfError(fmt.Sprintf("Could not fmt:\n%s\n", string(out)), err)
	}
}

func PluralizedWord(singular, plural string, count int) string {
	if count == 1 {
		return singular
	}
	return plural
}

func FailedSuitesReport(suites TestSuites, f formatter.Formatter) string {
	out := ""
	out += "There were failures detected in the following suites:\n"

	maxPackageNameLength := 0
	for _, suite := range suites.WithState(TestSuiteStateFailureStates...) {
		if len(suite.PackageName) > maxPackageNameLength {
			maxPackageNameLength = len(suite.PackageName)
		}
	}

	packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength)
	for _, suite := range suites {
		switch suite.State {
		case TestSuiteStateFailed:
			out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s{{/}}\n", suite.PackageName, suite.Path)
		case TestSuiteStateFailedToCompile:
			out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{magenta}}[Compilation failure]{{/}}\n", suite.PackageName, suite.Path)
		case TestSuiteStateFailedDueToTimeout:
			out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{orange}}[%s]{{/}}\n", suite.PackageName, suite.Path, TIMEOUT_ELAPSED_FAILURE_REASON)
		}
	}
	return out
}
54
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
package internal

import (
	"fmt"
	"os/exec"
	"regexp"
	"strings"

	"github.com/onsi/ginkgo/v2/formatter"
	"github.com/onsi/ginkgo/v2/types"
)

var versionRe = regexp.MustCompile(`v(\d+\.\d+\.\d+)`)

func VerifyCLIAndFrameworkVersion(suites TestSuites) {
	cliVersion := types.VERSION
	mismatches := map[string][]string{}

	for _, suite := range suites {
		cmd := exec.Command("go", "list", "-m", "github.com/onsi/ginkgo/v2")
		cmd.Dir = suite.Path
		output, err := cmd.CombinedOutput()
		if err != nil {
			continue
		}
		components := strings.Split(string(output), " ")
		if len(components) != 2 {
			continue
		}
		matches := versionRe.FindStringSubmatch(components[1])
		if matches == nil || len(matches) != 2 {
			continue
		}
		libraryVersion := matches[1]
		if cliVersion != libraryVersion {
			mismatches[libraryVersion] = append(mismatches[libraryVersion], suite.PackageName)
		}
	}

	if len(mismatches) == 0 {
		return
	}

	fmt.Println(formatter.F("{{red}}{{bold}}Ginkgo detected a version mismatch between the Ginkgo CLI and the version of Ginkgo imported by your packages:{{/}}"))

	fmt.Println(formatter.Fi(1, "Ginkgo CLI Version:"))
	fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}}", cliVersion))
	fmt.Println(formatter.Fi(1, "Mismatched package versions found:"))
	for version, packages := range mismatches {
		fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}} used by %s", version, strings.Join(packages, ", ")))
	}
	fmt.Println("")
	fmt.Println(formatter.Fiw(1, formatter.COLS, "{{gray}}Ginkgo will continue to attempt to run but you may see errors (including flag parsing errors) and should either update your go.mod or your version of the Ginkgo CLI to match.\n\nTo install the matching version of the CLI run\n {{bold}}go install github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file. Alternatively you can use\n {{bold}}go run github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file to invoke the matching version of the Ginkgo CLI.\n\nIf you are attempting to test multiple packages that each have a different version of the Ginkgo library with a single Ginkgo CLI, that is currently unsupported.\n{{/}}"))
}
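The version check shells out to go list -m and scrapes the semver with the regexp above. A standalone sketch of just the extraction step (the sample output line mirrors what go list typically prints, shown here as a fixed string):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Typical `go list -m github.com/onsi/ginkgo/v2` output.
	output := "github.com/onsi/ginkgo/v2 v2.9.5\n"
	versionRe := regexp.MustCompile(`v(\d+\.\d+\.\d+)`)

	components := strings.Split(output, " ")
	if len(components) == 2 {
		if m := versionRe.FindStringSubmatch(components[1]); len(m) == 2 {
			fmt.Println(m[1]) // 2.9.5
		}
	}
}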
123
vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
package labels
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
"golang.org/x/tools/go/ast/inspector"
|
||||
)
|
||||
|
||||
func BuildLabelsCommand() command.Command {
|
||||
var cliConfig = types.NewDefaultCLIConfig()
|
||||
|
||||
flags, err := types.BuildLabelsCommandFlagSet(&cliConfig)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return command.Command{
|
||||
Name: "labels",
|
||||
Usage: "ginkgo labels <FLAGS> <PACKAGES>",
|
||||
Flags: flags,
|
||||
ShortDoc: "List labels detected in the passed-in packages (or the package in the current directory if left blank).",
|
||||
DocLink: "spec-labels",
|
||||
Command: func(args []string, _ []string) {
|
||||
ListLabels(args, cliConfig)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func ListLabels(args []string, cliConfig types.CLIConfig) {
|
||||
suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
|
||||
if len(suites) == 0 {
|
||||
command.AbortWith("Found no test suites")
|
||||
}
|
||||
for _, suite := range suites {
|
||||
labels := fetchLabelsFromPackage(suite.Path)
|
||||
if len(labels) == 0 {
|
||||
fmt.Printf("%s: No labels found\n", suite.PackageName)
|
||||
} else {
|
||||
fmt.Printf("%s: [%s]\n", suite.PackageName, strings.Join(labels, ", "))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func fetchLabelsFromPackage(packagePath string) []string {
|
||||
fset := token.NewFileSet()
|
||||
parsedPackages, err := parser.ParseDir(fset, packagePath, nil, 0)
|
||||
command.AbortIfError("Failed to parse package source:", err)
|
||||
|
||||
files := []*ast.File{}
|
||||
hasTestPackage := false
|
||||
for key, pkg := range parsedPackages {
|
||||
if strings.HasSuffix(key, "_test") {
|
||||
hasTestPackage = true
|
||||
for _, file := range pkg.Files {
|
||||
files = append(files, file)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !hasTestPackage {
|
||||
for _, pkg := range parsedPackages {
|
||||
for _, file := range pkg.Files {
|
||||
files = append(files, file)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
seen := map[string]bool{}
|
||||
labels := []string{}
|
||||
ispr := inspector.New(files)
|
||||
ispr.Preorder([]ast.Node{&ast.CallExpr{}}, func(n ast.Node) {
|
||||
potentialLabels := fetchLabels(n.(*ast.CallExpr))
|
||||
for _, label := range potentialLabels {
|
||||
if !seen[label] {
|
||||
seen[label] = true
|
||||
labels = append(labels, strconv.Quote(label))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
sort.Strings(labels)
|
||||
return labels
|
||||
}
|
||||
|
||||
func fetchLabels(callExpr *ast.CallExpr) []string {
|
||||
out := []string{}
|
||||
switch expr := callExpr.Fun.(type) {
|
||||
case *ast.Ident:
|
||||
if expr.Name != "Label" {
|
||||
return out
|
||||
}
|
||||
case *ast.SelectorExpr:
|
||||
if expr.Sel.Name != "Label" {
|
||||
return out
|
||||
}
|
||||
default:
|
||||
return out
|
||||
}
|
||||
for _, arg := range callExpr.Args {
|
||||
switch expr := arg.(type) {
|
||||
case *ast.BasicLit:
|
||||
if expr.Kind == token.STRING {
|
||||
unquoted, err := strconv.Unquote(expr.Value)
|
||||
if err != nil {
|
||||
unquoted = expr.Value
|
||||
}
|
||||
validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{})
|
||||
if err == nil {
|
||||
out = append(out, validated)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
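The labels command above never runs the suite; it statically walks each test package's AST looking for Label(...) calls and prints the deduplicated, sorted result. A minimal sketch of what it picks up; the spec below is hypothetical, and the output line assumes a package named books:

package books_test

import (
    . "github.com/onsi/ginkgo/v2"
)

// Labels attached via the Label decorator are discovered statically:
var _ = Describe("Checkout", Label("integration"), func() {
    It("charges the card", Label("slow", "network"), func() {})
})

// Running `ginkgo labels` in this package would print something like:
//   books: ["integration", "network", "slow"]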
58
vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
package main

import (
    "fmt"
    "os"

    "github.com/onsi/ginkgo/v2/ginkgo/build"
    "github.com/onsi/ginkgo/v2/ginkgo/command"
    "github.com/onsi/ginkgo/v2/ginkgo/generators"
    "github.com/onsi/ginkgo/v2/ginkgo/labels"
    "github.com/onsi/ginkgo/v2/ginkgo/outline"
    "github.com/onsi/ginkgo/v2/ginkgo/run"
    "github.com/onsi/ginkgo/v2/ginkgo/unfocus"
    "github.com/onsi/ginkgo/v2/ginkgo/watch"
    "github.com/onsi/ginkgo/v2/types"
)

var program command.Program

func GenerateCommands() []command.Command {
    return []command.Command{
        watch.BuildWatchCommand(),
        build.BuildBuildCommand(),
        generators.BuildBootstrapCommand(),
        generators.BuildGenerateCommand(),
        labels.BuildLabelsCommand(),
        outline.BuildOutlineCommand(),
        unfocus.BuildUnfocusCommand(),
        BuildVersionCommand(),
    }
}

func main() {
    program = command.Program{
        Name:           "ginkgo",
        Heading:        fmt.Sprintf("Ginkgo Version %s", types.VERSION),
        Commands:       GenerateCommands(),
        DefaultCommand: run.BuildRunCommand(),
        DeprecatedCommands: []command.DeprecatedCommand{
            {Name: "convert", Deprecation: types.Deprecations.Convert()},
            {Name: "blur", Deprecation: types.Deprecations.Blur()},
            {Name: "nodot", Deprecation: types.Deprecations.Nodot()},
        },
    }

    program.RunAndExit(os.Args)
}

func BuildVersionCommand() command.Command {
    return command.Command{
        Name:     "version",
        Usage:    "ginkgo version",
        ShortDoc: "Print Ginkgo's version",
        Command: func(_ []string, _ []string) {
            fmt.Printf("Ginkgo Version %s\n", types.VERSION)
        },
    }
}
302
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
generated
vendored
Normal file
@@ -0,0 +1,302 @@
package outline

import (
    "go/ast"
    "go/token"
    "strconv"

    "github.com/onsi/ginkgo/v2/types"
)

const (
    // undefinedTextAlt is used if the spec/container text cannot be derived
    undefinedTextAlt = "undefined"
)

// ginkgoMetadata holds useful bits of information for every entry in the outline
type ginkgoMetadata struct {
    // Name is the spec or container function name, e.g. `Describe` or `It`
    Name string `json:"name"`

    // Text is the `text` argument passed to specs, and some containers
    Text string `json:"text"`

    // Start is the position of the first character of the spec or container block
    Start int `json:"start"`

    // End is the position of the first character immediately after the spec or container block
    End int `json:"end"`

    Spec    bool     `json:"spec"`
    Focused bool     `json:"focused"`
    Pending bool     `json:"pending"`
    Labels  []string `json:"labels"`
}

// ginkgoNode is used to construct the outline as a tree
type ginkgoNode struct {
    ginkgoMetadata
    Nodes []*ginkgoNode `json:"nodes"`
}

type walkFunc func(n *ginkgoNode)

func (n *ginkgoNode) PreOrder(f walkFunc) {
    f(n)
    for _, m := range n.Nodes {
        m.PreOrder(f)
    }
}

func (n *ginkgoNode) PostOrder(f walkFunc) {
    for _, m := range n.Nodes {
        m.PostOrder(f)
    }
    f(n)
}

func (n *ginkgoNode) Walk(pre, post walkFunc) {
    pre(n)
    for _, m := range n.Nodes {
        m.Walk(pre, post)
    }
    post(n)
}

// PropagateInheritedProperties propagates the Pending and Focused properties
// through the subtree rooted at n.
func (n *ginkgoNode) PropagateInheritedProperties() {
    n.PreOrder(func(thisNode *ginkgoNode) {
        for _, descendantNode := range thisNode.Nodes {
            if thisNode.Pending {
                descendantNode.Pending = true
                descendantNode.Focused = false
            }
            if thisNode.Focused && !descendantNode.Pending {
                descendantNode.Focused = true
            }
        }
    })
}

// BackpropagateUnfocus propagates the Focused property through the subtree
// rooted at n. It applies the rule described in the Ginkgo docs:
// > Nested programmatically focused specs follow a simple rule: if a
// > leaf-node is marked focused, any of its ancestor nodes that are marked
// > focus will be unfocused.
func (n *ginkgoNode) BackpropagateUnfocus() {
    focusedSpecInSubtreeStack := []bool{}
    n.PostOrder(func(thisNode *ginkgoNode) {
        if thisNode.Spec {
            focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused)
            return
        }
        focusedSpecInSubtree := false
        for range thisNode.Nodes {
            focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1]
            focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1]
        }
        focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree)
        if focusedSpecInSubtree {
            thisNode.Focused = false
        }
    })
}

func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) {
    switch ex := ce.Fun.(type) {
    case *ast.Ident:
        return "", ex.Name, true
    case *ast.SelectorExpr:
        pkgID, ok := ex.X.(*ast.Ident)
        if !ok {
            return "", "", false
        }
        // A package identifier is top-level, so Obj must be nil
        if pkgID.Obj != nil {
            return "", "", false
        }
        if ex.Sel == nil {
            return "", "", false
        }
        return pkgID.Name, ex.Sel.Name, true
    default:
        return "", "", false
    }
}

// absoluteOffsetsForNode derives the absolute character offsets of the node start and
// end positions.
func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) {
    return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset
}

// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree
// corresponding to a Ginkgo container or spec.
func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName *string) (*ginkgoNode, bool) {
    packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce)
    if !ok {
        return nil, false
    }

    n := ginkgoNode{}
    n.Name = identName
    n.Start, n.End = absoluteOffsetsForNode(fset, ce)
    n.Nodes = make([]*ginkgoNode, 0)
    switch identName {
    case "It", "Specify", "Entry":
        n.Spec = true
        n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
        n.Labels = labelFromCallExpr(ce)
        n.Pending = pendingFromCallExpr(ce)
        return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
    case "FIt", "FSpecify", "FEntry":
        n.Spec = true
        n.Focused = true
        n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
        n.Labels = labelFromCallExpr(ce)
        return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
    case "PIt", "PSpecify", "XIt", "XSpecify", "PEntry", "XEntry":
        n.Spec = true
        n.Pending = true
        n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
        n.Labels = labelFromCallExpr(ce)
        return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
    case "Context", "Describe", "When", "DescribeTable":
        n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
        n.Labels = labelFromCallExpr(ce)
        n.Pending = pendingFromCallExpr(ce)
        return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
    case "FContext", "FDescribe", "FWhen", "FDescribeTable":
        n.Focused = true
        n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
        n.Labels = labelFromCallExpr(ce)
        return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
    case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen", "PDescribeTable", "XDescribeTable":
        n.Pending = true
        n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
        n.Labels = labelFromCallExpr(ce)
        return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
    case "By":
        n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
        return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
    case "AfterEach", "BeforeEach":
        return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
    case "JustAfterEach", "JustBeforeEach":
        return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
    case "AfterSuite", "BeforeSuite":
        return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
    case "SynchronizedAfterSuite", "SynchronizedBeforeSuite":
        return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
    default:
        return nil, false
    }
}

// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or
// container. If it cannot derive it, it returns the alt text.
func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string {
    text, defined := textFromCallExpr(ce)
    if !defined {
        return alt
    }
    return text
}

// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container. If
// it cannot derive it, it returns false.
func textFromCallExpr(ce *ast.CallExpr) (string, bool) {
    if len(ce.Args) < 1 {
        return "", false
    }
    text, ok := ce.Args[0].(*ast.BasicLit)
    if !ok {
        return "", false
    }
    switch text.Kind {
    case token.CHAR, token.STRING:
        // For token.CHAR and token.STRING, Value is quoted
        unquoted, err := strconv.Unquote(text.Value)
        if err != nil {
            // If unquoting fails, just use the raw Value
            return text.Value, true
        }
        return unquoted, true
    default:
        return text.Value, true
    }
}

func labelFromCallExpr(ce *ast.CallExpr) []string {
    labels := []string{}
    if len(ce.Args) < 2 {
        return labels
    }

    for _, arg := range ce.Args[1:] {
        switch expr := arg.(type) {
        case *ast.CallExpr:
            id, ok := expr.Fun.(*ast.Ident)
            if !ok {
                // skip over cases where expr.Fun is actually an *ast.SelectorExpr
                continue
            }
            if id.Name == "Label" {
                ls := extractLabels(expr)
                labels = append(labels, ls...)
            }
        }
    }
    return labels
}

func extractLabels(expr *ast.CallExpr) []string {
    out := []string{}
    for _, arg := range expr.Args {
        switch expr := arg.(type) {
        case *ast.BasicLit:
            if expr.Kind == token.STRING {
                unquoted, err := strconv.Unquote(expr.Value)
                if err != nil {
                    unquoted = expr.Value
                }
                validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{})
                if err == nil {
                    out = append(out, validated)
                }
            }
        }
    }

    return out
}

func pendingFromCallExpr(ce *ast.CallExpr) bool {
    pending := false
    if len(ce.Args) < 2 {
        return pending
    }

    for _, arg := range ce.Args[1:] {
        switch expr := arg.(type) {
        case *ast.CallExpr:
            id, ok := expr.Fun.(*ast.Ident)
            if !ok {
                // skip over cases where expr.Fun is actually an *ast.SelectorExpr
                continue
            }
            if id.Name == "Pending" {
                pending = true
            }
        case *ast.Ident:
            if expr.Name == "Pending" {
                pending = true
            }
        }
    }
    return pending
}
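BackpropagateUnfocus implements the documented rule bottom-up: a container's own focus flag is dropped whenever a focused spec exists anywhere beneath it, so only the deepest focused nodes keep focused=true in the outline. A hypothetical spec to make that concrete:

package books_test

import . "github.com/onsi/ginkgo/v2"

// FDescribe is itself focused, but because a descendant leaf (FIt) is also
// focused, the outline reports the container as unfocused and only the leaf
// keeps focused=true:
var _ = FDescribe("Library", func() {
    It("lends a book", func() {})      // focused=false
    FIt("tracks late fees", func() {}) // focused=true
})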
65
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Most of the required functions were available in the
// "golang.org/x/tools/go/ast/astutil" package, but not exported.
// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go

package outline

import (
    "go/ast"
    "strconv"
    "strings"
)

// packageNameForImport returns the package name for the package. If the package
// is not imported, it returns nil. "Package name" refers to `pkgname` in the
// call expression `pkgname.ExportedIdentifier`. Examples:
// (import path not found) -> nil
// "import example.com/pkg/foo" -> "foo"
// "import fooalias example.com/pkg/foo" -> "fooalias"
// "import . example.com/pkg/foo" -> ""
func packageNameForImport(f *ast.File, path string) *string {
    spec := importSpec(f, path)
    if spec == nil {
        return nil
    }
    name := spec.Name.String()
    if name == "<nil>" {
        // If the package name is not explicitly specified,
        // make an educated guess. This is not guaranteed to be correct.
        lastSlash := strings.LastIndex(path, "/")
        if lastSlash == -1 {
            name = path
        } else {
            name = path[lastSlash+1:]
        }
    }
    if name == "." {
        name = ""
    }
    return &name
}

// importSpec returns the import spec if f imports path,
// or nil otherwise.
func importSpec(f *ast.File, path string) *ast.ImportSpec {
    for _, s := range f.Imports {
        if strings.HasPrefix(importPath(s), path) {
            return s
        }
    }
    return nil
}

// importPath returns the unquoted import path of s,
// or "" if the path is not properly quoted.
func importPath(s *ast.ImportSpec) string {
    t, err := strconv.Unquote(s.Path.Value)
    if err != nil {
        return ""
    }
    return t
}
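The three cases packageNameForImport distinguishes map directly onto the shape of ast.ImportSpec.Name: nil for a plain import, an identifier for an alias, and "." for a dot-import. A minimal, standalone sketch of those shapes (the sample source is hypothetical); note that a nil *ast.Ident stringifies to "<nil>", which is exactly the sentinel the helper above checks for:

package main

import (
    "fmt"
    "go/parser"
    "go/token"
)

const src = `package sample

import (
    g "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    "go/ast"
)
`

func main() {
    f, err := parser.ParseFile(token.NewFileSet(), "sample.go", src, parser.ImportsOnly)
    if err != nil {
        panic(err)
    }
    for _, s := range f.Imports {
        // s.Name is nil for plain imports, "g" for the alias, "." for the
        // dot-import; these are the cases packageNameForImport handles.
        fmt.Printf("path=%s name=%s\n", s.Path.Value, s.Name)
    }
}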
110
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
generated
vendored
Normal file
@@ -0,0 +1,110 @@
package outline

import (
    "encoding/json"
    "fmt"
    "go/ast"
    "go/token"
    "strings"

    "golang.org/x/tools/go/ast/inspector"
)

const (
    // ginkgoImportPath is the well-known ginkgo import path
    ginkgoImportPath = "github.com/onsi/ginkgo/v2"
)

// FromASTFile returns an outline for a Ginkgo test source file
func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) {
    ginkgoPackageName := packageNameForImport(src, ginkgoImportPath)
    if ginkgoPackageName == nil {
        return nil, fmt.Errorf("file does not import %q", ginkgoImportPath)
    }

    root := ginkgoNode{}
    stack := []*ginkgoNode{&root}
    ispr := inspector.New([]*ast.File{src})
    ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool {
        if push {
            // Pre-order traversal
            ce, ok := node.(*ast.CallExpr)
            if !ok {
                // Because `Nodes` calls this function only when the node is an
                // ast.CallExpr, this should never happen
                panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End()))
            }
            gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName)
            if !ok {
                // Node is not a Ginkgo spec or container, continue
                return true
            }
            parent := stack[len(stack)-1]
            parent.Nodes = append(parent.Nodes, gn)
            stack = append(stack, gn)
            return true
        }
        // Post-order traversal
        start, end := absoluteOffsetsForNode(fset, node)
        lastVisitedGinkgoNode := stack[len(stack)-1]
        if start != lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End {
            // Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue
            return true
        }
        stack = stack[0 : len(stack)-1]
        return true
    })
    if len(root.Nodes) == 0 {
        return &outline{[]*ginkgoNode{}}, nil
    }

    // Derive the final focused property for all nodes. This must be done
    // _before_ propagating the inherited focused property.
    root.BackpropagateUnfocus()
    // Now, propagate inherited properties, including focused and pending.
    root.PropagateInheritedProperties()

    return &outline{root.Nodes}, nil
}

type outline struct {
    Nodes []*ginkgoNode `json:"nodes"`
}

func (o *outline) MarshalJSON() ([]byte, error) {
    return json.Marshal(o.Nodes)
}

// String returns a CSV-formatted outline. Specs and containers are output in
// depth-first order.
func (o *outline) String() string {
    return o.StringIndent(0)
}

// StringIndent returns a CSV-formatted outline, but every line is indented by
// one 'width' of spaces for every level of nesting.
func (o *outline) StringIndent(width int) string {
    var b strings.Builder
    b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n")

    currentIndent := 0
    pre := func(n *ginkgoNode) {
        b.WriteString(fmt.Sprintf("%*s", currentIndent, ""))
        var labels string
        if len(n.Labels) == 1 {
            labels = n.Labels[0]
        } else {
            labels = strings.Join(n.Labels, ", ")
        }
        // Enclose the labels in a double-quoted, comma-separated list so that
        // when the output is imported into a CSV app the Labels column reads
        // as comma-separated strings.
        b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels))
        currentIndent += width
    }
    post := func(n *ginkgoNode) {
        currentIndent -= width
    }
    for _, n := range o.Nodes {
        n.Walk(pre, post)
    }
    return b.String()
}
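StringIndent writes one CSV row per outline node, in depth-first order, with Start/End as byte offsets into the source file. A hypothetical input file and the shape of the rows it would produce (the offsets shown are illustrative only):

package books_test

import . "github.com/onsi/ginkgo/v2"

var _ = Describe("Books", Label("catalog"), func() {
    It("can be checked out", func() {})
})

// `ginkgo outline --format=csv books_test.go` would emit rows shaped like:
//   Name,Text,Start,End,Spec,Focused,Pending,Labels
//   Describe,Books,52,131,false,false,false,"catalog"
//   It,can be checked out,92,127,true,false,false,""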
98
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go
generated
vendored
Normal file
@@ -0,0 +1,98 @@
package outline

import (
    "encoding/json"
    "fmt"
    "go/parser"
    "go/token"
    "os"

    "github.com/onsi/ginkgo/v2/ginkgo/command"
    "github.com/onsi/ginkgo/v2/types"
)

const (
    // indentWidth is the width used by the 'indent' output
    indentWidth = 4
    // stdinAlias is a portable alias for stdin. This convention is used in
    // other CLIs, e.g., kubectl.
    stdinAlias   = "-"
    usageCommand = "ginkgo outline <filename>"
)

type outlineConfig struct {
    Format string
}

func BuildOutlineCommand() command.Command {
    conf := outlineConfig{
        Format: "csv",
    }
    flags, err := types.NewGinkgoFlagSet(
        types.GinkgoFlags{
            {Name: "format", KeyPath: "Format",
                Usage:             "Format of outline",
                UsageArgument:     "one of 'csv', 'indent', or 'json'",
                UsageDefaultValue: conf.Format,
            },
        },
        &conf,
        types.GinkgoFlagSections{},
    )
    if err != nil {
        panic(err)
    }

    return command.Command{
        Name:          "outline",
        Usage:         "ginkgo outline <filename>",
        ShortDoc:      "Create an outline of Ginkgo symbols for a file",
        Documentation: "To read from stdin, use: `ginkgo outline -`",
        DocLink:       "creating-an-outline-of-specs",
        Flags:         flags,
        Command: func(args []string, _ []string) {
            outlineFile(args, conf.Format)
        },
    }
}

func outlineFile(args []string, format string) {
    if len(args) != 1 {
        command.AbortWithUsage("outline expects exactly one argument")
    }

    filename := args[0]
    var src *os.File
    if filename == stdinAlias {
        src = os.Stdin
    } else {
        var err error
        src, err = os.Open(filename)
        command.AbortIfError("Failed to open file:", err)
    }

    fset := token.NewFileSet()

    parsedSrc, err := parser.ParseFile(fset, filename, src, 0)
    command.AbortIfError("Failed to parse source:", err)

    o, err := FromASTFile(fset, parsedSrc)
    command.AbortIfError("Failed to create outline:", err)

    var oerr error
    switch format {
    case "csv":
        _, oerr = fmt.Print(o)
    case "indent":
        _, oerr = fmt.Print(o.StringIndent(indentWidth))
    case "json":
        b, err := json.Marshal(o)
        if err != nil {
            println(fmt.Sprintf("error marshalling to json: %s", err))
        }
        _, oerr = fmt.Println(string(b))
    default:
        command.AbortWith("Format %s not accepted", format)
    }
    command.AbortIfError("Failed to write outline:", oerr)
}
232
vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
generated
vendored
Normal file
@@ -0,0 +1,232 @@
package run

import (
    "fmt"
    "os"
    "strings"
    "time"

    "github.com/onsi/ginkgo/v2/formatter"
    "github.com/onsi/ginkgo/v2/ginkgo/command"
    "github.com/onsi/ginkgo/v2/ginkgo/internal"
    "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
    "github.com/onsi/ginkgo/v2/types"
)

func BuildRunCommand() command.Command {
    var suiteConfig = types.NewDefaultSuiteConfig()
    var reporterConfig = types.NewDefaultReporterConfig()
    var cliConfig = types.NewDefaultCLIConfig()
    var goFlagsConfig = types.NewDefaultGoFlagsConfig()

    flags, err := types.BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig)
    if err != nil {
        panic(err)
    }

    interruptHandler := interrupt_handler.NewInterruptHandler(nil)
    interrupt_handler.SwallowSigQuit()

    return command.Command{
        Name:          "run",
        Flags:         flags,
        Usage:         "ginkgo run <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
        ShortDoc:      "Run the tests in the passed in <PACKAGES> (or the package in the current directory if left blank)",
        Documentation: "Any arguments after -- will be passed to the test.",
        DocLink:       "running-tests",
        Command: func(args []string, additionalArgs []string) {
            var errors []error
            cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
            command.AbortIfErrors("Ginkgo detected configuration issues:", errors)

            runner := &SpecRunner{
                cliConfig:      cliConfig,
                goFlagsConfig:  goFlagsConfig,
                suiteConfig:    suiteConfig,
                reporterConfig: reporterConfig,
                flags:          flags,

                interruptHandler: interruptHandler,
            }

            runner.RunSpecs(args, additionalArgs)
        },
    }
}

type SpecRunner struct {
    suiteConfig    types.SuiteConfig
    reporterConfig types.ReporterConfig
    cliConfig      types.CLIConfig
    goFlagsConfig  types.GoFlagsConfig
    flags          types.GinkgoFlagSet

    interruptHandler *interrupt_handler.InterruptHandler
}

func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
    suites := internal.FindSuites(args, r.cliConfig, true)
    skippedSuites := suites.WithState(internal.TestSuiteStateSkippedByFilter)
    suites = suites.WithoutState(internal.TestSuiteStateSkippedByFilter)

    internal.VerifyCLIAndFrameworkVersion(suites)

    if len(skippedSuites) > 0 {
        fmt.Println("Will skip:")
        for _, skippedSuite := range skippedSuites {
            fmt.Println("  " + skippedSuite.Path)
        }
    }

    if len(skippedSuites) > 0 && len(suites) == 0 {
        command.AbortGracefullyWith("All tests skipped! Exiting...")
    }

    if len(suites) == 0 {
        command.AbortWith("Found no test suites")
    }

    if len(suites) > 1 && !r.flags.WasSet("succinct") && r.reporterConfig.Verbosity().LT(types.VerbosityLevelVerbose) {
        r.reporterConfig.Succinct = true
    }

    t := time.Now()
    var endTime time.Time
    if r.suiteConfig.Timeout > 0 {
        endTime = t.Add(r.suiteConfig.Timeout)
    }

    iteration := 0
OUTER_LOOP:
    for {
        if !r.flags.WasSet("seed") {
            r.suiteConfig.RandomSeed = time.Now().Unix()
        }
        if r.cliConfig.RandomizeSuites && len(suites) > 1 {
            suites = suites.ShuffledCopy(r.suiteConfig.RandomSeed)
        }

        opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers())
        opc.StartCompiling(suites, r.goFlagsConfig)

    SUITE_LOOP:
        for {
            suiteIdx, suite := opc.Next()
            if suiteIdx >= len(suites) {
                break SUITE_LOOP
            }
            suites[suiteIdx] = suite

            if r.interruptHandler.Status().Interrupted() {
                opc.StopAndDrain()
                break OUTER_LOOP
            }

            if suites[suiteIdx].State.Is(internal.TestSuiteStateSkippedDueToEmptyCompilation) {
                fmt.Printf("Skipping %s (no test files)\n", suite.Path)
                continue SUITE_LOOP
            }

            if suites[suiteIdx].State.Is(internal.TestSuiteStateFailedToCompile) {
                fmt.Println(suites[suiteIdx].CompilationError.Error())
                if !r.cliConfig.KeepGoing {
                    opc.StopAndDrain()
                }
                continue SUITE_LOOP
            }

            if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 && !r.cliConfig.KeepGoing {
                suites[suiteIdx].State = internal.TestSuiteStateSkippedDueToPriorFailures
                opc.StopAndDrain()
                continue SUITE_LOOP
            }

            if !endTime.IsZero() {
                r.suiteConfig.Timeout = endTime.Sub(time.Now())
                if r.suiteConfig.Timeout <= 0 {
                    suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout
                    opc.StopAndDrain()
                    continue SUITE_LOOP
                }
            }

            suites[suiteIdx] = internal.RunCompiledSuite(suites[suiteIdx], r.suiteConfig, r.reporterConfig, r.cliConfig, r.goFlagsConfig, additionalArgs)
        }

        if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
            if iteration > 0 {
                fmt.Printf("\nTests failed on attempt #%d\n\n", iteration+1)
            }
            break OUTER_LOOP
        }

        if r.cliConfig.UntilItFails {
            fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration+1, orcMessage(iteration+1))
        } else if r.cliConfig.Repeat > 0 && iteration < r.cliConfig.Repeat {
            fmt.Printf("\nAll tests passed...\nThis was attempt %d of %d.\n", iteration+1, r.cliConfig.Repeat+1)
        } else {
            break OUTER_LOOP
        }
        iteration += 1
    }

    internal.Cleanup(r.goFlagsConfig, suites...)

    messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, r.cliConfig, r.suiteConfig, r.reporterConfig, r.goFlagsConfig)
    command.AbortIfError("could not finalize profiles:", err)
    for _, message := range messages {
        fmt.Println(message)
    }

    fmt.Printf("\nGinkgo ran %d %s in %s\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), time.Since(t))

    if suites.CountWithState(internal.TestSuiteStateFailureStates...) == 0 {
        if suites.AnyHaveProgrammaticFocus() && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
            fmt.Printf("Test Suite Passed\n")
            fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
            command.Abort(command.AbortDetails{ExitCode: types.GINKGO_FOCUS_EXIT_CODE})
        } else {
            fmt.Printf("Test Suite Passed\n")
            command.Abort(command.AbortDetails{})
        }
    } else {
        fmt.Fprintln(formatter.ColorableStdOut, "")
        if len(suites) > 1 && suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
            fmt.Fprintln(formatter.ColorableStdOut,
                internal.FailedSuitesReport(suites, formatter.NewWithNoColorBool(r.reporterConfig.NoColor)))
        }
        fmt.Printf("Test Suite Failed\n")
        command.Abort(command.AbortDetails{ExitCode: 1})
    }
}

func orcMessage(iteration int) string {
    if iteration < 10 {
        return ""
    } else if iteration < 30 {
        return []string{
            "If at first you succeed...",
            "...try, try again.",
            "Looking good!",
            "Still good...",
            "I think your tests are fine....",
            "Yep, still passing",
            "Oh boy, here I go testin' again!",
            "Even the gophers are getting bored",
            "Did you try -race?",
            "Maybe you should stop now?",
            "I'm getting tired...",
            "What if I just made you a sandwich?",
            "Hit ^C, hit ^C, please hit ^C",
            "Make it stop. Please!",
            "Come on! Enough is enough!",
            "Dave, this conversation can serve no purpose anymore. Goodbye.",
            "Just what do you think you're doing, Dave? ",
            "I, Sisyphus",
            "Insanity: doing the same thing over and over again and expecting different results. -Einstein",
            "I guess Einstein never tried to churn butter",
        }[iteration-10] + "\n"
    } else {
        return "No, seriously... you can probably stop now.\n"
    }
}
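Note how RunSpecs turns a single --timeout into a shared budget across suites: endTime is fixed once before the loop, and each suite's remaining allowance is recomputed just before it runs; a suite that starts after the budget is spent is marked failed-due-to-timeout without running. A standalone sketch of that bookkeeping, using a simulated clock and made-up per-suite durations:

package main

import (
    "fmt"
    "time"
)

func main() {
    // A simulated clock stands in for time.Now() so the example runs instantly.
    now := time.Unix(0, 0)
    budget := 10 * time.Second
    endTime := now.Add(budget)

    // Pretend each suite takes 5s: the third suite finds the shared budget
    // exhausted, mirroring TestSuiteStateFailedDueToTimeout above.
    for i, runTime := range []time.Duration{5 * time.Second, 5 * time.Second, 5 * time.Second} {
        remaining := endTime.Sub(now)
        if remaining <= 0 {
            fmt.Printf("suite %d: no budget left, marked failed-due-to-timeout\n", i)
            continue
        }
        fmt.Printf("suite %d: runs with %s of budget remaining\n", i, remaining)
        now = now.Add(runTime)
    }
}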
186
vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go
generated
vendored
Normal file
@@ -0,0 +1,186 @@
package unfocus

import (
    "bytes"
    "fmt"
    "go/ast"
    "go/parser"
    "go/token"
    "io"
    "os"
    "path/filepath"
    "strings"
    "sync"

    "github.com/onsi/ginkgo/v2/ginkgo/command"
)

func BuildUnfocusCommand() command.Command {
    return command.Command{
        Name:     "unfocus",
        Usage:    "ginkgo unfocus",
        ShortDoc: "Recursively unfocus any focused tests under the current directory",
        DocLink:  "filtering-specs",
        Command: func(_ []string, _ []string) {
            unfocusSpecs()
        },
    }
}

func unfocusSpecs() {
    fmt.Println("Scanning for focus...")

    goFiles := make(chan string)
    go func() {
        unfocusDir(goFiles, ".")
        close(goFiles)
    }()

    const workers = 10
    wg := sync.WaitGroup{}
    wg.Add(workers)

    for i := 0; i < workers; i++ {
        go func() {
            for path := range goFiles {
                unfocusFile(path)
            }
            wg.Done()
        }()
    }

    wg.Wait()
}

func unfocusDir(goFiles chan string, path string) {
    files, err := os.ReadDir(path)
    if err != nil {
        fmt.Println(err.Error())
        return
    }

    for _, f := range files {
        switch {
        case f.IsDir() && shouldProcessDir(f.Name()):
            unfocusDir(goFiles, filepath.Join(path, f.Name()))
        case !f.IsDir() && shouldProcessFile(f.Name()):
            goFiles <- filepath.Join(path, f.Name())
        }
    }
}

func shouldProcessDir(basename string) bool {
    return basename != "vendor" && !strings.HasPrefix(basename, ".")
}

func shouldProcessFile(basename string) bool {
    return strings.HasSuffix(basename, ".go")
}

func unfocusFile(path string) {
    data, err := os.ReadFile(path)
    if err != nil {
        fmt.Printf("error reading file '%s': %s\n", path, err.Error())
        return
    }

    ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), parser.ParseComments)
    if err != nil {
        fmt.Printf("error parsing file '%s': %s\n", path, err.Error())
        return
    }

    eliminations := scanForFocus(ast)
    if len(eliminations) == 0 {
        return
    }

    fmt.Printf("...updating %s\n", path)
    backup, err := writeBackup(path, data)
    if err != nil {
        fmt.Printf("error creating backup file: %s\n", err.Error())
        return
    }

    if err := updateFile(path, data, eliminations); err != nil {
        fmt.Printf("error writing file '%s': %s\n", path, err.Error())
        return
    }

    os.Remove(backup)
}

func writeBackup(path string, data []byte) (string, error) {
    t, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path))

    if err != nil {
        return "", fmt.Errorf("error creating temporary file: %w", err)
    }
    defer t.Close()

    if _, err := io.Copy(t, bytes.NewReader(data)); err != nil {
        return "", fmt.Errorf("error writing to temporary file: %w", err)
    }

    return t.Name(), nil
}

func updateFile(path string, data []byte, eliminations [][]int64) error {
    to, err := os.Create(path)
    if err != nil {
        return fmt.Errorf("error opening file for writing '%s': %w\n", path, err)
    }
    defer to.Close()

    from := bytes.NewReader(data)
    var cursor int64
    for _, eliminationRange := range eliminations {
        positionToEliminate, lengthToEliminate := eliminationRange[0]-1, eliminationRange[1]
        if _, err := io.CopyN(to, from, positionToEliminate-cursor); err != nil {
            return fmt.Errorf("error copying data: %w", err)
        }

        cursor = positionToEliminate + lengthToEliminate

        if _, err := from.Seek(lengthToEliminate, io.SeekCurrent); err != nil {
            return fmt.Errorf("error seeking to position in buffer: %w", err)
        }
    }

    if _, err := io.Copy(to, from); err != nil {
        return fmt.Errorf("error copying end data: %w", err)
    }

    return nil
}

func scanForFocus(file *ast.File) (eliminations [][]int64) {
    ast.Inspect(file, func(n ast.Node) bool {
        if c, ok := n.(*ast.CallExpr); ok {
            if i, ok := c.Fun.(*ast.Ident); ok {
                if isFocus(i.Name) {
                    eliminations = append(eliminations, []int64{int64(i.Pos()), 1})
                }
            }
        }

        if i, ok := n.(*ast.Ident); ok {
            if i.Name == "Focus" {
                eliminations = append(eliminations, []int64{int64(i.Pos()), 6})
            }
        }

        return true
    })

    return eliminations
}

func isFocus(name string) bool {
    switch name {
    case "FDescribe", "FContext", "FIt", "FDescribeTable", "FEntry", "FSpecify", "FWhen":
        return true
    default:
        return false
    }
}
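scanForFocus records each elimination as a (1-based position, byte length) pair: stripping the leading "F" from FIt or FDescribe is a 1-byte cut, while a Focus identifier gets a 6-byte cut, which appears intended to take "Focus" plus one following character (an assumption; the vendored code does not say so explicitly). updateFile then splices those cuts out with a copy/skip cursor. A standalone sketch replaying the same arithmetic on an in-memory buffer (error handling elided for brevity):

package main

import (
    "bytes"
    "fmt"
    "io"
)

// applyEliminations replays updateFile's cursor arithmetic: copy up to each
// cut, skip the cut, then copy whatever remains.
func applyEliminations(data []byte, eliminations [][]int64) []byte {
    var out bytes.Buffer
    from := bytes.NewReader(data)
    var cursor int64
    for _, e := range eliminations {
        pos, length := e[0]-1, e[1] // positions are 1-based, like token.Pos
        io.CopyN(&out, from, pos-cursor)
        from.Seek(length, io.SeekCurrent)
        cursor = pos + length
    }
    io.Copy(&out, from)
    return out.Bytes()
}

func main() {
    src := []byte(`FIt("is focused", func() {})`)
    // A single 1-byte cut at position 1 drops the leading "F".
    fmt.Println(string(applyEliminations(src, [][]int64{{1, 1}})))
    // Output: It("is focused", func() {})
}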
22
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
package watch

import "sort"

type Delta struct {
    ModifiedPackages []string

    NewSuites      []*Suite
    RemovedSuites  []*Suite
    modifiedSuites []*Suite
}

type DescendingByDelta []*Suite

func (a DescendingByDelta) Len() int           { return len(a) }
func (a DescendingByDelta) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() }

func (d Delta) ModifiedSuites() []*Suite {
    sort.Sort(DescendingByDelta(d.modifiedSuites))
    return d.modifiedSuites
}
75
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go
generated
vendored
Normal file
@@ -0,0 +1,75 @@
package watch

import (
    "fmt"
    "regexp"

    "github.com/onsi/ginkgo/v2/ginkgo/internal"
)

type SuiteErrors map[internal.TestSuite]error

type DeltaTracker struct {
    maxDepth      int
    watchRegExp   *regexp.Regexp
    suites        map[string]*Suite
    packageHashes *PackageHashes
}

func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker {
    return &DeltaTracker{
        maxDepth:      maxDepth,
        watchRegExp:   watchRegExp,
        packageHashes: NewPackageHashes(watchRegExp),
        suites:        map[string]*Suite{},
    }
}

func (d *DeltaTracker) Delta(suites internal.TestSuites) (delta Delta, errors SuiteErrors) {
    errors = SuiteErrors{}
    delta.ModifiedPackages = d.packageHashes.CheckForChanges()

    providedSuitePaths := map[string]bool{}
    for _, suite := range suites {
        providedSuitePaths[suite.Path] = true
    }

    d.packageHashes.StartTrackingUsage()

    for _, suite := range d.suites {
        if providedSuitePaths[suite.Suite.Path] {
            if suite.Delta() > 0 {
                delta.modifiedSuites = append(delta.modifiedSuites, suite)
            }
        } else {
            delta.RemovedSuites = append(delta.RemovedSuites, suite)
        }
    }

    d.packageHashes.StopTrackingUsageAndPrune()

    for _, suite := range suites {
        _, ok := d.suites[suite.Path]
        if !ok {
            s, err := NewSuite(suite, d.maxDepth, d.packageHashes)
            if err != nil {
                errors[suite] = err
                continue
            }
            d.suites[suite.Path] = s
            delta.NewSuites = append(delta.NewSuites, s)
        }
    }

    return delta, errors
}

func (d *DeltaTracker) WillRun(suite internal.TestSuite) error {
    s, ok := d.suites[suite.Path]
    if !ok {
        return fmt.Errorf("unknown suite %s", suite.Path)
    }

    return s.MarkAsRunAndRecomputedDependencies(d.maxDepth)
}
92
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
package watch

import (
    "go/build"
    "regexp"
)

var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`)
var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) // allow us to integration test this thing

type Dependencies struct {
    deps map[string]int
}

func NewDependencies(path string, maxDepth int) (Dependencies, error) {
    d := Dependencies{
        deps: map[string]int{},
    }

    if maxDepth == 0 {
        return d, nil
    }

    err := d.seedWithDepsForPackageAtPath(path)
    if err != nil {
        return d, err
    }

    for depth := 1; depth < maxDepth; depth++ {
        n := len(d.deps)
        d.addDepsForDepth(depth)
        if n == len(d.deps) {
            break
        }
    }

    return d, nil
}

func (d Dependencies) Dependencies() map[string]int {
    return d.deps
}

func (d Dependencies) seedWithDepsForPackageAtPath(path string) error {
    pkg, err := build.ImportDir(path, 0)
    if err != nil {
        return err
    }

    d.resolveAndAdd(pkg.Imports, 1)
    d.resolveAndAdd(pkg.TestImports, 1)
    d.resolveAndAdd(pkg.XTestImports, 1)

    delete(d.deps, pkg.Dir)
    return nil
}

func (d Dependencies) addDepsForDepth(depth int) {
    for dep, depDepth := range d.deps {
        if depDepth == depth {
            d.addDepsForDep(dep, depth+1)
        }
    }
}

func (d Dependencies) addDepsForDep(dep string, depth int) {
    pkg, err := build.ImportDir(dep, 0)
    if err != nil {
        println(err.Error())
        return
    }
    d.resolveAndAdd(pkg.Imports, depth)
}

func (d Dependencies) resolveAndAdd(deps []string, depth int) {
    for _, dep := range deps {
        pkg, err := build.Import(dep, ".", 0)
        if err != nil {
            continue
        }
        if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) {
            d.addDepIfNotPresent(pkg.Dir, depth)
        }
    }
}

func (d Dependencies) addDepIfNotPresent(dep string, depth int) {
    _, ok := d.deps[dep]
    if !ok {
        d.deps[dep] = depth
    }
}
108
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
generated
vendored
Normal file
@@ -0,0 +1,108 @@
package watch

import (
    "fmt"
    "os"
    "regexp"
    "time"
)

var goTestRegExp = regexp.MustCompile(`_test\.go$`)

type PackageHash struct {
    CodeModifiedTime time.Time
    TestModifiedTime time.Time
    Deleted          bool

    path        string
    codeHash    string
    testHash    string
    watchRegExp *regexp.Regexp
}

func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash {
    p := &PackageHash{
        path:        path,
        watchRegExp: watchRegExp,
    }

    p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes()

    return p
}

func (p *PackageHash) CheckForChanges() bool {
    codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes()

    if deleted {
        if !p.Deleted {
            t := time.Now()
            p.CodeModifiedTime = t
            p.TestModifiedTime = t
        }
        p.Deleted = true
        return true
    }

    modified := false
    p.Deleted = false

    if p.codeHash != codeHash {
        p.CodeModifiedTime = codeModifiedTime
        modified = true
    }
    if p.testHash != testHash {
        p.TestModifiedTime = testModifiedTime
        modified = true
    }

    p.codeHash = codeHash
    p.testHash = testHash
    return modified
}

func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) {
    entries, err := os.ReadDir(p.path)

    if err != nil {
        deleted = true
        return
    }

    for _, entry := range entries {
        if entry.IsDir() {
            continue
        }

        info, err := entry.Info()
        if err != nil {
            continue
        }

        if goTestRegExp.Match([]byte(info.Name())) {
            testHash += p.hashForFileInfo(info)
            if info.ModTime().After(testModifiedTime) {
                testModifiedTime = info.ModTime()
            }
            continue
        }

        if p.watchRegExp.Match([]byte(info.Name())) {
            codeHash += p.hashForFileInfo(info)
            if info.ModTime().After(codeModifiedTime) {
                codeModifiedTime = info.ModTime()
            }
        }
    }

    testHash += codeHash
    if codeModifiedTime.After(testModifiedTime) {
        testModifiedTime = codeModifiedTime
    }

    return
}

func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
    return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
}
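The "hash" here is deliberately not cryptographic: each file contributes a name_size_mtime fingerprint, and the concatenation changes whenever any file is added, removed, resized, or touched, which is all the watcher needs. A minimal standalone sketch of the same fingerprinting (the "." directory is a stand-in for a watched package):

package main

import (
    "fmt"
    "os"
)

// fingerprint mirrors PackageHash.hashForFileInfo: any rename, resize, or
// modification of a file in dir changes the concatenated string.
func fingerprint(dir string) (string, error) {
    entries, err := os.ReadDir(dir)
    if err != nil {
        return "", err
    }
    hash := ""
    for _, entry := range entries {
        if entry.IsDir() {
            continue
        }
        info, err := entry.Info()
        if err != nil {
            continue
        }
        hash += fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
    }
    return hash, nil
}

func main() {
    h, err := fingerprint(".") // hypothetical watched package directory
    if err != nil {
        panic(err)
    }
    fmt.Println(h)
}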
85
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
package watch

import (
    "path/filepath"
    "regexp"
    "sync"
)

type PackageHashes struct {
    PackageHashes map[string]*PackageHash
    usedPaths     map[string]bool
    watchRegExp   *regexp.Regexp
    lock          *sync.Mutex
}

func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes {
    return &PackageHashes{
        PackageHashes: map[string]*PackageHash{},
        usedPaths:     nil,
        watchRegExp:   watchRegExp,
        lock:          &sync.Mutex{},
    }
}

func (p *PackageHashes) CheckForChanges() []string {
    p.lock.Lock()
    defer p.lock.Unlock()

    modified := []string{}

    for _, packageHash := range p.PackageHashes {
        if packageHash.CheckForChanges() {
            modified = append(modified, packageHash.path)
        }
    }

    return modified
}

func (p *PackageHashes) Add(path string) *PackageHash {
    p.lock.Lock()
    defer p.lock.Unlock()

    path, _ = filepath.Abs(path)
    _, ok := p.PackageHashes[path]
    if !ok {
        p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp)
    }

    if p.usedPaths != nil {
        p.usedPaths[path] = true
    }
    return p.PackageHashes[path]
}

func (p *PackageHashes) Get(path string) *PackageHash {
    p.lock.Lock()
    defer p.lock.Unlock()

    path, _ = filepath.Abs(path)
    if p.usedPaths != nil {
        p.usedPaths[path] = true
    }
    return p.PackageHashes[path]
}

func (p *PackageHashes) StartTrackingUsage() {
    p.lock.Lock()
    defer p.lock.Unlock()

    p.usedPaths = map[string]bool{}
}

func (p *PackageHashes) StopTrackingUsageAndPrune() {
    p.lock.Lock()
    defer p.lock.Unlock()

    for path := range p.PackageHashes {
        if !p.usedPaths[path] {
            delete(p.PackageHashes, path)
        }
    }

    p.usedPaths = nil
}
87
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go
generated
vendored
Normal file
@@ -0,0 +1,87 @@
package watch

import (
    "fmt"
    "math"
    "time"

    "github.com/onsi/ginkgo/v2/ginkgo/internal"
)

type Suite struct {
    Suite        internal.TestSuite
    RunTime      time.Time
    Dependencies Dependencies

    sharedPackageHashes *PackageHashes
}

func NewSuite(suite internal.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) {
    deps, err := NewDependencies(suite.Path, maxDepth)
    if err != nil {
        return nil, err
    }

    sharedPackageHashes.Add(suite.Path)
    for dep := range deps.Dependencies() {
        sharedPackageHashes.Add(dep)
    }

    return &Suite{
        Suite:        suite,
        Dependencies: deps,

        sharedPackageHashes: sharedPackageHashes,
    }, nil
}

func (s *Suite) Delta() float64 {
    delta := s.delta(s.Suite.Path, true, 0) * 1000
    for dep, depth := range s.Dependencies.Dependencies() {
        delta += s.delta(dep, false, depth)
    }
    return delta
}

func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error {
    s.RunTime = time.Now()

    deps, err := NewDependencies(s.Suite.Path, maxDepth)
    if err != nil {
        return err
    }

    s.sharedPackageHashes.Add(s.Suite.Path)
    for dep := range deps.Dependencies() {
        s.sharedPackageHashes.Add(dep)
    }

    s.Dependencies = deps

    return nil
}

func (s *Suite) Description() string {
    numDeps := len(s.Dependencies.Dependencies())
    pluralizer := "ies"
    if numDeps == 1 {
        pluralizer = "y"
    }
    return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer)
}

func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 {
    return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1)
}

func (s *Suite) dt(packagePath string, includeTests bool) time.Duration {
    packageHash := s.sharedPackageHashes.Get(packagePath)
    var modifiedTime time.Time
    if includeTests {
        modifiedTime = packageHash.TestModifiedTime
    } else {
        modifiedTime = packageHash.CodeModifiedTime
    }

    return modifiedTime.Sub(s.RunTime)
}
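Delta() is a staleness score rather than a strict ordering: each package contributes how long after the last run it was modified (clamped at zero), discounted by its import depth, and the suite's own package is weighted 1000x so direct edits always dominate. A standalone sketch of the arithmetic with made-up timestamps:

package main

import (
    "fmt"
    "math"
    "time"
)

// score mirrors Suite.delta: time modified after the last run, clamped at
// zero and discounted by how deep in the import graph the change happened.
func score(modifiedAfterRun time.Duration, depth int) float64 {
    return math.Max(float64(modifiedAfterRun), 0) / float64(depth+1)
}

func main() {
    // The suite's own package, edited 2s after the last run, weighted 1000x.
    own := score(2*time.Second, 0) * 1000
    // A depth-3 dependency edited 10s after the last run.
    dep := score(10*time.Second, 3)
    fmt.Printf("own=%v dep=%v total=%v\n", own, dep, own+dep)
}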
192
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
generated
vendored
Normal file
192
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
generated
vendored
Normal file
@ -0,0 +1,192 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/formatter"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||
"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
func BuildWatchCommand() command.Command {
|
||||
var suiteConfig = types.NewDefaultSuiteConfig()
|
||||
var reporterConfig = types.NewDefaultReporterConfig()
|
||||
var cliConfig = types.NewDefaultCLIConfig()
|
||||
var goFlagsConfig = types.NewDefaultGoFlagsConfig()
|
||||
|
||||
flags, err := types.BuildWatchCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
interruptHandler := interrupt_handler.NewInterruptHandler(nil)
|
||||
interrupt_handler.SwallowSigQuit()
|
||||
|
||||
return command.Command{
|
||||
Name: "watch",
|
||||
Flags: flags,
|
||||
Usage: "ginkgo watch <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
|
||||
ShortDoc: "Watch the passed in <PACKAGES> and runs their tests whenever changes occur.",
|
||||
Documentation: "Any arguments after -- will be passed to the test.",
|
||||
DocLink: "watching-for-changes",
|
||||
Command: func(args []string, additionalArgs []string) {
|
||||
var errors []error
|
||||
cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
|
||||
command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
|
||||
|
||||
			watcher := &SpecWatcher{
				cliConfig:      cliConfig,
				goFlagsConfig:  goFlagsConfig,
				suiteConfig:    suiteConfig,
				reporterConfig: reporterConfig,
				flags:          flags,

				interruptHandler: interruptHandler,
			}

			watcher.WatchSpecs(args, additionalArgs)
		},
	}
}

type SpecWatcher struct {
	suiteConfig    types.SuiteConfig
	reporterConfig types.ReporterConfig
	cliConfig      types.CLIConfig
	goFlagsConfig  types.GoFlagsConfig
	flags          types.GinkgoFlagSet

	interruptHandler *interrupt_handler.InterruptHandler
}

func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
	suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)

	internal.VerifyCLIAndFrameworkVersion(suites)

	if len(suites) == 0 {
		command.AbortWith("Found no test suites")
	}

	fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), w.cliConfig.Depth)
	deltaTracker := NewDeltaTracker(w.cliConfig.Depth, regexp.MustCompile(w.cliConfig.WatchRegExp))
	delta, errors := deltaTracker.Delta(suites)

	fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites)))
	for _, suite := range delta.NewSuites {
		fmt.Println("  " + suite.Description())
	}

	for suite, err := range errors {
		fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err)
	}

	if len(suites) == 1 {
		w.updateSeed()
		w.compileAndRun(suites[0], additionalArgs)
	}

	ticker := time.NewTicker(time.Second)

	for {
		select {
		case <-ticker.C:
			suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
			delta, _ := deltaTracker.Delta(suites)
			coloredStream := formatter.ColorableStdOut

			suites = internal.TestSuites{}

			if len(delta.NewSuites) > 0 {
				fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected %d new %s:{{/}}", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites))))
				for _, suite := range delta.NewSuites {
					suites = append(suites, suite.Suite)
					fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description()))
				}
			}

			modifiedSuites := delta.ModifiedSuites()
			if len(modifiedSuites) > 0 {
				fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected changes in:{{/}}"))
				for _, pkg := range delta.ModifiedPackages {
					fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", pkg))
				}
				fmt.Fprintln(coloredStream, formatter.F("{{green}}Will run %d %s:{{/}}", len(modifiedSuites), internal.PluralizedWord("suite", "suites", len(modifiedSuites))))
				for _, suite := range modifiedSuites {
					suites = append(suites, suite.Suite)
					fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description()))
				}
				fmt.Fprintln(coloredStream, "")
			}

			if len(suites) == 0 {
				break
			}

			w.updateSeed()
			w.computeSuccinctMode(len(suites))
			for idx := range suites {
				if w.interruptHandler.Status().Interrupted() {
					return
				}
				deltaTracker.WillRun(suites[idx])
				suites[idx] = w.compileAndRun(suites[idx], additionalArgs)
			}
			color := "{{green}}"
			if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
				color = "{{red}}"
			}
			fmt.Fprintln(coloredStream, formatter.F(color+"\nDone. Resuming watch...{{/}}"))

			messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, w.cliConfig, w.suiteConfig, w.reporterConfig, w.goFlagsConfig)
			command.AbortIfError("could not finalize profiles:", err)
			for _, message := range messages {
				fmt.Println(message)
			}
		case <-w.interruptHandler.Status().Channel:
			return
		}
	}
}

func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite {
	suite = internal.CompileSuite(suite, w.goFlagsConfig)
	if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
		fmt.Println(suite.CompilationError.Error())
		return suite
	}
	if w.interruptHandler.Status().Interrupted() {
		return suite
	}
	suite = internal.RunCompiledSuite(suite, w.suiteConfig, w.reporterConfig, w.cliConfig, w.goFlagsConfig, additionalArgs)
	internal.Cleanup(w.goFlagsConfig, suite)
	return suite
}

func (w *SpecWatcher) computeSuccinctMode(numSuites int) {
	if w.reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) {
		w.reporterConfig.Succinct = false
		return
	}

	if w.flags.WasSet("succinct") {
		return
	}

	if numSuites == 1 {
		w.reporterConfig.Succinct = false
	}

	if numSuites > 1 {
		w.reporterConfig.Succinct = true
	}
}

func (w *SpecWatcher) updateSeed() {
	if !w.flags.WasSet("seed") {
		w.suiteConfig.RandomSeed = time.Now().Unix()
	}
}
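For orientation, here is a minimal, self-contained sketch (illustrative only, not part of the vendored code) of the same ticker-plus-select polling shape WatchSpecs uses: rescan on every tick, and exit cleanly when an interrupt channel fires.

package main

import (
	"fmt"
	"time"
)

func watch(interrupt <-chan struct{}) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// rescan for changes here; WatchSpecs recomputes the suite delta on each tick
			fmt.Println("tick: rescanning")
		case <-interrupt:
			// an interrupt ends the watch loop immediately
			return
		}
	}
}

func main() {
	interrupt := make(chan struct{})
	go func() { time.Sleep(3 * time.Second); close(interrupt) }()
	watch(interrupt)
}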
8 vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go generated vendored Normal file
@@ -0,0 +1,8 @@
+//go:build ginkgoclidependencies
+// +build ginkgoclidependencies
+
+package ginkgo
+
+import (
+	_ "github.com/onsi/ginkgo/v2/ginkgo"
+)
63 vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go generated vendored
@@ -1,26 +1,42 @@
 package ginkgo

-import "github.com/onsi/ginkgo/v2/internal/testingtproxy"
+import (
+	"github.com/onsi/ginkgo/v2/internal/testingtproxy"
+)

 /*
-GinkgoT() implements an interface analogous to *testing.T and can be used with
-third-party libraries that accept *testing.T through an interface.
+GinkgoT() implements an interface that allows third party libraries to integrate with and build on top of Ginkgo.
+
+GinkgoT() is analogous to *testing.T and implements the majority of *testing.T's methods. It can typically be used as a drop-in replacement with third-party libraries that accept *testing.T through an interface.

 GinkgoT() takes an optional offset argument that can be used to get the
-correct line number associated with the failure.
+correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately.
+
+You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries
 */
-func GinkgoT(optionalOffset ...int) GinkgoTInterface {
+func GinkgoT(optionalOffset ...int) FullGinkgoTInterface {
 	offset := 3
 	if len(optionalOffset) > 0 {
 		offset = optionalOffset[0]
 	}
-	return testingtproxy.New(GinkgoWriter, Fail, Skip, DeferCleanup, CurrentSpecReport, offset)
+	return testingtproxy.New(
+		GinkgoWriter,
+		Fail,
+		Skip,
+		DeferCleanup,
+		CurrentSpecReport,
+		AddReportEntry,
+		GinkgoRecover,
+		AttachProgressReporter,
+		suiteConfig.RandomSeed,
+		suiteConfig.ParallelProcess,
+		suiteConfig.ParallelTotal,
+		reporterConfig.NoColor,
+		offset)
 }

 /*
-The interface returned by GinkgoT(). This covers most of the methods in the testing package's T.
+The portion of the interface returned by GinkgoT() that maps onto methods in the testing package's T.
 */
 type GinkgoTInterface interface {
 	Cleanup(func())
@@ -43,3 +59,36 @@ type GinkgoTInterface interface {
 	Skipped() bool
 	TempDir() string
 }
+
+/*
+Additional methods returned by GinkgoT() that provide deeper integration points into Ginkgo
+*/
+type FullGinkgoTInterface interface {
+	GinkgoTInterface
+
+	AddReportEntryVisibilityAlways(name string, args ...any)
+	AddReportEntryVisibilityFailureOrVerbose(name string, args ...any)
+	AddReportEntryVisibilityNever(name string, args ...any)
+
+	// Prints to the GinkgoWriter
+	Print(a ...interface{})
+	Printf(format string, a ...interface{})
+	Println(a ...interface{})
+
+	// Provides access to Ginkgo's color formatting, correctly configured to match the color settings specified in the invocation of ginkgo
+	F(format string, args ...any) string
+	Fi(indentation uint, format string, args ...any) string
+	Fiw(indentation uint, maxWidth uint, format string, args ...any) string
+
+	// Generates a formatted string version of the current spec's timeline
+	RenderTimeline() string
+
+	GinkgoRecover()
+	DeferCleanup(args ...any)
+
+	RandomSeed() int64
+	ParallelProcess() int
+	ParallelTotal() int
+
+	AttachProgressReporter(func() string) func()
+}
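Since GinkgoT() now returns the richer FullGinkgoTInterface, the common third-party pattern is unchanged. A hedged sketch (assuming the stretchr/testify library, which accepts *testing.T through an interface; the spec itself is hypothetical):

package mypkg_test

import (
	. "github.com/onsi/ginkgo/v2"
	"github.com/stretchr/testify/assert"
)

var _ = Describe("third-party integration", func() {
	It("accepts GinkgoT() wherever *testing.T is taken via an interface", func() {
		a := assert.New(GinkgoT()) // GinkgoT() satisfies testify's assert.TestingT
		a.Equal(4, 2+2)
	})
})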
50 vendor/github.com/onsi/ginkgo/v2/internal/group.go generated vendored
@@ -94,15 +94,19 @@ type group struct {
 	runOncePairs   map[uint]runOncePairs
 	runOnceTracker map[runOncePair]types.SpecState

-	succeeded bool
+	succeeded              bool
+	failedInARunOnceBefore bool
+	continueOnFailure      bool
 }

 func newGroup(suite *Suite) *group {
 	return &group{
-		suite:          suite,
-		runOncePairs:   map[uint]runOncePairs{},
-		runOnceTracker: map[runOncePair]types.SpecState{},
-		succeeded:      true,
+		suite:                  suite,
+		runOncePairs:           map[uint]runOncePairs{},
+		runOnceTracker:         map[runOncePair]types.SpecState{},
+		succeeded:              true,
+		failedInARunOnceBefore: false,
+		continueOnFailure:      false,
 	}
 }

@@ -116,6 +120,7 @@ func (g *group) initialReportForSpec(spec Spec) types.SpecReport {
 		LeafNodeText:         spec.FirstNodeWithType(types.NodeTypeIt).Text,
 		LeafNodeLabels:       []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
 		ParallelProcess:      g.suite.config.ParallelProcess,
+		RunningInParallel:    g.suite.isRunningInParallel(),
 		IsSerial:             spec.Nodes.HasNodeMarkedSerial(),
 		IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
 		MaxFlakeAttempts:     spec.Nodes.GetMaxFlakeAttempts(),
@@ -136,10 +141,14 @@ func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) {
 	if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) {
 		return types.SpecStateSkipped, types.Failure{}
 	}
-	if !g.succeeded {
+	if !g.succeeded && !g.continueOnFailure {
 		return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
 			"Spec skipped because an earlier spec in an ordered container failed")
 	}
+	if g.failedInARunOnceBefore && g.continueOnFailure {
+		return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
+			"Spec skipped because a BeforeAll node failed")
+	}
 	beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach)
 	for _, pair := range beforeOncePairs {
 		if g.runOnceTracker[pair].Is(types.SpecStateSkipped) {
@@ -167,7 +176,8 @@ func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool {
 	return lastSpecID == specID
 }

-func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
+func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) bool {
+	failedInARunOnceBefore := false
 	pairs := g.runOncePairs[spec.SubjectID()]

 	nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll)
@@ -193,6 +203,7 @@
 		}
 		if g.suite.currentSpecReport.State != types.SpecStatePassed {
 			terminatingNode, terminatingPair = node, oncePair
+			failedInARunOnceBefore = !terminatingPair.isZero()
 			break
 		}
 	}
@@ -215,7 +226,7 @@
 			//this node has already been run on this attempt, don't rerun it
 			return false
 		}
-		pair := runOncePair{}
+		var pair runOncePair
 		switch node.NodeType {
 		case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll:
 			// check if we were generated in an AfterNode that has already run
@@ -245,9 +256,13 @@
 			if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel {
 				return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run
 			}
-		case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed...
+		case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: // the spec has failed...
 			if isFinalAttempt {
-				return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run
+				if g.continueOnFailure {
+					return isLastSpecWithPair || failedInARunOnceBefore //...we're configured to continue on failures - so we should only run if we're the last spec for this pair or if we failed in a runOnceBefore (which means we _are_ the last spec to run)
+				} else {
+					return true //...this was the last attempt and continueOnFailure is false therefore we are the last spec to run and so the AfterNode should run
+				}
 			}
 			if !terminatingPair.isZero() { // ...and it failed in a run-once. which will be running again
 				if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) {
@@ -280,10 +295,12 @@
 		includeDeferCleanups = true
 	}

+	return failedInARunOnceBefore
 }

 func (g *group) run(specs Specs) {
 	g.specs = specs
+	g.continueOnFailure = specs[0].Nodes.FirstNodeMarkedOrdered().MarkedContinueOnFailure
 	for _, spec := range g.specs {
 		g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec)
 	}
@@ -300,8 +317,8 @@
 		skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending)

 		g.suite.currentSpecReport.StartTime = time.Now()
+		failedInARunOnceBefore := false
 		if !skip {
-
 			var maxAttempts = 1

 			if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
@@ -319,14 +336,14 @@
 				g.suite.outputInterceptor.StartInterceptingOutput()
 				if attempt > 0 {
 					if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
-						fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Passed. Repeating...\n", attempt)
+						g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRepeat, Attempt: attempt})
 					}
 					if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
-						fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt)
+						g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRetry, Attempt: attempt})
 					}
 				}

-				g.attemptSpec(attempt == maxAttempts-1, spec)
+				failedInARunOnceBefore = g.attemptSpec(attempt == maxAttempts-1, spec)

 				g.suite.currentSpecReport.EndTime = time.Now()
 				g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime)
@@ -341,6 +358,10 @@
 				if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
 					if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
 						break
+					} else if attempt < maxAttempts-1 {
+						af := types.AdditionalFailure{State: g.suite.currentSpecReport.State, Failure: g.suite.currentSpecReport.Failure}
+						af.Failure.Message = fmt.Sprintf("Failure recorded during attempt %d:\n%s", attempt+1, af.Failure.Message)
+						g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, af)
 					}
 				}
 			}
@@ -350,6 +371,7 @@
 		g.suite.processCurrentSpecReport()
 		if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
 			g.succeeded = false
+			g.failedInARunOnceBefore = g.failedInARunOnceBefore || failedInARunOnceBefore
 		}
 		g.suite.selectiveLock.Lock()
 		g.suite.currentSpecReport = types.SpecReport{}
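The continueOnFailure plumbing above backs the new ContinueOnFailure decorator. A minimal usage sketch (the spec is hypothetical; the decorators are the ones introduced in this diff):

package mypkg_test

import . "github.com/onsi/ginkgo/v2"

var _ = Describe("ordered specs", Ordered, ContinueOnFailure, func() {
	BeforeAll(func() {
		// if this BeforeAll fails, the remaining specs are skipped with
		// "Spec skipped because a BeforeAll node failed"
	})
	It("runs first", func() {})
	It("still runs even if the spec before it failed", func() {})
})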
47 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go generated vendored
@@ -10,7 +10,7 @@ import (
 	"github.com/onsi/ginkgo/v2/internal/parallel_support"
 )

-const ABORT_POLLING_INTERVAL = 500 * time.Millisecond
+var ABORT_POLLING_INTERVAL = 500 * time.Millisecond

 type InterruptCause uint

@@ -62,13 +62,14 @@ type InterruptHandlerInterface interface {
 }

 type InterruptHandler struct {
-	c       chan interface{}
-	lock    *sync.Mutex
-	level   InterruptLevel
-	cause   InterruptCause
-	client  parallel_support.Client
-	stop    chan interface{}
-	signals []os.Signal
+	c                 chan interface{}
+	lock              *sync.Mutex
+	level             InterruptLevel
+	cause             InterruptCause
+	client            parallel_support.Client
+	stop              chan interface{}
+	signals           []os.Signal
+	requestAbortCheck chan interface{}
 }

 func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler {
@@ -76,11 +77,12 @@ func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *
 		signals = []os.Signal{os.Interrupt, syscall.SIGTERM}
 	}
 	handler := &InterruptHandler{
-		c:       make(chan interface{}),
-		lock:    &sync.Mutex{},
-		stop:    make(chan interface{}),
-		client:  client,
-		signals: signals,
+		c:                 make(chan interface{}),
+		lock:              &sync.Mutex{},
+		stop:              make(chan interface{}),
+		requestAbortCheck: make(chan interface{}),
+		client:            client,
+		signals:           signals,
 	}
 	handler.registerForInterrupts()
 	return handler
@@ -109,6 +111,12 @@ func (handler *InterruptHandler) registerForInterrupts() {
 					pollTicker.Stop()
 					return
 				}
+			case <-handler.requestAbortCheck:
+				if handler.client.ShouldAbort() {
+					close(abortChannel)
+					pollTicker.Stop()
+					return
+				}
 			case <-handler.stop:
 				pollTicker.Stop()
 				return
@@ -152,11 +160,18 @@ func (handler *InterruptHandler) registerForInterrupts() {

 func (handler *InterruptHandler) Status() InterruptStatus {
 	handler.lock.Lock()
-	defer handler.lock.Unlock()
-
-	return InterruptStatus{
+	status := InterruptStatus{
 		Level:   handler.level,
 		Channel: handler.c,
 		Cause:   handler.cause,
 	}
+	handler.lock.Unlock()
+
+	if handler.client != nil && handler.client.ShouldAbort() && !status.Interrupted() {
+		close(handler.requestAbortCheck)
+		<-status.Channel
+		return handler.Status()
+	}
+
+	return status
 }
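The reworked Status() relies on a small channel handshake: closing requestAbortCheck wakes the polling goroutine early, which confirms the abort and closes the broadcast channel every waiter is blocked on. A standalone sketch of just that handshake (illustrative names, not the vendored code):

package main

import "fmt"

func main() {
	requestCheck := make(chan interface{}) // closed by a reader to force an immediate re-check
	broadcast := make(chan interface{})    // closed by the poller once the abort is confirmed

	go func() {
		<-requestCheck   // woken right away instead of waiting for the next poll tick
		close(broadcast) // closing a channel signals every blocked receiver at once
	}()

	close(requestCheck) // ask for the re-check...
	<-broadcast         // ...and wait for the confirmation broadcast
	fmt.Println("abort observed")
}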
71 vendor/github.com/onsi/ginkgo/v2/internal/node.go generated vendored
@@ -44,23 +44,23 @@ type Node struct {
 	SynchronizedAfterSuiteProc1Body           func(SpecContext)
 	SynchronizedAfterSuiteProc1BodyHasContext bool

-	ReportEachBody       func(types.SpecReport)
-	ReportAfterSuiteBody func(types.Report)
+	ReportEachBody  func(types.SpecReport)
+	ReportSuiteBody func(types.Report)

-	MarkedFocus                     bool
-	MarkedPending                   bool
-	MarkedSerial                    bool
-	MarkedOrdered                   bool
-	MarkedOncePerOrdered            bool
-	MarkedSuppressProgressReporting bool
-	FlakeAttempts                   int
-	MustPassRepeatedly              int
-	Labels                          Labels
-	PollProgressAfter               time.Duration
-	PollProgressInterval            time.Duration
-	NodeTimeout                     time.Duration
-	SpecTimeout                     time.Duration
-	GracePeriod                     time.Duration
+	MarkedFocus             bool
+	MarkedPending           bool
+	MarkedSerial            bool
+	MarkedOrdered           bool
+	MarkedContinueOnFailure bool
+	MarkedOncePerOrdered    bool
+	FlakeAttempts           int
+	MustPassRepeatedly      int
+	Labels                  Labels
+	PollProgressAfter       time.Duration
+	PollProgressInterval    time.Duration
+	NodeTimeout             time.Duration
+	SpecTimeout             time.Duration
+	GracePeriod             time.Duration

 	NodeIDWhereCleanupWasGenerated uint
 }
@@ -70,6 +70,7 @@
 type pendingType bool
 type serialType bool
 type orderedType bool
+type continueOnFailureType bool
 type honorsOrderedType bool
 type suppressProgressReporting bool

@@ -77,6 +78,7 @@
 const Pending = pendingType(true)
 const Serial = serialType(true)
 const Ordered = orderedType(true)
+const ContinueOnFailure = continueOnFailureType(true)
 const OncePerOrdered = honorsOrderedType(true)
 const SuppressProgressReporting = suppressProgressReporting(true)

@@ -91,6 +93,10 @@
 type SpecTimeout time.Duration
 type GracePeriod time.Duration

+func (l Labels) MatchesLabelFilter(query string) bool {
+	return types.MustParseLabelFilter(query)(l)
+}
+
 func UnionOfLabels(labels ...Labels) Labels {
 	out := Labels{}
 	seen := map[string]bool{}
@@ -134,6 +140,8 @@ func isDecoration(arg interface{}) bool {
 		return true
 	case t == reflect.TypeOf(Ordered):
 		return true
+	case t == reflect.TypeOf(ContinueOnFailure):
+		return true
 	case t == reflect.TypeOf(OncePerOrdered):
 		return true
 	case t == reflect.TypeOf(SuppressProgressReporting):
@@ -242,16 +250,18 @@
 			if !nodeType.Is(types.NodeTypeContainer) {
 				appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered"))
 			}
+		case t == reflect.TypeOf(ContinueOnFailure):
+			node.MarkedContinueOnFailure = bool(arg.(continueOnFailureType))
+			if !nodeType.Is(types.NodeTypeContainer) {
+				appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "ContinueOnFailure"))
+			}
 		case t == reflect.TypeOf(OncePerOrdered):
 			node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType))
 			if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
 				appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered"))
 			}
 		case t == reflect.TypeOf(SuppressProgressReporting):
 			node.MarkedSuppressProgressReporting = bool(arg.(suppressProgressReporting))
-			if nodeType.Is(types.NodeTypeContainer) {
-				appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SuppressProgressReporting"))
-			}
 			deprecationTracker.TrackDeprecation(types.Deprecations.SuppressProgressReporting())
 		case t == reflect.TypeOf(FlakeAttempts(0)):
 			node.FlakeAttempts = int(arg.(FlakeAttempts))
 			if !nodeType.Is(types.NodeTypesForContainerAndIt) {
@@ -321,9 +331,9 @@
 				trackedFunctionError = true
 				break
 			}
-		} else if nodeType.Is(types.NodeTypeReportAfterSuite) {
-			if node.ReportAfterSuiteBody == nil {
-				node.ReportAfterSuiteBody = arg.(func(types.Report))
+		} else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
+			if node.ReportSuiteBody == nil {
+				node.ReportSuiteBody = arg.(func(types.Report))
 			} else {
 				appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
 				trackedFunctionError = true
@@ -390,13 +400,17 @@
 		appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
 	}

+	if node.MarkedContinueOnFailure && !node.MarkedOrdered {
+		appendError(types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation))
+	}
+
 	hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext

 	if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 {
 		appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType))
 	}

-	if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError {
+	if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportBeforeSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError {
 		appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
 	}

@@ -861,6 +875,15 @@ func (n Nodes) FirstNodeMarkedOrdered() Node {
 	return Node{}
 }

+func (n Nodes) IndexOfFirstNodeMarkedOrdered() int {
+	for i := range n {
+		if n[i].MarkedOrdered {
+			return i
+		}
+	}
+	return -1
+}
+
 func (n Nodes) GetMaxFlakeAttempts() int {
 	maxFlakeAttempts := 0
 	for i := range n {
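The hasContext validation above is why the timeout decorators require a context-accepting body. A usage sketch (the decorators are real; the spec is hypothetical):

package mypkg_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
)

var _ = It("finishes within a second", func(ctx SpecContext) {
	// the body must accept a SpecContext (or context.Context) for
	// NodeTimeout/SpecTimeout/GracePeriod to be valid decorators
	select {
	case <-time.After(10 * time.Second): // too slow: ctx is canceled first
	case <-ctx.Done():
	}
}, NodeTimeout(time.Second))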
84 vendor/github.com/onsi/ginkgo/v2/internal/ordering.go generated vendored
@@ -7,6 +7,65 @@ import (
 	"github.com/onsi/ginkgo/v2/types"
 )

+type SortableSpecs struct {
+	Specs   Specs
+	Indexes []int
+}
+
+func NewSortableSpecs(specs Specs) *SortableSpecs {
+	indexes := make([]int, len(specs))
+	for i := range specs {
+		indexes[i] = i
+	}
+	return &SortableSpecs{
+		Specs:   specs,
+		Indexes: indexes,
+	}
+}
+func (s *SortableSpecs) Len() int      { return len(s.Indexes) }
+func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[j], s.Indexes[i] }
+func (s *SortableSpecs) Less(i, j int) bool {
+	a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]]
+
+	aNodes, bNodes := a.Nodes.WithType(types.NodeTypesForContainerAndIt), b.Nodes.WithType(types.NodeTypesForContainerAndIt)
+
+	firstOrderedAIdx, firstOrderedBIdx := aNodes.IndexOfFirstNodeMarkedOrdered(), bNodes.IndexOfFirstNodeMarkedOrdered()
+	if firstOrderedAIdx > -1 && firstOrderedBIdx > -1 && aNodes[firstOrderedAIdx].ID == bNodes[firstOrderedBIdx].ID {
+		// strictly preserve order within ordered containers. ID will track this as IDs are generated monotonically
+		return aNodes.FirstNodeWithType(types.NodeTypeIt).ID < bNodes.FirstNodeWithType(types.NodeTypeIt).ID
+	}
+
+	// if either spec is in an ordered container - only use the nodes up to the outermost ordered container
+	if firstOrderedAIdx > -1 {
+		aNodes = aNodes[:firstOrderedAIdx+1]
+	}
+	if firstOrderedBIdx > -1 {
+		bNodes = bNodes[:firstOrderedBIdx+1]
+	}
+
+	for i := 0; i < len(aNodes) && i < len(bNodes); i++ {
+		aCL, bCL := aNodes[i].CodeLocation, bNodes[i].CodeLocation
+		if aCL.FileName != bCL.FileName {
+			return aCL.FileName < bCL.FileName
+		}
+		if aCL.LineNumber != bCL.LineNumber {
+			return aCL.LineNumber < bCL.LineNumber
+		}
+	}
+	// either everything is equal or we have different lengths of CLs
+	if len(aNodes) != len(bNodes) {
+		return len(aNodes) < len(bNodes)
+	}
+	// ok, now we are sure everything was equal. so we use the spec text to break ties
+	for i := 0; i < len(aNodes); i++ {
+		if aNodes[i].Text != bNodes[i].Text {
+			return aNodes[i].Text < bNodes[i].Text
+		}
+	}
+	// ok, all those texts were equal. we'll use the ID of the most deeply nested node as a last resort
+	return aNodes[len(aNodes)-1].ID < bNodes[len(bNodes)-1].ID
+}
+
 type GroupedSpecIndices []SpecIndices
 type SpecIndices []int

@@ -28,12 +87,17 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
 	// Seed a new random source based on the configured random seed.
 	r := rand.New(rand.NewSource(suiteConfig.RandomSeed))

-	// first break things into execution groups
+	// first, we sort the entire suite to ensure a deterministic order. the sort is performed by filename, then line number, and then spec text. this ensures every parallel process has the exact same spec order and is only necessary to cover the edge case where the user iterates over a map to generate specs.
+	sortableSpecs := NewSortableSpecs(specs)
+	sort.Sort(sortableSpecs)
+
+	// then we break things into execution groups
 	// a group represents a single unit of execution and is a collection of SpecIndices
 	// usually a group is just a single spec, however ordered containers must be preserved as a single group
 	executionGroupIDs := []uint{}
 	executionGroups := map[uint]SpecIndices{}
-	for idx, spec := range specs {
+	for _, idx := range sortableSpecs.Indexes {
+		spec := specs[idx]
 		groupNode := spec.Nodes.FirstNodeMarkedOrdered()
 		if groupNode.IsZero() {
 			groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt)
@@ -48,7 +112,6 @@
 	// we shuffle outermost containers. so we need to form shufflable groupings of GroupIDs
 	shufflableGroupingIDs := []uint{}
 	shufflableGroupingIDToGroupIDs := map[uint][]uint{}
-	shufflableGroupingsIDToSortKeys := map[uint]string{}

 	// for each execution group we're going to have to pick a node to represent how the
 	// execution group is grouped for shuffling:
@@ -57,7 +120,7 @@
 		nodeTypesToShuffle = types.NodeTypeIt
 	}

-	//so, fo reach execution group:
+	//so, for each execution group:
 	for _, groupID := range executionGroupIDs {
 		// pick out a representative spec
 		representativeSpec := specs[executionGroups[groupID][0]]
@@ -72,22 +135,9 @@
 		if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 {
 			// record the shuffleable group ID
 			shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID)
-			// and record the sort key to use
-			shufflableGroupingsIDToSortKeys[shufflableGroupingNode.ID] = shufflableGroupingNode.CodeLocation.String()
 		}
 	}

-	// now we sort the shufflable groups by the sort key. We use the shufflable group nodes code location and break ties using its node id
-	sort.SliceStable(shufflableGroupingIDs, func(i, j int) bool {
-		keyA := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[i]]
-		keyB := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[j]]
-		if keyA == keyB {
-			return shufflableGroupingIDs[i] < shufflableGroupingIDs[j]
-		} else {
-			return keyA < keyB
-		}
-	})
-
 	// now we permute the sorted shufflable grouping IDs and build the ordered Groups
 	orderedGroups := GroupedSpecIndices{}
 	permutation := r.Perm(len(shufflableGroupingIDs))
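The deterministic pre-sort exists because Go randomizes map iteration, so specs generated from a map used to land in different orders on different parallel processes. A sketch of the pattern it defends against (hypothetical table-driven specs):

package mypkg_test

import . "github.com/onsi/ginkgo/v2"

var _ = Describe("table generated from a map", func() {
	cases := map[string]int{"alpha": 1, "beta": 2}
	for name, value := range cases { // map iteration order is deliberately randomized in Go
		name, value := name, value // capture the loop variables
		It("handles "+name, func() { _ = value })
	}
})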
11 vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go generated vendored
@@ -26,6 +26,17 @@ func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.Fil
 	stdoutCloneFD, _ := unix.Dup(1)
 	stderrCloneFD, _ := unix.Dup(2)

+	// Important: set the fds to FD_CLOEXEC to prevent them from leaking into child processes
+	// https://github.com/onsi/ginkgo/issues/1191
+	flags, err := unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_GETFD, 0)
+	if err == nil {
+		unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC)
+	}
+	flags, err = unix.FcntlInt(uintptr(stderrCloneFD), unix.F_GETFD, 0)
+	if err == nil {
+		unix.FcntlInt(uintptr(stderrCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC)
+	}
+
 	// And then wrap the clone file descriptors in files.
 	// One benefit of this (that we don't use yet) is that we can actually write
 	// to these files to emit output to the console even though we're intercepting output
2 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go generated vendored
@@ -42,6 +42,8 @@ type Client interface {
 	PostSuiteWillBegin(report types.Report) error
 	PostDidRun(report types.SpecReport) error
 	PostSuiteDidEnd(report types.Report) error
+	PostReportBeforeSuiteCompleted(state types.SpecState) error
+	BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error)
 	PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error
 	BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error)
 	BlockUntilNonprimaryProcsHaveFinished() error
13 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go generated vendored
@@ -98,6 +98,19 @@ func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) er
 	return client.post("/progress-report", report)
 }

+func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
+	return client.post("/report-before-suite-completed", state)
+}
+
+func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
+	var state types.SpecState
+	err := client.poll("/report-before-suite-state", &state)
+	if err == ErrorGone {
+		return types.SpecStateFailed, nil
+	}
+	return state, err
+}
+
 func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
 	beforeSuiteState := BeforeSuiteState{
 		State: state,
29 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go generated vendored
@@ -26,7 +26,7 @@ type httpServer struct {
 	handler  *ServerHandler
 }

-//Create a new server, automatically selecting a port
+// Create a new server, automatically selecting a port
 func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) {
 	listener, err := net.Listen("tcp", "127.0.0.1:0")
 	if err != nil {
@@ -38,7 +38,7 @@ func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer,
 	}, nil
 }

-//Start the server. You don't need to `go s.Start()`, just `s.Start()`
+// Start the server. You don't need to `go s.Start()`, just `s.Start()`
 func (server *httpServer) Start() {
 	httpServer := &http.Server{}
 	mux := http.NewServeMux()
@@ -52,6 +52,8 @@ func (server *httpServer) Start() {
 	mux.HandleFunc("/progress-report", server.emitProgressReport)

 	//synchronization endpoints
+	mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted)
+	mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState)
 	mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted)
 	mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState)
 	mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished)
@@ -63,12 +65,12 @@ func (server *httpServer) Start() {
 	go httpServer.Serve(server.listener)
 }

-//Stop the server
+// Stop the server
 func (server *httpServer) Close() {
 	server.listener.Close()
 }

-//The address the server can be reached at. Pass this into the `ForwardingReporter`.
+// The address the server can be reached at. Pass this into the `ForwardingReporter`.
 func (server *httpServer) Address() string {
 	return "http://" + server.listener.Addr().String()
 }
@@ -93,7 +95,7 @@ func (server *httpServer) RegisterAlive(node int, alive func() bool) {
 // Streaming Endpoints
 //

-//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
+// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
 func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool {
 	defer request.Body.Close()
 	if json.NewDecoder(request.Body).Decode(object) != nil {
@@ -164,6 +166,23 @@ func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request
 	server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer)
 }

+func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
+	var state types.SpecState
+	if !server.decode(writer, request, &state) {
+		return
+	}
+
+	server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer)
+}
+
+func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
+	var state types.SpecState
+	if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) {
+		return
+	}
+	json.NewEncoder(writer).Encode(state)
+}
+
 func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
 	var beforeSuiteState BeforeSuiteState
 	if !server.decode(writer, request, &beforeSuiteState) {
13 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go generated vendored
@@ -76,6 +76,19 @@ func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) err
 	return client.client.Call("Server.EmitProgressReport", report, voidReceiver)
 }

+func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
+	return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver)
+}
+
+func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
+	var state types.SpecState
+	err := client.poll("Server.ReportBeforeSuiteState", &state)
+	if err == ErrorGone {
+		return types.SpecStateFailed, nil
+	}
+	return state, err
+}
+
 func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
 	beforeSuiteState := BeforeSuiteState{
 		State: state,
55 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go generated vendored
@@ -18,16 +18,17 @@ var voidSender Void
 // It handles all the business logic to avoid duplication between the two servers

 type ServerHandler struct {
-	done              chan interface{}
-	outputDestination io.Writer
-	reporter          reporters.Reporter
-	alives            []func() bool
-	lock              *sync.Mutex
-	beforeSuiteState  BeforeSuiteState
-	parallelTotal     int
-	counter           int
-	counterLock       *sync.Mutex
-	shouldAbort       bool
+	done                   chan interface{}
+	outputDestination      io.Writer
+	reporter               reporters.Reporter
+	alives                 []func() bool
+	lock                   *sync.Mutex
+	beforeSuiteState       BeforeSuiteState
+	reportBeforeSuiteState types.SpecState
+	parallelTotal          int
+	counter                int
+	counterLock            *sync.Mutex
+	shouldAbort            bool

 	numSuiteDidBegins int
 	numSuiteDidEnds   int
@@ -37,11 +38,12 @@

 func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler {
 	return &ServerHandler{
-		reporter:         reporter,
-		lock:             &sync.Mutex{},
-		counterLock:      &sync.Mutex{},
-		alives:           make([]func() bool, parallelTotal),
-		beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid},
+		reporter:         reporter,
+		lock:             &sync.Mutex{},
+		counterLock:      &sync.Mutex{},
+		alives:           make([]func() bool, parallelTotal),
+		beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid},
+
 		parallelTotal:     parallelTotal,
 		outputDestination: os.Stdout,
 		done:              make(chan interface{}),
@@ -140,6 +142,29 @@ func (handler *ServerHandler) haveNonprimaryProcsFinished() bool {
 	return true
 }

+func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error {
+	handler.lock.Lock()
+	defer handler.lock.Unlock()
+	handler.reportBeforeSuiteState = reportBeforeSuiteState
+
+	return nil
+}
+
+func (handler *ServerHandler) ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error {
+	proc1IsAlive := handler.procIsAlive(1)
+	handler.lock.Lock()
+	defer handler.lock.Unlock()
+	if handler.reportBeforeSuiteState == types.SpecStateInvalid {
+		if proc1IsAlive {
+			return ErrorEarly
+		} else {
+			return ErrorGone
+		}
+	}
+	*reportBeforeSuiteState = handler.reportBeforeSuiteState
+	return nil
+}
+
 func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error {
 	handler.lock.Lock()
 	defer handler.lock.Unlock()
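ReportBeforeSuiteState distinguishes "not reported yet" (ErrorEarly, retry) from "proc 1 exited before reporting" (ErrorGone, give up). A hedged, standalone sketch of the polling contract this implies for clients (the poll function here is a stand-in, not the vendored RPC call):

package main

import (
	"errors"
	"fmt"
	"time"
)

var (
	ErrorEarly = errors.New("early") // state not posted yet, proc 1 still alive
	ErrorGone  = errors.New("gone")  // proc 1 exited before posting a state
)

func main() {
	attempts := 0
	poll := func() error { // stand-in for the HTTP/RPC poll call
		attempts++
		if attempts < 3 {
			return ErrorEarly
		}
		return nil
	}

	for {
		err := poll()
		if err == ErrorEarly {
			time.Sleep(10 * time.Millisecond) // retry until proc 1 posts or goes away
			continue
		}
		if err == ErrorGone {
			fmt.Println("proc 1 gone: callers map this to SpecStateFailed")
		}
		break
	}
	fmt.Println("state received after", attempts, "attempts")
}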
26 vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go generated vendored
@@ -48,13 +48,10 @@ type ProgressStepCursor struct {
 	StartTime time.Time
 }

-func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep ProgressStepCursor, gwOutput string, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) {
+func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep types.SpecEvent, gwOutput string, timelineLocation types.TimelineLocation, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) {
 	pr := types.ProgressReport{
-		ParallelProcess:   report.ParallelProcess,
-		RunningInParallel: isRunningInParallel,
-
-		Time: time.Now(),
-
+		ParallelProcess:         report.ParallelProcess,
+		RunningInParallel:       isRunningInParallel,
 		ContainerHierarchyTexts: report.ContainerHierarchyTexts,
 		LeafNodeText:            report.LeafNodeText,
 		LeafNodeLocation:        report.LeafNodeLocation,
@@ -65,14 +62,14 @@
 		CurrentNodeLocation:  currentNode.CodeLocation,
 		CurrentNodeStartTime: currentNodeStartTime,

-		CurrentStepText:      currentStep.Text,
+		CurrentStepText:      currentStep.Message,
 		CurrentStepLocation:  currentStep.CodeLocation,
-		CurrentStepStartTime: currentStep.StartTime,
+		CurrentStepStartTime: currentStep.TimelineLocation.Time,

 		AdditionalReports: additionalReports,

 		CapturedGinkgoWriterOutput: gwOutput,
-		GinkgoWriterOffset:         len(gwOutput),
+		TimelineLocation:           timelineLocation,
 	}

 	goroutines, err := extractRunningGoroutines()
@@ -186,7 +183,6 @@ func extractRunningGoroutines() ([]types.Goroutine, error) {
 			break
 		}
 	}
-
 	r := bufio.NewReader(bytes.NewReader(stack))
 	out := []types.Goroutine{}
 	idx := -1
@@ -234,12 +230,12 @@ func extractRunningGoroutines() ([]types.Goroutine, error) {
 				return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function))
 			}
 			line = strings.TrimLeft(line, " \t")
-			fields := strings.SplitN(line, ":", 2)
-			if len(fields) != 2 {
-				return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename nad line number: %s", line))
+			delimiterIdx := strings.LastIndex(line, ":")
+			if delimiterIdx == -1 {
+				return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename and line number: %s", line))
 			}
-			functionCall.Filename = fields[0]
-			line = strings.Split(fields[1], " ")[0]
+			functionCall.Filename = line[:delimiterIdx]
+			line = strings.Split(line[delimiterIdx+1:], " ")[0]
 			lineNumber, err := strconv.ParseInt(line, 10, 64)
 			functionCall.Line = int(lineNumber)
 			if err != nil {
79 vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go generated vendored Normal file
@@ -0,0 +1,79 @@
+package internal
+
+import (
+	"context"
+	"sort"
+	"strings"
+	"sync"
+
+	"github.com/onsi/ginkgo/v2/types"
+)
+
+type ProgressReporterManager struct {
+	lock              *sync.Mutex
+	progressReporters map[int]func() string
+	prCounter         int
+}
+
+func NewProgressReporterManager() *ProgressReporterManager {
+	return &ProgressReporterManager{
+		progressReporters: map[int]func() string{},
+		lock:              &sync.Mutex{},
+	}
+}
+
+func (prm *ProgressReporterManager) AttachProgressReporter(reporter func() string) func() {
+	prm.lock.Lock()
+	defer prm.lock.Unlock()
+	prm.prCounter += 1
+	prCounter := prm.prCounter
+	prm.progressReporters[prCounter] = reporter
+
+	return func() {
+		prm.lock.Lock()
+		defer prm.lock.Unlock()
+		delete(prm.progressReporters, prCounter)
+	}
+}
+
+func (prm *ProgressReporterManager) QueryProgressReporters(ctx context.Context, failer *Failer) []string {
+	prm.lock.Lock()
+	keys := []int{}
+	for key := range prm.progressReporters {
+		keys = append(keys, key)
+	}
+	sort.Ints(keys)
+	reporters := []func() string{}
+	for _, key := range keys {
+		reporters = append(reporters, prm.progressReporters[key])
+	}
+	prm.lock.Unlock()
+
+	if len(reporters) == 0 {
+		return nil
+	}
+	out := []string{}
+	for _, reporter := range reporters {
+		reportC := make(chan string, 1)
+		go func() {
+			defer func() {
+				e := recover()
+				if e != nil {
+					failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
+					reportC <- "failed to query attached progress reporter"
+				}
+			}()
+			reportC <- reporter()
+		}()
+		var report string
+		select {
+		case report = <-reportC:
+		case <-ctx.Done():
+			return out
+		}
+		if strings.TrimSpace(report) != "" {
+			out = append(out, report)
+		}
+	}
+	return out
+}
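From a spec, this manager is reached through the SpecContext passed to the body, and detaching is just calling the returned closure. A usage sketch (AttachProgressReporter is the real API; the spec is hypothetical):

package mypkg_test

import (
	"fmt"

	. "github.com/onsi/ginkgo/v2"
)

var _ = It("reports custom progress", func(ctx SpecContext) {
	done := 0
	detach := ctx.AttachProgressReporter(func() string {
		return fmt.Sprintf("processed %d/100 items", done)
	})
	defer detach() // remove the reporter when the spec body exits

	for ; done < 100; done++ {
		// long-running work; a progress report (e.g. one triggered by
		// PollProgressAfter) includes the string returned above
	}
})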
21 vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go generated vendored
@@ -1,7 +1,6 @@
 package internal

 import (
-	"reflect"
 	"time"

 	"github.com/onsi/ginkgo/v2/types"
@@ -13,20 +12,20 @@ func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (Re
 	out := ReportEntry{
 		Visibility: types.ReportEntryVisibilityAlways,
 		Name:       name,
-		Time:       time.Now(),
 		Location:   cl,
+		Time:       time.Now(),
 	}
 	var didSetValue = false
 	for _, arg := range args {
-		switch reflect.TypeOf(arg) {
-		case reflect.TypeOf(types.ReportEntryVisibilityAlways):
-			out.Visibility = arg.(types.ReportEntryVisibility)
-		case reflect.TypeOf(types.CodeLocation{}):
-			out.Location = arg.(types.CodeLocation)
-		case reflect.TypeOf(Offset(0)):
-			out.Location = types.NewCodeLocation(2 + int(arg.(Offset)))
-		case reflect.TypeOf(out.Time):
-			out.Time = arg.(time.Time)
+		switch x := arg.(type) {
+		case types.ReportEntryVisibility:
+			out.Visibility = x
+		case types.CodeLocation:
+			out.Location = x
+		case Offset:
+			out.Location = types.NewCodeLocation(2 + int(x))
+		case time.Time:
+			out.Time = x
 		default:
 			if didSetValue {
 				return ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg)
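The type switch above is what lets AddReportEntry accept its optional arguments in any order. A usage sketch (AddReportEntry is the real public API; the entry itself is hypothetical):

package mypkg_test

import (
	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/types"
)

var _ = It("records diagnostics", func() {
	// visibility values, code locations, offsets, and timestamps are all
	// recognized by type, so their position among the args does not matter
	AddReportEntry("cluster-state", types.ReportEntryVisibilityFailureOrVerbose, "3 nodes ready")
})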
53 vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go generated vendored
@@ -2,8 +2,6 @@ package internal
 import (
 	"context"
-	"sort"
-	"sync"

 	"github.com/onsi/ginkgo/v2/types"
 )
@@ -17,11 +15,9 @@ type SpecContext interface {

 type specContext struct {
 	context.Context
+	*ProgressReporterManager

-	cancel            context.CancelFunc
-	lock              *sync.Mutex
-	progressReporters map[int]func() string
-	prCounter         int
+	cancel context.CancelFunc

 	suite *Suite
 }
@@ -36,11 +32,9 @@ This is because Ginkgo needs finer control over when the context is canceled. S
 func NewSpecContext(suite *Suite) *specContext {
 	ctx, cancel := context.WithCancel(context.Background())
 	sc := &specContext{
-		cancel:            cancel,
-		suite:             suite,
-		lock:              &sync.Mutex{},
-		prCounter:         0,
-		progressReporters: map[int]func() string{},
+		cancel:                  cancel,
+		suite:                   suite,
+		ProgressReporterManager: NewProgressReporterManager(),
 	}
 	ctx = context.WithValue(ctx, "GINKGO_SPEC_CONTEXT", sc) //yes, yes, the go docs say don't use a string for a key... but we'd rather avoid a circular dependency between Gomega and Ginkgo
 	sc.Context = ctx //thank goodness for garbage collectors that can handle circular dependencies
@@ -51,40 +45,3 @@ func NewSpecContext(suite *Suite) *specContext {
 func (sc *specContext) SpecReport() types.SpecReport {
 	return sc.suite.CurrentSpecReport()
 }
-
-func (sc *specContext) AttachProgressReporter(reporter func() string) func() {
-	sc.lock.Lock()
-	defer sc.lock.Unlock()
-	sc.prCounter += 1
-	prCounter := sc.prCounter
-	sc.progressReporters[prCounter] = reporter
-
-	return func() {
-		sc.lock.Lock()
-		defer sc.lock.Unlock()
-		delete(sc.progressReporters, prCounter)
-	}
-}
-
-func (sc *specContext) QueryProgressReporters() []string {
-	sc.lock.Lock()
-	keys := []int{}
-	for key := range sc.progressReporters {
-		keys = append(keys, key)
-	}
-	sort.Ints(keys)
-	reporters := []func() string{}
-	for _, key := range keys {
-		reporters = append(reporters, sc.progressReporters[key])
-	}
-	sc.lock.Unlock()
-
-	if len(reporters) == 0 {
-		return nil
-	}
-	out := []string{}
-	for _, reporter := range reporters {
-		out = append(out, reporter())
-	}
-	return out
-}
305 vendor/github.com/onsi/ginkgo/v2/internal/suite.go generated vendored
@@ -9,6 +9,7 @@ import (
 	"github.com/onsi/ginkgo/v2/internal/parallel_support"
 	"github.com/onsi/ginkgo/v2/reporters"
 	"github.com/onsi/ginkgo/v2/types"
+	"golang.org/x/net/context"
 )

 type Phase uint

@@ -19,10 +20,14 @@ const (
 	PhaseRun
 )

+var PROGRESS_REPORTER_DEADLING = 5 * time.Second
+
 type Suite struct {
 	tree               *TreeNode
 	topLevelContainers Nodes

+	*ProgressReporterManager
+
 	phase Phase

 	suiteNodes Nodes
@@ -44,7 +49,8 @@ type Suite struct {

 	currentSpecContext *specContext

-	progressStepCursor ProgressStepCursor
+	currentByStep types.SpecEvent
+	timelineOrder int

 	/*
 		We don't need to lock around all operations. Just those that *could* happen concurrently.
@@ -63,8 +69,9 @@ type Suite struct {

 func NewSuite() *Suite {
 	return &Suite{
-		tree:  &TreeNode{},
-		phase: PhaseBuildTopLevel,
+		tree:                    &TreeNode{},
+		phase:                   PhaseBuildTopLevel,
+		ProgressReporterManager: NewProgressReporterManager(),

 		selectiveLock: &sync.Mutex{},
 	}
@@ -128,7 +135,7 @@ func (suite *Suite) PushNode(node Node) error {
 		return suite.pushCleanupNode(node)
 	}

-	if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) {
+	if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeBeforeSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
 		return suite.pushSuiteNode(node)
 	}

@@ -150,6 +157,13 @@ func (suite *Suite) PushNode(node Node) error {
 		}
 	}

+	if node.MarkedContinueOnFailure {
+		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
+		if !firstOrderedNode.IsZero() {
+			return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)
+		}
+	}
+
 	if node.NodeType == types.NodeTypeContainer {
 		// During PhaseBuildTopLevel we only track the top level containers without entering them
 		// We only enter the top level container nodes during PhaseBuildTree
@@ -221,7 +235,7 @@ func (suite *Suite) pushCleanupNode(node Node) error {
 		node.NodeType = types.NodeTypeCleanupAfterSuite
 	case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
 		node.NodeType = types.NodeTypeCleanupAfterAll
-	case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite:
+	case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite:
 		return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
 	case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
 		return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
@@ -236,19 +250,69 @@ func (suite *Suite) pushCleanupNode(node Node) error {
 	return nil
 }

-/*
-Pushing and popping the Step Cursor stack
-*/
-
-func (suite *Suite) SetProgressStepCursor(cursor ProgressStepCursor) {
+func (suite *Suite) generateTimelineLocation() types.TimelineLocation {
 	suite.selectiveLock.Lock()
 	defer suite.selectiveLock.Unlock()

-	suite.progressStepCursor = cursor
+	suite.timelineOrder += 1
+	return types.TimelineLocation{
+		Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(),
+		Order:  suite.timelineOrder,
+		Time:   time.Now(),
+	}
 }

+func (suite *Suite) handleSpecEvent(event types.SpecEvent) types.SpecEvent {
+	event.TimelineLocation = suite.generateTimelineLocation()
+	suite.selectiveLock.Lock()
+	suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
+	suite.selectiveLock.Unlock()
+	suite.reporter.EmitSpecEvent(event)
+	return event
+}
+
+func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) {
+	event := startEvent
+	event.SpecEventType = eventType
+	event.TimelineLocation = suite.generateTimelineLocation()
+	event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time)
+	suite.selectiveLock.Lock()
+	suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
+	suite.selectiveLock.Unlock()
+	suite.reporter.EmitSpecEvent(event)
+}
+
+func (suite *Suite) By(text string, callback ...func()) error {
+	cl := types.NewCodeLocation(2)
+	if suite.phase != PhaseRun {
+		return types.GinkgoErrors.ByNotDuringRunPhase(cl)
+	}
+
+	event := suite.handleSpecEvent(types.SpecEvent{
+		SpecEventType: types.SpecEventByStart,
+		CodeLocation:  cl,
+		Message:       text,
+	})
+	suite.selectiveLock.Lock()
+	suite.currentByStep = event
+	suite.selectiveLock.Unlock()
+
+	if len(callback) == 1 {
+		defer func() {
+			suite.selectiveLock.Lock()
+			suite.currentByStep = types.SpecEvent{}
+			suite.selectiveLock.Unlock()
+			suite.handleSpecEventEnd(types.SpecEventByEnd, event)
+		}()
+		callback[0]()
+	} else if len(callback) > 1 {
+		panic("just one callback per By, please")
+	}
+	return nil
+}
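By() is now recorded as paired SpecEventByStart/SpecEventByEnd timeline events rather than a mutable step cursor. Usage is unchanged; a sketch (By is the real public API; the spec is hypothetical):

package mypkg_test

import . "github.com/onsi/ginkgo/v2"

var _ = It("books a flight", func() {
	By("searching for flights", func() {
		// with a callback, Ginkgo emits a ByStart event, runs the callback,
		// then emits a ByEnd event carrying the step's duration
	})
	By("selecting the cheapest result") // without a callback: a single timeline marker
})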
|
||||
/*
|
||||
Spec Running methods - used during PhaseRun
|
||||
Spec Running methods - used during PhaseRun
|
||||
*/
|
||||
func (suite *Suite) CurrentSpecReport() types.SpecReport {
|
||||
suite.selectiveLock.Lock()
|
||||
@ -263,27 +327,32 @@ func (suite *Suite) CurrentSpecReport() types.SpecReport {
}

func (suite *Suite) AddReportEntry(entry ReportEntry) error {
suite.selectiveLock.Lock()
defer suite.selectiveLock.Unlock()
if suite.phase != PhaseRun {
return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
}
entry.TimelineLocation = suite.generateTimelineLocation()
entry.Time = entry.TimelineLocation.Time
suite.selectiveLock.Lock()
suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
suite.selectiveLock.Unlock()
suite.reporter.EmitReportEntry(entry)
return nil
}

func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport {
timelineLocation := suite.generateTimelineLocation()
suite.selectiveLock.Lock()
defer suite.selectiveLock.Unlock()

deadline, cancel := context.WithTimeout(context.Background(), PROGRESS_REPORTER_DEADLING)
defer cancel()
var additionalReports []string
if suite.currentSpecContext != nil {
additionalReports = suite.currentSpecContext.QueryProgressReporters()
additionalReports = append(additionalReports, suite.currentSpecContext.QueryProgressReporters(deadline, suite.failer)...)
}
stepCursor := suite.progressStepCursor

additionalReports = append(additionalReports, suite.QueryProgressReporters(deadline, suite.failer)...)
gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes())
pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, stepCursor, gwOutput, additionalReports, suite.config.SourceRoots, fullReport)
pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport)

if err != nil {
fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error())
@ -355,7 +424,13 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s
}

suite.report.SuiteSucceeded = true
suite.runBeforeSuite(numSpecsThatWillBeRun)

suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite)

ranBeforeSuite := suite.report.SuiteSucceeded
if suite.report.SuiteSucceeded {
suite.runBeforeSuite(numSpecsThatWillBeRun)
}

if suite.report.SuiteSucceeded {
groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
@ -394,7 +469,9 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s
}
}

suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
if ranBeforeSuite {
suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
}

interruptStatus := suite.interruptHandler.Status()
if interruptStatus.Interrupted() {
@ -408,9 +485,7 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s
suite.report.SuiteSucceeded = false
}

if suite.config.ParallelProcess == 1 {
suite.runReportAfterSuite()
}
suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite)
suite.reporter.SuiteDidEnd(suite.report)
if suite.isRunningInParallel() {
suite.client.PostSuiteDidEnd(suite.report)
@ -424,9 +499,10 @@ func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: beforeSuiteNode.NodeType,
LeafNodeLocation: beforeSuiteNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
LeafNodeType: beforeSuiteNode.NodeType,
LeafNodeLocation: beforeSuiteNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
RunningInParallel: suite.isRunningInParallel(),
}
suite.selectiveLock.Unlock()

@ -445,9 +521,10 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: afterSuiteNode.NodeType,
LeafNodeLocation: afterSuiteNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
LeafNodeType: afterSuiteNode.NodeType,
LeafNodeLocation: afterSuiteNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
RunningInParallel: suite.isRunningInParallel(),
}
suite.selectiveLock.Unlock()

@ -461,9 +538,10 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
for _, cleanupNode := range afterSuiteCleanup {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: cleanupNode.NodeType,
LeafNodeLocation: cleanupNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
LeafNodeType: cleanupNode.NodeType,
LeafNodeLocation: cleanupNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
RunningInParallel: suite.isRunningInParallel(),
}
suite.selectiveLock.Unlock()

@ -474,23 +552,6 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
}
}

func (suite *Suite) runReportAfterSuite() {
for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: node.NodeType,
LeafNodeLocation: node.CodeLocation,
LeafNodeText: node.Text,
ParallelProcess: suite.config.ParallelProcess,
}
suite.selectiveLock.Unlock()

suite.reporter.WillRun(suite.currentSpecReport)
suite.runReportAfterSuiteNode(node, suite.report)
suite.processCurrentSpecReport()
}
}

func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
nodes := spec.Nodes.WithType(nodeType)
if nodeType == types.NodeTypeReportAfterEach {
@ -608,39 +669,80 @@ func (suite *Suite) runSuiteNode(node Node) {

if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
}

suite.currentSpecReport.EndTime = time.Now()
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()

return
}

func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) {
func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) {
nodes := suite.suiteNodes.WithType(nodeType)
// only run ReportAfterSuite on proc 1
if nodeType.Is(types.NodeTypeReportAfterSuite) && suite.config.ParallelProcess != 1 {
return
}
// if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed
if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 {
state, err := suite.client.BlockUntilReportBeforeSuiteCompleted()
if err != nil || state.Is(types.SpecStateFailed) {
suite.report.SuiteSucceeded = false
}
return
}

for _, node := range nodes {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: node.NodeType,
LeafNodeLocation: node.CodeLocation,
LeafNodeText: node.Text,
ParallelProcess: suite.config.ParallelProcess,
RunningInParallel: suite.isRunningInParallel(),
}
suite.selectiveLock.Unlock()

suite.reporter.WillRun(suite.currentSpecReport)
suite.runReportSuiteNode(node, suite.report)
suite.processCurrentSpecReport()
}

// if we're running ReportBeforeSuite and we're running in parallel - we should tell the other procs that we're done
if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 {
if suite.report.SuiteSucceeded {
suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed)
} else {
suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed)
}
}
}

func (suite *Suite) runReportSuiteNode(node Node, report types.Report) {
suite.writer.Truncate()
suite.outputInterceptor.StartInterceptingOutput()
suite.currentSpecReport.StartTime = time.Now()

if suite.config.ParallelTotal > 1 {
// if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and
// (b) always fetch the latest report as prior ReportAfterSuites will contribute to it
if node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() {
aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
if err != nil {
suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
return
}
report = report.Add(aggregatedReport)
}

node.Body = func(SpecContext) { node.ReportAfterSuiteBody(report) }
node.Body = func(SpecContext) { node.ReportSuiteBody(report) }
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")

suite.currentSpecReport.EndTime = time.Now()
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()

return
}

func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
@ -662,7 +764,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
suite.selectiveLock.Lock()
suite.currentNode = node
suite.currentNodeStartTime = time.Now()
suite.progressStepCursor = ProgressStepCursor{}
suite.currentByStep = types.SpecEvent{}
suite.selectiveLock.Unlock()
defer func() {
suite.selectiveLock.Lock()
@ -671,13 +773,18 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
suite.selectiveLock.Unlock()
}()

if suite.config.EmitSpecProgress && !node.MarkedSuppressProgressReporting {
if text == "" {
text = "TOP-LEVEL"
}
s := fmt.Sprintf("[%s] %s\n  %s\n", node.NodeType.String(), text, node.CodeLocation.String())
suite.writer.Write([]byte(s))
if text == "" {
text = "TOP-LEVEL"
}
event := suite.handleSpecEvent(types.SpecEvent{
SpecEventType: types.SpecEventNodeStart,
NodeType: node.NodeType,
Message: text,
CodeLocation: node.CodeLocation,
})
defer func() {
suite.handleSpecEventEnd(types.SpecEventNodeEnd, event)
}()

var failure types.Failure
failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
@ -697,18 +804,23 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ

now := time.Now()
deadline := suite.deadline
timeoutInPlay := "suite"
if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) {
deadline = specDeadline
timeoutInPlay = "spec"
}
if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) {
deadline = now.Add(node.NodeTimeout)
timeoutInPlay = "node"
}
if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
//we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
if node.NodeTimeout > 0 {
deadline = now.Add(node.NodeTimeout)
timeoutInPlay = "node"
} else {
deadline = now.Add(gracePeriod)
timeoutInPlay = "grace period"
}
}

@ -743,6 +855,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
}

outcomeFromRun, failureFromRun := suite.failer.Drain()
failureFromRun.TimelineLocation = suite.generateTimelineLocation()
outcomeC <- outcomeFromRun
failureC <- failureFromRun
}()
@ -772,23 +885,33 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
select {
case outcomeFromRun := <-outcomeC:
failureFromRun := <-failureC
if outcome == types.SpecStateInterrupted {
// we've already been interrupted. we just managed to actually exit
if outcome.Is(types.SpecStateInterrupted | types.SpecStateTimedout) {
// we've already been interrupted/timed out. we just managed to actually exit
// before the grace period elapsed
return outcome, failure
} else if outcome == types.SpecStateTimedout {
// we've already timed out. we just managed to actually exit
// before the grace period elapsed. if we have a failure message we should include it
// if we have a failure message we attach it as an additional failure
if outcomeFromRun != types.SpecStatePassed {
failure.Location, failure.ForwardedPanic = failureFromRun.Location, failureFromRun.ForwardedPanic
failure.Message = "This spec timed out and reported the following failure after the timeout:\n\n" + failureFromRun.Message
additionalFailure := types.AdditionalFailure{
State: outcomeFromRun,
Failure: failure, //we make a copy - this will include all the configuration set up above...
}
//...and then we update the failure with the details from failureFromRun
additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
additionalFailure.Failure.ProgressReport = types.ProgressReport{}
if outcome == types.SpecStateTimedout {
additionalFailure.Failure.Message = fmt.Sprintf("A %s timeout occurred and then the following failure was recorded in the timedout node before it exited:\n%s", timeoutInPlay, failureFromRun.Message)
} else {
additionalFailure.Failure.Message = fmt.Sprintf("An interrupt occurred and then the following failure was recorded in the interrupted node before it exited:\n%s", failureFromRun.Message)
}
suite.reporter.EmitFailure(additionalFailure.State, additionalFailure.Failure)
failure.AdditionalFailure = &additionalFailure
}
return outcome, failure
}
if outcomeFromRun.Is(types.SpecStatePassed) {
return outcomeFromRun, types.Failure{}
} else {
failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic
failure.Message, failure.Location, failure.ForwardedPanic, failure.TimelineLocation = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
suite.reporter.EmitFailure(outcomeFromRun, failure)
return outcomeFromRun, failure
}
case <-gracePeriodChannel:
@ -801,10 +924,12 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
case <-deadlineChannel:
// we're out of time - the outcome is a timeout and we capture the failure and progress report
outcome = types.SpecStateTimedout
failure.Message, failure.Location = "Timedout", node.CodeLocation
failure.Message, failure.Location, failure.TimelineLocation = fmt.Sprintf("A %s timeout occurred", timeoutInPlay), node.CodeLocation, suite.generateTimelineLocation()
failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput()
failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the timeout occurred:{{/}}"
failure.ProgressReport.Message = fmt.Sprintf("{{bold}}This is the Progress Report generated when the %s timeout occurred:{{/}}", timeoutInPlay)
deadlineChannel = nil
suite.reporter.EmitFailure(outcome, failure)

// tell the spec to stop. it's important we generate the progress report first to make sure we capture where
// the spec is actually stuck
sc.cancel()
@ -812,38 +937,44 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
gracePeriodChannel = time.After(gracePeriod)
case <-interruptStatus.Channel:
interruptStatus = suite.interruptHandler.Status()
// ignore interruption from other process if we are cleaning up or reporting
if interruptStatus.Cause == interrupt_handler.InterruptCauseAbortByOtherProcess &&
node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) {
continue
}

deadlineChannel = nil // don't worry about deadlines, time's up now

failureTimelineLocation := suite.generateTimelineLocation()
progressReport := suite.generateProgressReport(true)

if outcome == types.SpecStateInvalid {
outcome = types.SpecStateInterrupted
failure.Message, failure.Location = interruptStatus.Message(), node.CodeLocation
failure.Message, failure.Location, failure.TimelineLocation = interruptStatus.Message(), node.CodeLocation, failureTimelineLocation
if interruptStatus.ShouldIncludeProgressReport() {
failure.ProgressReport = suite.generateProgressReport(true).WithoutCapturedGinkgoWriterOutput()
failure.ProgressReport = progressReport.WithoutCapturedGinkgoWriterOutput()
failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}"
}
suite.reporter.EmitFailure(outcome, failure)
}

var report types.ProgressReport
if interruptStatus.ShouldIncludeProgressReport() {
report = suite.generateProgressReport(false)
}

progressReport = progressReport.WithoutOtherGoroutines()
sc.cancel()

if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
if interruptStatus.ShouldIncludeProgressReport() {
report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
suite.emitProgressReport(report)
progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
suite.emitProgressReport(progressReport)
}
return outcome, failure
}
if interruptStatus.ShouldIncludeProgressReport() {
if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport {
report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
} else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly {
report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
}
suite.emitProgressReport(report)
suite.emitProgressReport(progressReport)
}

if gracePeriodChannel == nil {
@ -864,10 +995,12 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
}
}

// TODO: search for usages and consider if reporter.EmitFailure() is necessary
func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
return types.Failure{
Message: message,
Location: node.CodeLocation,
TimelineLocation: suite.generateTimelineLocation(),
FailureNodeContext: types.FailureNodeIsLeafNode,
FailureNodeType: node.NodeType,
FailureNodeLocation: node.CodeLocation,

112
vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
generated
vendored
112
vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
generated
vendored
@ -5,35 +5,63 @@ import (
"io"
"os"

"github.com/onsi/ginkgo/v2/formatter"
"github.com/onsi/ginkgo/v2/internal"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
)

type failFunc func(message string, callerSkip ...int)
type skipFunc func(message string, callerSkip ...int)
type cleanupFunc func(args ...interface{})
type cleanupFunc func(args ...any)
type reportFunc func() types.SpecReport
type addReportEntryFunc func(names string, args ...any)
type ginkgoWriterInterface interface {
io.Writer

func New(writer io.Writer, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, offset int) *ginkgoTestingTProxy {
Print(a ...interface{})
Printf(format string, a ...interface{})
Println(a ...interface{})
}
type ginkgoRecoverFunc func()
type attachProgressReporterFunc func(func() string) func()

func New(writer ginkgoWriterInterface, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, addReportEntry addReportEntryFunc, ginkgoRecover ginkgoRecoverFunc, attachProgressReporter attachProgressReporterFunc, randomSeed int64, parallelProcess int, parallelTotal int, noColor bool, offset int) *ginkgoTestingTProxy {
return &ginkgoTestingTProxy{
fail: fail,
offset: offset,
writer: writer,
skip: skip,
cleanup: cleanup,
report: report,
fail: fail,
offset: offset,
writer: writer,
skip: skip,
cleanup: cleanup,
report: report,
addReportEntry: addReportEntry,
ginkgoRecover: ginkgoRecover,
attachProgressReporter: attachProgressReporter,
randomSeed: randomSeed,
parallelProcess: parallelProcess,
parallelTotal: parallelTotal,
f: formatter.NewWithNoColorBool(noColor),
}
}

type ginkgoTestingTProxy struct {
fail failFunc
skip skipFunc
cleanup cleanupFunc
report reportFunc
offset int
writer io.Writer
fail failFunc
skip skipFunc
cleanup cleanupFunc
report reportFunc
offset int
writer ginkgoWriterInterface
addReportEntry addReportEntryFunc
ginkgoRecover ginkgoRecoverFunc
attachProgressReporter attachProgressReporterFunc
randomSeed int64
parallelProcess int
parallelTotal int
f formatter.Formatter
}

// basic testing.T support

func (t *ginkgoTestingTProxy) Cleanup(f func()) {
t.cleanup(f, internal.Offset(1))
}
@ -81,7 +109,7 @@ func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
}

func (t *ginkgoTestingTProxy) Helper() {
// No-op
types.MarkAsHelper(1)
}

func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
@ -126,3 +154,57 @@ func (t *ginkgoTestingTProxy) TempDir() string {

return tmpDir
}

// FullGinkgoTInterface
func (t *ginkgoTestingTProxy) AddReportEntryVisibilityAlways(name string, args ...any) {
finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityAlways}
t.addReportEntry(name, append(finalArgs, args...)...)
}
func (t *ginkgoTestingTProxy) AddReportEntryVisibilityFailureOrVerbose(name string, args ...any) {
finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose}
t.addReportEntry(name, append(finalArgs, args...)...)
}
func (t *ginkgoTestingTProxy) AddReportEntryVisibilityNever(name string, args ...any) {
finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityNever}
t.addReportEntry(name, append(finalArgs, args...)...)
}
func (t *ginkgoTestingTProxy) Print(a ...any) {
t.writer.Print(a...)
}
func (t *ginkgoTestingTProxy) Printf(format string, a ...any) {
t.writer.Printf(format, a...)
}
func (t *ginkgoTestingTProxy) Println(a ...any) {
t.writer.Println(a...)
}
func (t *ginkgoTestingTProxy) F(format string, args ...any) string {
return t.f.F(format, args...)
}
func (t *ginkgoTestingTProxy) Fi(indentation uint, format string, args ...any) string {
return t.f.Fi(indentation, format, args...)
}
func (t *ginkgoTestingTProxy) Fiw(indentation uint, maxWidth uint, format string, args ...any) string {
return t.f.Fiw(indentation, maxWidth, format, args...)
}
func (t *ginkgoTestingTProxy) RenderTimeline() string {
return reporters.RenderTimeline(t.report(), false)
}
func (t *ginkgoTestingTProxy) GinkgoRecover() {
t.ginkgoRecover()
}
func (t *ginkgoTestingTProxy) DeferCleanup(args ...any) {
finalArgs := []any{internal.Offset(1)}
t.cleanup(append(finalArgs, args...)...)
}
func (t *ginkgoTestingTProxy) RandomSeed() int64 {
return t.randomSeed
}
func (t *ginkgoTestingTProxy) ParallelProcess() int {
return t.parallelProcess
}
func (t *ginkgoTestingTProxy) ParallelTotal() int {
return t.parallelTotal
}
func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() {
return t.attachProgressReporter(f)
}

44
vendor/github.com/onsi/ginkgo/v2/internal/writer.go
generated
vendored
44
vendor/github.com/onsi/ginkgo/v2/internal/writer.go
generated
vendored
@ -22,24 +22,30 @@ type WriterInterface interface {

Truncate()
Bytes() []byte
Len() int
}

//Writer implements WriterInterface and GinkgoWriterInterface
// Writer implements WriterInterface and GinkgoWriterInterface
type Writer struct {
buffer *bytes.Buffer
outWriter io.Writer
lock *sync.Mutex
mode WriterMode

streamIndent []byte
indentNext bool

teeWriters []io.Writer
}

func NewWriter(outWriter io.Writer) *Writer {
return &Writer{
buffer: &bytes.Buffer{},
lock: &sync.Mutex{},
outWriter: outWriter,
mode: WriterModeStreamAndBuffer,
buffer: &bytes.Buffer{},
lock: &sync.Mutex{},
outWriter: outWriter,
mode: WriterModeStreamAndBuffer,
streamIndent: []byte("  "),
indentNext: true,
}
}

@ -49,6 +55,14 @@ func (w *Writer) SetMode(mode WriterMode) {
w.mode = mode
}

func (w *Writer) Len() int {
w.lock.Lock()
defer w.lock.Unlock()
return w.buffer.Len()
}

var newline = []byte("\n")

func (w *Writer) Write(b []byte) (n int, err error) {
w.lock.Lock()
defer w.lock.Unlock()
@ -58,7 +72,21 @@ func (w *Writer) Write(b []byte) (n int, err error) {
}

if w.mode == WriterModeStreamAndBuffer {
w.outWriter.Write(b)
line, remaining, found := []byte{}, b, false
for len(remaining) > 0 {
line, remaining, found = bytes.Cut(remaining, newline)
if len(line) > 0 {
if w.indentNext {
w.outWriter.Write(w.streamIndent)
w.indentNext = false
}
w.outWriter.Write(line)
}
if found {
w.outWriter.Write(newline)
w.indentNext = true
}
}
}
return w.buffer.Write(b)
}
@ -78,7 +106,7 @@ func (w *Writer) Bytes() []byte {
return copied
}

//GinkgoWriterInterface
// GinkgoWriterInterface
func (w *Writer) TeeTo(writer io.Writer) {
w.lock.Lock()
defer w.lock.Unlock()
@ -107,6 +135,6 @@ func (w *Writer) Println(a ...interface{}) {

func GinkgoLogrFunc(writer *Writer) logr.Logger {
return funcr.New(func(prefix, args string) {
writer.Printf("%s", args)
writer.Printf("%s\n", args)
}, funcr.Options{})
}

681
vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
generated
vendored
681
vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
generated
vendored
@ -12,6 +12,7 @@ import (
"io"
"runtime"
"strings"
"sync"
"time"

"github.com/onsi/ginkgo/v2/formatter"
@ -23,13 +24,16 @@ type DefaultReporter struct {
writer io.Writer

// managing the emission stream
lastChar string
lastCharWasNewline bool
lastEmissionWasDelimiter bool

// rendering
specDenoter string
retryDenoter string
formatter formatter.Formatter

runningInParallel bool
lock *sync.Mutex
}

func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
@ -44,12 +48,13 @@ func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultRep
conf: conf,
writer: writer,

lastChar: "\n",
lastCharWasNewline: true,
lastEmissionWasDelimiter: false,

specDenoter: "•",
retryDenoter: "↺",
formatter: formatter.NewWithNoColorBool(conf.NoColor),
lock: &sync.Mutex{},
}
if runtime.GOOS == "windows" {
reporter.specDenoter = "+"
@ -97,230 +102,10 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) {
}
}

func (r *DefaultReporter) WillRun(report types.SpecReport) {
if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) {
return
}

r.emitDelimiter()
indentation := uint(0)
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
r.emitBlock(r.f("{{bold}}[%s] %s{{/}}", report.LeafNodeType.String(), report.LeafNodeText))
} else {
if len(report.ContainerHierarchyTexts) > 0 {
r.emitBlock(r.cycleJoin(report.ContainerHierarchyTexts, " "))
indentation = 1
}
line := r.fi(indentation, "{{bold}}%s{{/}}", report.LeafNodeText)
labels := report.Labels()
if len(labels) > 0 {
line += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels, ", "))
}
r.emitBlock(line)
}
r.emitBlock(r.fi(indentation, "{{gray}}%s{{/}}", report.LeafNodeLocation))
}

func (r *DefaultReporter) DidRun(report types.SpecReport) {
v := r.conf.Verbosity()
var header, highlightColor string
includeRuntime, emitGinkgoWriterOutput, stream, denoter := true, true, false, r.specDenoter
succinctLocationBlock := v.Is(types.VerbosityLevelSuccinct)

hasGW := report.CapturedGinkgoWriterOutput != ""
hasStd := report.CapturedStdOutErr != ""
hasEmittableReports := report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) || (report.ReportEntries.HasVisibility(types.ReportEntryVisibilityFailureOrVerbose) && (!report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose)))

if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
denoter = fmt.Sprintf("[%s]", report.LeafNodeType)
}

highlightColor = r.highlightColorForState(report.State)

switch report.State {
case types.SpecStatePassed:
succinctLocationBlock = v.LT(types.VerbosityLevelVerbose)
emitGinkgoWriterOutput = (r.conf.AlwaysEmitGinkgoWriter || v.GTE(types.VerbosityLevelVerbose)) && hasGW
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
if v.GTE(types.VerbosityLevelVerbose) || hasStd || hasEmittableReports {
header = fmt.Sprintf("%s PASSED", denoter)
} else {
return
}
} else {
header, stream = denoter, true
if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 {
header, stream = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), false
}
if report.RunTime > r.conf.SlowSpecThreshold {
header, stream = fmt.Sprintf("%s [SLOW TEST]", header), false
}
}
if hasStd || emitGinkgoWriterOutput || hasEmittableReports {
stream = false
}
case types.SpecStatePending:
includeRuntime, emitGinkgoWriterOutput = false, false
if v.Is(types.VerbosityLevelSuccinct) {
header, stream = "P", true
} else {
header, succinctLocationBlock = "P [PENDING]", v.LT(types.VerbosityLevelVeryVerbose)
}
case types.SpecStateSkipped:
if report.Failure.Message != "" || v.Is(types.VerbosityLevelVeryVerbose) {
header = "S [SKIPPED]"
} else {
header, stream = "S", true
}
case types.SpecStateFailed:
header = fmt.Sprintf("%s [FAILED]", denoter)
case types.SpecStateTimedout:
header = fmt.Sprintf("%s [TIMEDOUT]", denoter)
case types.SpecStatePanicked:
header = fmt.Sprintf("%s! [PANICKED]", denoter)
case types.SpecStateInterrupted:
header = fmt.Sprintf("%s! [INTERRUPTED]", denoter)
case types.SpecStateAborted:
header = fmt.Sprintf("%s! [ABORTED]", denoter)
}

if report.State.Is(types.SpecStateFailureStates) && report.MaxMustPassRepeatedly > 1 {
header, stream = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts), false
}
// Emit stream and return
if stream {
r.emit(r.f(highlightColor + header + "{{/}}"))
return
}

// Emit header
r.emitDelimiter()
if includeRuntime {
header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds())
}
r.emitBlock(r.f(highlightColor + header + "{{/}}"))

// Emit Code Location Block
r.emitBlock(r.codeLocationBlock(report, highlightColor, succinctLocationBlock, false))

//Emit Stdout/Stderr Output
if hasStd {
r.emitBlock("\n")
r.emitBlock(r.fi(1, "{{gray}}Begin Captured StdOut/StdErr Output >>{{/}}"))
r.emitBlock(r.fi(2, "%s", report.CapturedStdOutErr))
r.emitBlock(r.fi(1, "{{gray}}<< End Captured StdOut/StdErr Output{{/}}"))
}

//Emit Captured GinkgoWriter Output
if emitGinkgoWriterOutput && hasGW {
r.emitBlock("\n")
r.emitGinkgoWriterOutput(1, report.CapturedGinkgoWriterOutput, 0)
}

if hasEmittableReports {
r.emitBlock("\n")
r.emitBlock(r.fi(1, "{{gray}}Begin Report Entries >>{{/}}"))
reportEntries := report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways)
if !report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose) {
reportEntries = report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose)
}
for _, entry := range reportEntries {
r.emitBlock(r.fi(2, "{{bold}}"+entry.Name+"{{gray}} - %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT)))
if representation := entry.StringRepresentation(); representation != "" {
r.emitBlock(r.fi(3, representation))
}
}
r.emitBlock(r.fi(1, "{{gray}}<< End Report Entries{{/}}"))
}

// Emit Failure Message
if !report.Failure.IsZero() {
r.emitBlock("\n")
r.EmitFailure(1, report.State, report.Failure, false)
}

if len(report.AdditionalFailures) > 0 {
if v.GTE(types.VerbosityLevelVerbose) {
r.emitBlock("\n")
r.emitBlock(r.fi(1, "{{bold}}There were additional failures detected after the initial failure:{{/}}"))
for i, additionalFailure := range report.AdditionalFailures {
r.EmitFailure(2, additionalFailure.State, additionalFailure.Failure, true)
if i < len(report.AdditionalFailures)-1 {
r.emitBlock(r.fi(2, "{{gray}}%s{{/}}", strings.Repeat("-", 10)))
}
}
} else {
r.emitBlock("\n")
r.emitBlock(r.fi(1, "{{bold}}There were additional failures detected after the initial failure. Here's a summary - for full details run Ginkgo in verbose mode:{{/}}"))
for _, additionalFailure := range report.AdditionalFailures {
r.emitBlock(r.fi(2, r.highlightColorForState(additionalFailure.State)+"[%s]{{/}} in [%s] at %s",
r.humanReadableState(additionalFailure.State),
additionalFailure.Failure.FailureNodeType,
additionalFailure.Failure.Location,
))
}

}
}

r.emitDelimiter()
}

func (r *DefaultReporter) highlightColorForState(state types.SpecState) string {
switch state {
case types.SpecStatePassed:
return "{{green}}"
case types.SpecStatePending:
return "{{yellow}}"
case types.SpecStateSkipped:
return "{{cyan}}"
case types.SpecStateFailed:
return "{{red}}"
case types.SpecStateTimedout:
return "{{orange}}"
case types.SpecStatePanicked:
return "{{magenta}}"
case types.SpecStateInterrupted:
return "{{orange}}"
case types.SpecStateAborted:
return "{{coral}}"
default:
return "{{gray}}"
}
}

func (r *DefaultReporter) humanReadableState(state types.SpecState) string {
return strings.ToUpper(state.String())
}

func (r *DefaultReporter) EmitFailure(indent uint, state types.SpecState, failure types.Failure, includeState bool) {
highlightColor := r.highlightColorForState(state)
if includeState {
r.emitBlock(r.fi(indent, highlightColor+"[%s]{{/}}", r.humanReadableState(state)))
}
r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.Message))
r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}}\n", failure.FailureNodeType, failure.Location))
if failure.ForwardedPanic != "" {
r.emitBlock("\n")
r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))
}

if r.conf.FullTrace || failure.ForwardedPanic != "" {
r.emitBlock("\n")
r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}"))
r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace))
}

if !failure.ProgressReport.IsZero() {
r.emitBlock("\n")
r.emitProgressReport(indent, false, failure.ProgressReport)
}
}

func (r *DefaultReporter) SuiteDidEnd(report types.Report) {
failures := report.SpecReports.WithState(types.SpecStateFailureStates)
if len(failures) > 0 {
r.emitBlock("\n\n")
r.emitBlock("\n")
if len(failures) > 1 {
r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures)))
} else {
@ -338,7 +123,7 @@ func (r *DefaultReporter) SuiteDidEnd(report types.Report) {
case types.SpecStateInterrupted:
highlightColor, heading = "{{orange}}", "[INTERRUPTED]"
}
locationBlock := r.codeLocationBlock(specReport, highlightColor, true, true)
locationBlock := r.codeLocationBlock(specReport, highlightColor, false, true)
r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock))
}
}
@ -387,14 +172,271 @@ func (r *DefaultReporter) SuiteDidEnd(report types.Report) {
}
}

func (r *DefaultReporter) WillRun(report types.SpecReport) {
v := r.conf.Verbosity()
if v.LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) || report.RunningInParallel {
return
}

r.emitDelimiter(0)
r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false)))
}

func (r *DefaultReporter) DidRun(report types.SpecReport) {
v := r.conf.Verbosity()
inParallel := report.RunningInParallel

header := r.specDenoter
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
header = fmt.Sprintf("[%s]", report.LeafNodeType)
}
highlightColor := r.highlightColorForState(report.State)

// have we already been streaming the timeline?
timelineHasBeenStreaming := v.GTE(types.VerbosityLevelVerbose) && !inParallel

// should we show the timeline?
var timeline types.Timeline
showTimeline := !timelineHasBeenStreaming && (v.GTE(types.VerbosityLevelVerbose) || report.Failed())
if showTimeline {
timeline = report.Timeline().WithoutHiddenReportEntries()
keepVeryVerboseSpecEvents := v.Is(types.VerbosityLevelVeryVerbose) ||
(v.Is(types.VerbosityLevelVerbose) && r.conf.ShowNodeEvents) ||
(report.Failed() && r.conf.ShowNodeEvents)
if !keepVeryVerboseSpecEvents {
timeline = timeline.WithoutVeryVerboseSpecEvents()
}
if len(timeline) == 0 && report.CapturedGinkgoWriterOutput == "" {
// the timeline is completely empty - don't show it
showTimeline = false
}
if v.LT(types.VerbosityLevelVeryVerbose) && report.CapturedGinkgoWriterOutput == "" && len(timeline) > 0 {
//if we aren't -vv and the timeline only has a single failure, don't show it as it will appear at the end of the report
failure, isFailure := timeline[0].(types.Failure)
if isFailure && (len(timeline) == 1 || (len(timeline) == 2 && failure.AdditionalFailure != nil)) {
showTimeline = false
}
}
}

// should we have a separate section for always-visible reports?
showSeparateVisibilityAlwaysReportsSection := !timelineHasBeenStreaming && !showTimeline && report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways)

// should we have a separate section for captured stdout/stderr
showSeparateStdSection := inParallel && (report.CapturedStdOutErr != "")

// given all that - do we have any actual content to show? or are we a single denoter in a stream?
reportHasContent := v.Is(types.VerbosityLevelVeryVerbose) || showTimeline || showSeparateVisibilityAlwaysReportsSection || showSeparateStdSection || report.Failed() || (v.Is(types.VerbosityLevelVerbose) && !report.State.Is(types.SpecStateSkipped))

// should we show a runtime?
includeRuntime := !report.State.Is(types.SpecStateSkipped|types.SpecStatePending) || (report.State.Is(types.SpecStateSkipped) && report.Failure.Message != "")

// should we show the codelocation block?
showCodeLocation := !timelineHasBeenStreaming || !report.State.Is(types.SpecStatePassed)

switch report.State {
case types.SpecStatePassed:
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) && !reportHasContent {
return
}
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
header = fmt.Sprintf("%s PASSED", header)
}
if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 {
header, reportHasContent = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), true
}
case types.SpecStatePending:
header = "P"
if v.GT(types.VerbosityLevelSuccinct) {
header, reportHasContent = "P [PENDING]", true
}
case types.SpecStateSkipped:
header = "S"
if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && report.Failure.Message != "") {
header, reportHasContent = "S [SKIPPED]", true
}
default:
header = fmt.Sprintf("%s [%s]", header, r.humanReadableState(report.State))
if report.MaxMustPassRepeatedly > 1 {
header = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts)
}
}

// If we have no content to show, just emit the header and return
if !reportHasContent {
r.emit(r.f(highlightColor + header + "{{/}}"))
return
}

if includeRuntime {
header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds())
}

// Emit header
if !timelineHasBeenStreaming {
r.emitDelimiter(0)
}
r.emitBlock(r.f(highlightColor + header + "{{/}}"))
if showCodeLocation {
r.emitBlock(r.codeLocationBlock(report, highlightColor, v.Is(types.VerbosityLevelVeryVerbose), false))
}

//Emit Stdout/Stderr Output
if showSeparateStdSection {
r.emitBlock("\n")
r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}"))
r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}"))
}

if showSeparateVisibilityAlwaysReportsSection {
r.emitBlock("\n")
r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}"))
for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
r.emitReportEntry(1, entry)
}
r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}"))
}

if showTimeline {
r.emitBlock("\n")
r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}"))
r.emitTimeline(1, report, timeline)
r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}"))
}

// Emit Failure Message
if !report.Failure.IsZero() && !v.Is(types.VerbosityLevelVeryVerbose) {
r.emitBlock("\n")
r.emitFailure(1, report.State, report.Failure, true)
if len(report.AdditionalFailures) > 0 {
r.emitBlock(r.fi(1, "\nThere were {{bold}}{{red}}additional failures{{/}} detected. To view them in detail run {{bold}}ginkgo -vv{{/}}"))
}
}

r.emitDelimiter(0)
}

func (r *DefaultReporter) highlightColorForState(state types.SpecState) string {
switch state {
case types.SpecStatePassed:
return "{{green}}"
case types.SpecStatePending:
return "{{yellow}}"
case types.SpecStateSkipped:
return "{{cyan}}"
case types.SpecStateFailed:
return "{{red}}"
case types.SpecStateTimedout:
return "{{orange}}"
case types.SpecStatePanicked:
return "{{magenta}}"
case types.SpecStateInterrupted:
return "{{orange}}"
case types.SpecStateAborted:
return "{{coral}}"
default:
return "{{gray}}"
}
}

func (r *DefaultReporter) humanReadableState(state types.SpecState) string {
return strings.ToUpper(state.String())
}

func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, timeline types.Timeline) {
isVeryVerbose := r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose)
gw := report.CapturedGinkgoWriterOutput
cursor := 0
for _, entry := range timeline {
tl := entry.GetTimelineLocation()
if tl.Offset < len(gw) {
r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset]))
cursor = tl.Offset
} else if cursor < len(gw) {
r.emit(r.fi(indent, "%s", gw[cursor:]))
cursor = len(gw)
}
switch x := entry.(type) {
case types.Failure:
if isVeryVerbose {
r.emitFailure(indent, report.State, x, false)
} else {
r.emitShortFailure(indent, report.State, x)
}
case types.AdditionalFailure:
if isVeryVerbose {
r.emitFailure(indent, x.State, x.Failure, true)
} else {
r.emitShortFailure(indent, x.State, x.Failure)
}
case types.ReportEntry:
r.emitReportEntry(indent, x)
case types.ProgressReport:
r.emitProgressReport(indent, false, x)
case types.SpecEvent:
if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents {
r.emitSpecEvent(indent, x, isVeryVerbose)
}
}
}
if cursor < len(gw) {
r.emit(r.fi(indent, "%s", gw[cursor:]))
}
}

func (r *DefaultReporter) EmitFailure(state types.SpecState, failure types.Failure) {
if r.conf.Verbosity().Is(types.VerbosityLevelVerbose) {
r.emitShortFailure(1, state, failure)
} else if r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) {
r.emitFailure(1, state, failure, true)
}
}

func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, failure types.Failure) {
r.emitBlock(r.fi(indent, r.highlightColorForState(state)+"[%s]{{/}} in [%s] - %s {{gray}}@ %s{{/}}",
r.humanReadableState(state),
failure.FailureNodeType,
failure.Location,
failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT),
))
}

func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) {
highlightColor := r.highlightColorForState(state)
r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message))
r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
if failure.ForwardedPanic != "" {
r.emitBlock("\n")
r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))
}

if r.conf.FullTrace || failure.ForwardedPanic != "" {
r.emitBlock("\n")
r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}"))
r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace))
}

if !failure.ProgressReport.IsZero() {
r.emitBlock("\n")
r.emitProgressReport(indent, false, failure.ProgressReport)
}

if failure.AdditionalFailure != nil && includeAdditionalFailure {
r.emitBlock("\n")
r.emitFailure(indent, failure.AdditionalFailure.State, failure.AdditionalFailure.Failure, true)
}
}

func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) {
r.emitDelimiter()
r.emitDelimiter(1)

if report.RunningInParallel {
r.emit(r.f("{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess))
r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess))
}
r.emitProgressReport(0, true, report)
r.emitDelimiter()
shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)
r.emitProgressReport(1, shouldEmitGW, report)
r.emitDelimiter(1)
}

func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) {
@ -409,7 +451,7 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput
r.emit(" ")
subjectIndent = 0
}
r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time.Sub(report.SpecStartTime).Round(time.Millisecond)))
r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time().Sub(report.SpecStartTime).Round(time.Millisecond)))
r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation))
indent += 1
}
@ -419,12 +461,12 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput
r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText))
}

r.emit(r.f(" (Node Runtime: %s)\n", report.Time.Sub(report.CurrentNodeStartTime).Round(time.Millisecond)))
r.emit(r.f(" (Node Runtime: %s)\n", report.Time().Sub(report.CurrentNodeStartTime).Round(time.Millisecond)))
r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation))
indent += 1
}
if report.CurrentStepText != "" {
r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time.Sub(report.CurrentStepStartTime).Round(time.Millisecond)))
r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time().Sub(report.CurrentStepStartTime).Round(time.Millisecond)))
r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation))
indent += 1
}
@ -433,9 +475,19 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput
indent -= 1
}

if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" && (report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)) {
if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" {
r.emit("\n")
r.emitGinkgoWriterOutput(indent, report.CapturedGinkgoWriterOutput, 10)
r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}"))
limit, lines := 10, strings.Split(report.CapturedGinkgoWriterOutput, "\n")
if len(lines) <= limit {
r.emitBlock(r.fi(indent+1, "%s", report.CapturedGinkgoWriterOutput))
} else {
r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}"))
for _, line := range lines[len(lines)-limit-1:] {
r.emitBlock(r.fi(indent+1, "%s", line))
}
}
r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}"))
}

if !report.SpecGoroutine().IsZero() {
@ -471,22 +523,48 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput
}
}

func (r *DefaultReporter) emitGinkgoWriterOutput(indent uint, output string, limit int) {
r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}"))
if limit == 0 {
r.emitBlock(r.fi(indent+1, "%s", output))
} else {
lines := strings.Split(output, "\n")
if len(lines) <= limit {
r.emitBlock(r.fi(indent+1, "%s", output))
} else {
r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}"))
for _, line := range lines[len(lines)-limit-1:] {
r.emitBlock(r.fi(indent+1, "%s", line))
}
}
func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) {
if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || entry.Visibility == types.ReportEntryVisibilityNever {
return
}
r.emitReportEntry(1, entry)
}

func (r *DefaultReporter) emitReportEntry(indent uint, entry types.ReportEntry) {
r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} "+fmt.Sprintf("- %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT))))
if representation := entry.StringRepresentation(); representation != "" {
r.emitBlock(r.fi(indent+1, representation))
}
}

func (r *DefaultReporter) EmitSpecEvent(event types.SpecEvent) {
v := r.conf.Verbosity()
if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && (r.conf.ShowNodeEvents || !event.IsOnlyVisibleAtVeryVerbose())) {
r.emitSpecEvent(1, event, r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose))
}
}

func (r *DefaultReporter) emitSpecEvent(indent uint, event types.SpecEvent, includeLocation bool) {
location := ""
if includeLocation {
location = fmt.Sprintf("- %s ", event.CodeLocation.String())
}
switch event.SpecEventType {
case types.SpecEventInvalid:
return
case types.SpecEventByStart:
r.emitBlock(r.fi(indent, "{{bold}}STEP:{{/}} %s {{gray}}%s@ %s{{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
case types.SpecEventByEnd:
r.emitBlock(r.fi(indent, "{{bold}}END STEP:{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond)))
case types.SpecEventNodeStart:
r.emitBlock(r.fi(indent, "> Enter {{bold}}[%s]{{/}} %s {{gray}}%s@ %s{{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
case types.SpecEventNodeEnd:
r.emitBlock(r.fi(indent, "< Exit {{bold}}[%s]{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond)))
case types.SpecEventSpecRepeat:
r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{green}}Passed{{/}}{{bold}}. Repeating %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
case types.SpecEventSpecRetry:
r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{red}}Failed{{/}}{{bold}}. Retrying %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
}
r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}"))
}

func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) {
@ -544,31 +622,37 @@ func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) {

/* Emitting to the writer */
func (r *DefaultReporter) emit(s string) {
|
||||
if len(s) > 0 {
|
||||
r.lastChar = s[len(s)-1:]
|
||||
r.lastEmissionWasDelimiter = false
|
||||
r.writer.Write([]byte(s))
|
||||
}
|
||||
r._emit(s, false, false)
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) emitBlock(s string) {
|
||||
if len(s) > 0 {
|
||||
if r.lastChar != "\n" {
|
||||
r.emit("\n")
|
||||
}
|
||||
r.emit(s)
|
||||
if r.lastChar != "\n" {
|
||||
r.emit("\n")
|
||||
}
|
||||
}
|
||||
r._emit(s, true, false)
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) emitDelimiter() {
|
||||
if r.lastEmissionWasDelimiter {
|
||||
func (r *DefaultReporter) emitDelimiter(indent uint) {
|
||||
r._emit(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30)), true, true)
|
||||
}
|
||||
|
||||
// a bit ugly - but we're trying to minimize locking on this hot codepath
|
||||
func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) {
|
||||
if len(s) == 0 {
|
||||
return
|
||||
}
|
||||
r.emitBlock(r.f("{{gray}}%s{{/}}", strings.Repeat("-", 30)))
|
||||
r.lastEmissionWasDelimiter = true
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
if isDelimiter && r.lastEmissionWasDelimiter {
|
||||
return
|
||||
}
|
||||
if block && !r.lastCharWasNewline {
|
||||
r.writer.Write([]byte("\n"))
|
||||
}
|
||||
r.lastCharWasNewline = (s[len(s)-1:] == "\n")
|
||||
r.writer.Write([]byte(s))
|
||||
if block && !r.lastCharWasNewline {
|
||||
r.writer.Write([]byte("\n"))
|
||||
r.lastCharWasNewline = true
|
||||
}
|
||||
r.lastEmissionWasDelimiter = isDelimiter
|
||||
}
|
||||
|
||||
/* Rendering text */
|
||||
@ -584,13 +668,14 @@ func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string {
|
||||
return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"})
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, succinct bool, usePreciseFailureLocation bool) string {
|
||||
func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string {
|
||||
texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{}
|
||||
texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...)
|
||||
|
||||
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
||||
texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText))
|
||||
} else {
|
||||
texts = append(texts, report.LeafNodeText)
|
||||
texts = append(texts, r.f(report.LeafNodeText))
|
||||
}
|
||||
labels = append(labels, report.LeafNodeLabels)
|
||||
locations = append(locations, report.LeafNodeLocation)
|
||||
@ -600,24 +685,58 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
|
||||
failureLocation = report.Failure.Location
|
||||
}
|
||||
|
||||
highlightIndex := -1
|
||||
switch report.Failure.FailureNodeContext {
|
||||
case types.FailureNodeAtTopLevel:
|
||||
texts = append([]string{r.f(highlightColor+"{{bold}}TOP-LEVEL [%s]{{/}}", report.Failure.FailureNodeType)}, texts...)
|
||||
texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...)
|
||||
locations = append([]types.CodeLocation{failureLocation}, locations...)
|
||||
labels = append([][]string{{}}, labels...)
|
||||
highlightIndex = 0
|
||||
case types.FailureNodeInContainer:
|
||||
i := report.Failure.FailureNodeContainerIndex
|
||||
texts[i] = r.f(highlightColor+"{{bold}}%s [%s]{{/}}", texts[i], report.Failure.FailureNodeType)
|
||||
texts[i] = fmt.Sprintf("%s [%s]", texts[i], report.Failure.FailureNodeType)
|
||||
locations[i] = failureLocation
|
||||
highlightIndex = i
|
||||
case types.FailureNodeIsLeafNode:
|
||||
i := len(texts) - 1
|
||||
texts[i] = r.f(highlightColor+"{{bold}}[%s] %s{{/}}", report.LeafNodeType, report.LeafNodeText)
|
||||
texts[i] = fmt.Sprintf("[%s] %s", report.LeafNodeType, report.LeafNodeText)
|
||||
locations[i] = failureLocation
|
||||
highlightIndex = i
|
||||
default:
|
||||
//there is no failure, so we highlight the leaf node
highlightIndex = len(texts) - 1
}

out := ""
if succinct {
out += r.f("%s", r.cycleJoin(texts, " "))
if veryVerbose {
for i := range texts {
if i == highlightIndex {
out += r.fi(uint(i), highlightColor+"{{bold}}%s{{/}}", texts[i])
} else {
out += r.fi(uint(i), "%s", texts[i])
}
if len(labels[i]) > 0 {
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
}
out += "\n"
out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
}
} else {
for i := range texts {
style := "{{/}}"
if i%2 == 1 {
style = "{{gray}}"
}
if i == highlightIndex {
style = highlightColor + "{{bold}}"
}
out += r.f(style+"%s", texts[i])
if i < len(texts)-1 {
out += " "
} else {
out += r.f("{{/}}")
}
}
flattenedLabels := report.Labels()
if len(flattenedLabels) > 0 {
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", "))
@ -626,17 +745,15 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
if usePreciseFailureLocation {
out += r.f("{{gray}}%s{{/}}", failureLocation)
} else {
out += r.f("{{gray}}%s{{/}}", locations[len(locations)-1])
}
} else {
for i := range texts {
out += r.fi(uint(i), "%s", texts[i])
if len(labels[i]) > 0 {
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
leafLocation := locations[len(locations)-1]
if (report.Failure.FailureNodeLocation != types.CodeLocation{}) && (report.Failure.FailureNodeLocation != leafLocation) {
out += r.fi(1, highlightColor+"[%s]{{/}} {{gray}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.FailureNodeLocation)
out += r.fi(1, "{{gray}}[%s] %s{{/}}", report.LeafNodeType, leafLocation)
} else {
out += r.f("{{gray}}%s{{/}}", leafLocation)
}
out += "\n"
out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
}

}
return out
}
2
vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
generated
vendored
2
vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
generated
vendored
@ -35,7 +35,7 @@ func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Repor
FailOnPending: report.SuiteConfig.FailOnPending,
FailFast: report.SuiteConfig.FailFast,
FlakeAttempts: report.SuiteConfig.FlakeAttempts,
EmitSpecProgress: report.SuiteConfig.EmitSpecProgress,
EmitSpecProgress: false,
DryRun: report.SuiteConfig.DryRun,
ParallelNode: report.SuiteConfig.ParallelProcess,
ParallelTotal: report.SuiteConfig.ParallelTotal,
113
vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
generated
vendored
113
vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
generated
vendored
@ -15,12 +15,32 @@ import (
"fmt"
"os"
"strings"
"time"

"github.com/onsi/ginkgo/v2/config"
"github.com/onsi/ginkgo/v2/types"
)

type JunitReportConfig struct {
// Spec States for which no timeline should be emitted for system-err
// set this to types.SpecStatePassed|types.SpecStateSkipped|types.SpecStatePending to only match failing specs
OmitTimelinesForSpecState types.SpecState

// Enable OmitFailureMessageAttr to prevent failure messages appearing in the "message" attribute of the Failure and Error tags
OmitFailureMessageAttr bool

//Enable OmitCapturedStdOutErr to prevent captured stdout/stderr appearing in system-out
OmitCapturedStdOutErr bool

// Enable OmitSpecLabels to prevent labels from appearing in the spec name
OmitSpecLabels bool

// Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name
OmitLeafNodeType bool

// Enable OmitSuiteSetupNodes to prevent the creation of testcase entries for setup nodes
OmitSuiteSetupNodes bool
}
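For consumers of this vendored update, the new config struct is consumed through GenerateJUnitReportWithConfig. A minimal sketch, assuming a standard Ginkgo v2 suite (the output path and the chosen options are illustrative, not part of this diff):

	// Hypothetical suite file; only the reporters/types identifiers come from this change.
	package mysuite_test

	import (
		. "github.com/onsi/ginkgo/v2"
		"github.com/onsi/ginkgo/v2/reporters"
		"github.com/onsi/ginkgo/v2/types"
	)

	var _ = ReportAfterSuite("JUnit report", func(report Report) {
		config := reporters.JunitReportConfig{
			// Skip timelines for specs that passed or were skipped.
			OmitTimelinesForSpecState: types.SpecStatePassed | types.SpecStateSkipped,
			OmitLeafNodeType:          true,
		}
		_ = reporters.GenerateJUnitReportWithConfig(report, "junit.xml", config)
	})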
type JUnitTestSuites struct {
XMLName xml.Name `xml:"testsuites"`
// Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite)
@ -128,6 +148,10 @@ type JUnitFailure struct {
}

func GenerateJUnitReport(report types.Report, dst string) error {
return GenerateJUnitReportWithConfig(report, dst, JunitReportConfig{})
}

func GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig) error {
suite := JUnitTestSuite{
Name: report.SuiteDescription,
Package: report.SuitePath,
@ -149,7 +173,6 @@ func GenerateJUnitReport(report types.Report, dst string) error {
{"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)},
{"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)},
{"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)},
{"EmitSpecProgress", fmt.Sprintf("%t", report.SuiteConfig.EmitSpecProgress)},
{"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)},
{"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)},
{"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode},
@ -157,22 +180,33 @@ func GenerateJUnitReport(report types.Report, dst string) error {
},
}
for _, spec := range report.SpecReports {
if config.OmitSuiteSetupNodes && spec.LeafNodeType != types.NodeTypeIt {
continue
}
name := fmt.Sprintf("[%s]", spec.LeafNodeType)
if config.OmitLeafNodeType {
name = ""
}
if spec.FullText() != "" {
name = name + " " + spec.FullText()
}
labels := spec.Labels()
if len(labels) > 0 {
if len(labels) > 0 && !config.OmitSpecLabels {
name = name + " [" + strings.Join(labels, ", ") + "]"
}
name = strings.TrimSpace(name)

test := JUnitTestCase{
Name: name,
Classname: report.SuiteDescription,
Status: spec.State.String(),
Time: spec.RunTime.Seconds(),
SystemOut: systemOutForUnstructuredReporters(spec),
SystemErr: systemErrForUnstructuredReporters(spec),
}
if !spec.State.Is(config.OmitTimelinesForSpecState) {
test.SystemErr = systemErrForUnstructuredReporters(spec)
}
if !config.OmitCapturedStdOutErr {
test.SystemOut = systemOutForUnstructuredReporters(spec)
}
suite.Tests += 1

@ -193,6 +227,9 @@ func GenerateJUnitReport(report types.Report, dst string) error {
Type: "failed",
Description: failureDescriptionForUnstructuredReporters(spec),
}
if config.OmitFailureMessageAttr {
test.Failure.Message = ""
}
suite.Failures += 1
case types.SpecStateTimedout:
test.Failure = &JUnitFailure{
@ -200,6 +237,9 @@ func GenerateJUnitReport(report types.Report, dst string) error {
Type: "timedout",
Description: failureDescriptionForUnstructuredReporters(spec),
}
if config.OmitFailureMessageAttr {
test.Failure.Message = ""
}
suite.Failures += 1
case types.SpecStateInterrupted:
test.Error = &JUnitError{
@ -207,6 +247,9 @@ func GenerateJUnitReport(report types.Report, dst string) error {
Type: "interrupted",
Description: failureDescriptionForUnstructuredReporters(spec),
}
if config.OmitFailureMessageAttr {
test.Error.Message = ""
}
suite.Errors += 1
case types.SpecStateAborted:
test.Failure = &JUnitFailure{
@ -214,6 +257,9 @@ func GenerateJUnitReport(report types.Report, dst string) error {
Type: "aborted",
Description: failureDescriptionForUnstructuredReporters(spec),
}
if config.OmitFailureMessageAttr {
test.Failure.Message = ""
}
suite.Errors += 1
case types.SpecStatePanicked:
test.Error = &JUnitError{
@ -221,6 +267,9 @@ func GenerateJUnitReport(report types.Report, dst string) error {
Type: "panicked",
Description: failureDescriptionForUnstructuredReporters(spec),
}
if config.OmitFailureMessageAttr {
test.Error.Message = ""
}
suite.Errors += 1
}

@ -287,63 +336,25 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error)

func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string {
out := &strings.Builder{}
out.WriteString(spec.Failure.Location.String() + "\n")
out.WriteString(spec.Failure.Location.FullStackTrace)
if !spec.Failure.ProgressReport.IsZero() {
out.WriteString("\n")
NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitProgressReport(spec.Failure.ProgressReport)
}
NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitFailure(0, spec.State, spec.Failure, true)
if len(spec.AdditionalFailures) > 0 {
out.WriteString("\nThere were additional failures detected after the initial failure:\n")
for i, additionalFailure := range spec.AdditionalFailures {
NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitFailure(0, additionalFailure.State, additionalFailure.Failure, true)
if i < len(spec.AdditionalFailures)-1 {
out.WriteString("----------\n")
}
}
out.WriteString("\nThere were additional failures detected after the initial failure. These are visible in the timeline\n")
}
return out.String()
}

func systemErrForUnstructuredReporters(spec types.SpecReport) string {
return RenderTimeline(spec, true)
}

func RenderTimeline(spec types.SpecReport, noColor bool) string {
out := &strings.Builder{}
gw := spec.CapturedGinkgoWriterOutput
cursor := 0
for _, pr := range spec.ProgressReports {
if cursor < pr.GinkgoWriterOffset {
if pr.GinkgoWriterOffset < len(gw) {
out.WriteString(gw[cursor:pr.GinkgoWriterOffset])
cursor = pr.GinkgoWriterOffset
} else if cursor < len(gw) {
out.WriteString(gw[cursor:])
cursor = len(gw)
}
}
NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitProgressReport(pr)
}

if cursor < len(gw) {
out.WriteString(gw[cursor:])
}

NewDefaultReporter(types.ReporterConfig{NoColor: noColor, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline())
return out.String()
}

func systemOutForUnstructuredReporters(spec types.SpecReport) string {
systemOut := spec.CapturedStdOutErr
if len(spec.ReportEntries) > 0 {
systemOut += "\nReport Entries:\n"
for i, entry := range spec.ReportEntries {
systemOut += fmt.Sprintf("%s\n%s\n%s\n", entry.Name, entry.Location, entry.Time.Format(time.RFC3339Nano))
if representation := entry.StringRepresentation(); representation != "" {
systemOut += representation + "\n"
}
if i+1 < len(spec.ReportEntries) {
systemOut += "--\n"
}
}
}
return systemOut
return spec.CapturedStdOutErr
}

// Deprecated JUnitReporter (so folks can still compile their suites)
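RenderTimeline above is newly exported (it also backs GinkgoT().RenderTimeline, per the changelog). A hedged sketch of calling it from a reporting node to dump the timeline of failed specs; the fmt.Println sink is illustrative:

	package mysuite_test

	import (
		"fmt"

		. "github.com/onsi/ginkgo/v2"
		"github.com/onsi/ginkgo/v2/reporters"
		"github.com/onsi/ginkgo/v2/types"
	)

	var _ = ReportAfterEach(func(report SpecReport) {
		if report.State.Is(types.SpecStateFailureStates) {
			// noColor=true strips the {{...}} style tags for plain-text output.
			fmt.Println(reporters.RenderTimeline(report, true))
		}
	})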
18
vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
generated
vendored
18
vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
generated
vendored
@ -9,13 +9,21 @@ type Reporter interface {
WillRun(report types.SpecReport)
DidRun(report types.SpecReport)
SuiteDidEnd(report types.Report)

//Timeline emission
EmitFailure(state types.SpecState, failure types.Failure)
EmitProgressReport(progressReport types.ProgressReport)
EmitReportEntry(entry types.ReportEntry)
EmitSpecEvent(event types.SpecEvent)
}

type NoopReporter struct{}

func (n NoopReporter) SuiteWillBegin(report types.Report) {}
func (n NoopReporter) WillRun(report types.SpecReport) {}
func (n NoopReporter) DidRun(report types.SpecReport) {}
func (n NoopReporter) SuiteDidEnd(report types.Report) {}
func (n NoopReporter) EmitFailure(state types.SpecState, failure types.Failure) {}
func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {}
func (n NoopReporter) EmitReportEntry(entry types.ReportEntry) {}
func (n NoopReporter) EmitSpecEvent(event types.SpecEvent) {}
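NoopReporter now stubs the full timeline-emission surface, so a type can embed it and override only the hooks it needs. A sketch under that assumption (the logger type is hypothetical, not part of this diff):

	package reporterutil

	import (
		"fmt"

		"github.com/onsi/ginkgo/v2/reporters"
		"github.com/onsi/ginkgo/v2/types"
	)

	type failureLogger struct {
		reporters.NoopReporter // no-op implementations of every Reporter method
	}

	func (f failureLogger) DidRun(report types.SpecReport) {
		if report.Failed() {
			fmt.Printf("FAILED: %s (%s)\n", report.FullText(), report.FailureMessage())
		}
	}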
28
vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
generated
vendored
28
vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
generated
vendored
@ -35,7 +35,7 @@ func CurrentSpecReport() SpecReport {
}

/*
ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter

- ReportEntryVisibilityAlways: the default behavior - the ReportEntry is always emitted.
- ReportEntryVisibilityFailureOrVerbose: the ReportEntry is only emitted if the spec fails or if the tests are run with -v (similar to GinkgoWriter's behavior).
@ -50,9 +50,9 @@ const ReportEntryVisibilityAlways, ReportEntryVisibilityFailureOrVerbose, Report
/*
AddReportEntry generates and adds a new ReportEntry to the current spec's SpecReport.
It can take any of the following arguments:
- A single arbitrary object to attach as the Value of the ReportEntry. This object will be included in any generated reports and will be emitted to the console when the report is emitted.
- A ReportEntryVisibility enum to control the visibility of the ReportEntry
- An Offset or CodeLocation decoration to control the reported location of the ReportEntry

If the Value object implements `fmt.Stringer`, its `String()` representation is used when emitting to the console.
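A short usage sketch of the visibility enum described above (the entry name and value are illustrative):

	package mysuite_test

	import . "github.com/onsi/ginkgo/v2"

	var _ = Describe("reporting", func() {
		It("records a measurement", func() {
			// Emitted only if the spec fails or the suite runs with -v.
			AddReportEntry("cache hit-rate", 0.93, ReportEntryVisibilityFailureOrVerbose)
		})
	})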
@ -100,6 +100,25 @@ func ReportAfterEach(body func(SpecReport), args ...interface{}) bool {
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...))
}

/*
ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report.

They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite.
ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)

# When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportBeforeSuite

You cannot nest any other Ginkgo nodes within a ReportBeforeSuite node's closure.
You can learn more about ReportBeforeSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically

You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
*/
func ReportBeforeSuite(body func(Report), args ...interface{}) bool {
combinedArgs := []interface{}{body}
combinedArgs = append(combinedArgs, args...)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))
}

/*
ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report.

@ -113,6 +132,7 @@ In addition to using ReportAfterSuite to programmatically generate suite reports

You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically

You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
*/
func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool {
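A minimal sketch of the new ReportBeforeSuite hook; it pairs naturally with the PreRunStats fields documented in the types.go hunks further down (the printed message is illustrative):

	package mysuite_test

	import (
		"fmt"

		. "github.com/onsi/ginkgo/v2"
	)

	var _ = ReportBeforeSuite(func(report Report) {
		// At this point report.SpecReports is empty; only pre-run stats are set.
		fmt.Printf("will run %d of %d specs\n",
			report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)
	})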
33
vendor/github.com/onsi/ginkgo/v2/table_dsl.go
generated
vendored
33
vendor/github.com/onsi/ginkgo/v2/table_dsl.go
generated
vendored
@ -13,7 +13,7 @@ import (
/*
The EntryDescription decorator allows you to pass a format string to DescribeTable() and Entry(). This format string is used to generate entry names via:

	fmt.Sprintf(formatString, parameters...)

where parameters are the parameters passed into the entry.

@ -32,19 +32,20 @@ DescribeTable describes a table-driven spec.

For example:

	DescribeTable("a simple table",
	    func(x int, y int, expected bool) {
	        Ω(x > y).Should(Equal(expected))
	    },
	    Entry("x > y", 1, 0, true),
	    Entry("x == y", 0, 0, false),
	    Entry("x < y", 0, 1, false),
	)

You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs
And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
*/
func DescribeTable(description string, args ...interface{}) bool {
GinkgoHelper()
generateTable(description, args...)
return true
}
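To complement the doc comment above, a sketch combining EntryDescription with Entry's nil description, which the upstream docs describe as falling back to the table-level format string (the table body is illustrative):

	package mysuite_test

	import (
		. "github.com/onsi/ginkgo/v2"
		. "github.com/onsi/gomega"
	)

	func maxInt(a, b int) int {
		if a > b {
			return a
		}
		return b
	}

	var _ = DescribeTable("maximum",
		func(a, b, expected int) {
			Expect(maxInt(a, b)).To(Equal(expected))
		},
		EntryDescription("max(%d, %d)"), // names entries via fmt.Sprintf
		Entry(nil, 1, 2, 2),             // reported as "max(1, 2)"
		Entry(nil, 7, 3, 7),
	)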
@ -53,6 +54,7 @@ func DescribeTable(description string, args ...interface{}) bool {
You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
*/
func FDescribeTable(description string, args ...interface{}) bool {
GinkgoHelper()
args = append(args, internal.Focus)
generateTable(description, args...)
return true
@ -62,6 +64,7 @@ func FDescribeTable(description string, args ...interface{}) bool {
You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
*/
func PDescribeTable(description string, args ...interface{}) bool {
GinkgoHelper()
args = append(args, internal.Pending)
generateTable(description, args...)
return true
@ -95,26 +98,29 @@ If you want to generate interruptible specs simply write a Table function that a
You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs
*/
func Entry(description interface{}, args ...interface{}) TableEntry {
GinkgoHelper()
decorations, parameters := internal.PartitionDecorations(args...)
return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)}
return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
}

/*
You can focus a particular entry with FEntry. This is equivalent to FIt.
*/
func FEntry(description interface{}, args ...interface{}) TableEntry {
GinkgoHelper()
decorations, parameters := internal.PartitionDecorations(args...)
decorations = append(decorations, internal.Focus)
return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)}
return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
}

/*
You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
*/
func PEntry(description interface{}, args ...interface{}) TableEntry {
GinkgoHelper()
decorations, parameters := internal.PartitionDecorations(args...)
decorations = append(decorations, internal.Pending)
return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)}
return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
}

/*
@ -126,7 +132,8 @@ var contextType = reflect.TypeOf(new(context.Context)).Elem()
var specContextType = reflect.TypeOf(new(SpecContext)).Elem()

func generateTable(description string, args ...interface{}) {
cl := types.NewCodeLocation(2)
GinkgoHelper()
cl := types.NewCodeLocation(0)
containerNodeArgs := []interface{}{cl}

entries := []TableEntry{}
78
vendor/github.com/onsi/ginkgo/v2/types/code_location.go
generated
vendored
78
vendor/github.com/onsi/ginkgo/v2/types/code_location.go
generated
vendored
@ -1,4 +1,5 @@
package types

import (
"fmt"
"os"
@ -6,6 +7,7 @@ import (
"runtime"
"runtime/debug"
"strings"
"sync"
)

type CodeLocation struct {
@ -37,6 +39,73 @@ func (codeLocation CodeLocation) ContentsOfLine() string {
return lines[codeLocation.LineNumber-1]
}

type codeLocationLocator struct {
pcs map[uintptr]bool
helpers map[string]bool
lock *sync.Mutex
}

func (c *codeLocationLocator) addHelper(pc uintptr) {
c.lock.Lock()
defer c.lock.Unlock()

if c.pcs[pc] {
return
}
c.lock.Unlock()
f := runtime.FuncForPC(pc)
c.lock.Lock()
if f == nil {
return
}
c.helpers[f.Name()] = true
c.pcs[pc] = true
}

func (c *codeLocationLocator) hasHelper(name string) bool {
c.lock.Lock()
defer c.lock.Unlock()
return c.helpers[name]
}

func (c *codeLocationLocator) getCodeLocation(skip int) CodeLocation {
pc := make([]uintptr, 40)
n := runtime.Callers(skip+2, pc)
if n == 0 {
return CodeLocation{}
}
pc = pc[:n]
frames := runtime.CallersFrames(pc)
for {
frame, more := frames.Next()
if !c.hasHelper(frame.Function) {
return CodeLocation{FileName: frame.File, LineNumber: frame.Line}
}
if !more {
break
}
}
return CodeLocation{}
}

var clLocator = &codeLocationLocator{
pcs: map[uintptr]bool{},
helpers: map[string]bool{},
lock: &sync.Mutex{},
}

// MarkAsHelper is used by GinkgoHelper to mark the caller (appropriately offset by skip) as a helper. You can use this directly if you need to provide an optional `skip` to mark functions further up the call stack as helpers.
func MarkAsHelper(optionalSkip ...int) {
skip := 1
if len(optionalSkip) > 0 {
skip += optionalSkip[0]
}
pc, _, _, ok := runtime.Caller(skip)
if ok {
clLocator.addHelper(pc)
}
}

func NewCustomCodeLocation(message string) CodeLocation {
return CodeLocation{
CustomMessage: message,
@ -44,14 +113,13 @@ func NewCustomCodeLocation(message string) CodeLocation {
}

func NewCodeLocation(skip int) CodeLocation {
_, file, line, _ := runtime.Caller(skip + 1)
return CodeLocation{FileName: file, LineNumber: line}
return clLocator.getCodeLocation(skip + 1)
}

func NewCodeLocationWithStackTrace(skip int) CodeLocation {
_, file, line, _ := runtime.Caller(skip + 1)
stackTrace := PruneStack(string(debug.Stack()), skip+1)
return CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
cl := clLocator.getCodeLocation(skip + 1)
cl.FullStackTrace = PruneStack(string(debug.Stack()), skip+1)
return cl
}

// PruneStack removes references to functions that are internal to Ginkgo
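The new locator is what makes helper attribution work: frames registered via MarkAsHelper are skipped when computing a code location. The public entry point is GinkgoHelper(), which wraps types.MarkAsHelper. A sketch of the intended usage pattern (the helper function itself is hypothetical):

	package mysuite_test

	import (
		. "github.com/onsi/ginkgo/v2"
		. "github.com/onsi/gomega"
	)

	func expectHealthy(status string) {
		GinkgoHelper() // failures below are attributed to expectHealthy's caller
		Expect(status).To(Equal("healthy"))
	}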
53
vendor/github.com/onsi/ginkgo/v2/types/config.go
generated
vendored
53
vendor/github.com/onsi/ginkgo/v2/types/config.go
generated
vendored
@ -8,6 +8,7 @@ package types
import (
"flag"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
@ -26,11 +27,11 @@ type SuiteConfig struct {
FailOnPending bool
FailFast bool
FlakeAttempts int
EmitSpecProgress bool
DryRun bool
PollProgressAfter time.Duration
PollProgressInterval time.Duration
Timeout time.Duration
EmitSpecProgress bool // this is deprecated but its removal is causing compile issue for some users that were setting it manually
OutputInterceptorMode string
SourceRoots []string
GracePeriod time.Duration
@ -81,13 +82,12 @@ func (vl VerbosityLevel) LT(comp VerbosityLevel) bool {

// Configuration for Ginkgo's reporter
type ReporterConfig struct {
NoColor bool
SlowSpecThreshold time.Duration
Succinct bool
Verbose bool
VeryVerbose bool
FullTrace bool
AlwaysEmitGinkgoWriter bool
NoColor bool
Succinct bool
Verbose bool
VeryVerbose bool
FullTrace bool
ShowNodeEvents bool

JSONReport string
JUnitReport string
@ -110,9 +110,7 @@ func (rc ReporterConfig) WillGenerateReport() bool {
}

func NewDefaultReporterConfig() ReporterConfig {
return ReporterConfig{
SlowSpecThreshold: 5 * time.Second,
}
return ReporterConfig{}
}

// Configuration for the Ginkgo CLI
@ -235,6 +233,9 @@ type deprecatedConfig struct {
SlowSpecThresholdWithFLoatUnits float64
Stream bool
Notify bool
EmitSpecProgress bool
SlowSpecThreshold time.Duration
AlwaysEmitGinkgoWriter bool
}

// Flags
@ -275,8 +276,6 @@ var SuiteConfigFlags = GinkgoFlags{

{KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags",
Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."},
{KeyPath: "S.EmitSpecProgress", Name: "progress", SectionKey: "debug",
Usage: "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter."},
{KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0",
Usage: "Emit node progress reports periodically if node hasn't completed after this duration."},
{KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s",
@ -303,6 +302,8 @@ var SuiteConfigFlags = GinkgoFlags{

{KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"},
{KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"},
{KeyPath: "D.EmitSpecProgress", DeprecatedName: "progress", SectionKey: "debug",
DeprecatedVersion: "2.5.0", Usage: ". The functionality provided by --progress was confusing and is no longer needed. Use --show-node-events instead to see node entry and exit events included in the timeline of failed and verbose specs. Or you can run with -vv to always see all node events. Lastly, --poll-progress-after and the PollProgressAfter decorator now provide a better mechanism for debugging specs that tend to get stuck."},
}

// ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI)
@ -319,8 +320,6 @@ var ParallelConfigFlags = GinkgoFlags{
var ReporterConfigFlags = GinkgoFlags{
{KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags",
Usage: "If set, suppress color output in default reporter."},
{KeyPath: "R.SlowSpecThreshold", Name: "slow-spec-threshold", SectionKey: "output", UsageArgument: "duration", UsageDefaultValue: "5s",
Usage: "Specs that take longer to run than this threshold are flagged as slow by the default reporter."},
{KeyPath: "R.Verbose", Name: "v", SectionKey: "output",
Usage: "If set, emits more output including GinkgoWriter contents."},
{KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output",
@ -329,8 +328,8 @@ var ReporterConfigFlags = GinkgoFlags{
Usage: "If set, default reporter prints out a very succinct report"},
{KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output",
Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
{KeyPath: "R.AlwaysEmitGinkgoWriter", Name: "always-emit-ginkgo-writer", SectionKey: "output", DeprecatedName: "reportPassed", DeprecatedDocLink: "renamed--reportpassed",
Usage: "If set, default reporter prints out captured output of passed tests."},
{KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output",
Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"},

{KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
@ -343,6 +342,8 @@ var ReporterConfigFlags = GinkgoFlags{
Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"},
{KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
{KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
{KeyPath: "D.SlowSpecThreshold", DeprecatedName: "slow-spec-threshold", SectionKey: "output", Usage: "--slow-spec-threshold has been deprecated and will be removed in a future version of Ginkgo. This feature has proved to be more noisy than useful. You can use --poll-progress-after, instead, to get more actionable feedback about potentially slow specs and understand where they might be getting stuck.", DeprecatedVersion: "2.5.0"},
{KeyPath: "D.AlwaysEmitGinkgoWriter", DeprecatedName: "always-emit-ginkgo-writer", SectionKey: "output", Usage: " - use -v instead, or one of Ginkgo's machine-readable report formats to get GinkgoWriter output for passing specs."},
}

// BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process
@ -600,13 +601,29 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo
}

// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test
func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string) ([]string, error) {
func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) {
// if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure
// the built test binary can generate a coverprofile
if goFlagsConfig.CoverProfile != "" {
goFlagsConfig.Cover = true
}

if goFlagsConfig.CoverPkg != "" {
coverPkgs := strings.Split(goFlagsConfig.CoverPkg, ",")
adjustedCoverPkgs := make([]string, len(coverPkgs))
for i, coverPkg := range coverPkgs {
coverPkg = strings.Trim(coverPkg, " ")
if strings.HasPrefix(coverPkg, "./") {
// this is a relative coverPkg - we need to reroot it
adjustedCoverPkgs[i] = "./" + filepath.Join(pathToInvocationPath, strings.TrimPrefix(coverPkg, "./"))
} else {
// this is a package name - don't touch it
adjustedCoverPkgs[i] = coverPkg
}
}
goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",")
}

args := []string{"test", "-c", "-o", destination, packageToBuild}
goArgs, err := GenerateFlagArgs(
GoBuildFlags,
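The rerooting rule above, isolated for clarity: relative -coverpkg entries are rebased onto the invocation directory, while plain package patterns pass through untouched. A standalone sketch under those assumptions (values are illustrative):

	package main

	import (
		"fmt"
		"path/filepath"
		"strings"
	)

	func reroot(coverPkg, pathToInvocationPath string) string {
		if strings.HasPrefix(coverPkg, "./") {
			return "./" + filepath.Join(pathToInvocationPath, strings.TrimPrefix(coverPkg, "./"))
		}
		return coverPkg
	}

	func main() {
		fmt.Println(reroot("./...", "../.."))                     // ./../../...
		fmt.Println(reroot("github.com/onsi/ginkgo/v2", "../..")) // unchanged
	}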
9
vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
generated
vendored
9
vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
generated
vendored
@ -38,7 +38,7 @@ func (d deprecations) Async() Deprecation {

func (d deprecations) Measure() Deprecation {
return Deprecation{
Message: "Measure is deprecated and will be removed in Ginkgo V2. Please migrate to gomega/gmeasure.",
Message: "Measure is deprecated and has been removed from Ginkgo V2. Any Measure tests in your spec will not run. Please migrate to gomega/gmeasure.",
DocLink: "removed-measure",
Version: "1.16.3",
}
@ -83,6 +83,13 @@ func (d deprecations) Nodot() Deprecation {
}
}

func (d deprecations) SuppressProgressReporting() Deprecation {
return Deprecation{
Message: "Improvements to how reporters emit timeline information means that SuppressProgressReporting is no longer necessary and has been deprecated.",
Version: "2.5.0",
}
}

type DeprecationTracker struct {
deprecations map[Deprecation][]CodeLocation
lock *sync.Mutex
19
vendor/github.com/onsi/ginkgo/v2/types/errors.go
generated
vendored
19
vendor/github.com/onsi/ginkgo/v2/types/errors.go
generated
vendored
@ -108,8 +108,8 @@ Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/

func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error {
docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
if nodeType.Is(NodeTypeReportAfterSuite) {
docLink = "reporting-nodes---reportaftersuite"
if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) {
docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite"
}

return GinkgoError{
@ -125,8 +125,8 @@ func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocatio

func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error {
docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
if nodeType.Is(NodeTypeReportAfterSuite) {
docLink = "reporting-nodes---reportaftersuite"
if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) {
docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite"
}

return GinkgoError{
@ -298,6 +298,15 @@ func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType N
}
}

func (g ginkgoErrors) InvalidContinueOnFailureDecoration(cl CodeLocation) error {
return GinkgoError{
Heading: "ContinueOnFailure not decorating an outermost Ordered Container",
Message: "ContinueOnFailure can only decorate an Ordered container, and this Ordered container must be the outermost Ordered container.",
CodeLocation: cl,
DocLink: "ordered-containers",
}
}

/* DeferCleanup errors */
func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error {
return GinkgoError{
@ -320,7 +329,7 @@ func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation)
func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error {
return GinkgoError{
Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType),
Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a ReportAfterEach or ReportAfterSuite.",
Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a Reporting node.",
CodeLocation: cl,
DocLink: "cleaning-up-our-cleanup-code-defercleanup",
}
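For reference, the placement that the new InvalidContinueOnFailureDecoration error enforces, sketched as a spec fragment (the spec bodies are illustrative):

	package mysuite_test

	import . "github.com/onsi/ginkgo/v2"

	// Valid: ContinueOnFailure decorates the outermost Ordered container.
	var _ = Describe("migration steps", Ordered, ContinueOnFailure, func() {
		It("step one", func() {})
		It("step two", func() {}) // still runs even if step one fails
	})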
11
vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
generated
vendored
11
vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
generated
vendored
@ -272,12 +272,23 @@ func tokenize(input string) func() (*treeNode, error) {
}
}

func MustParseLabelFilter(input string) LabelFilter {
filter, err := ParseLabelFilter(input)
if err != nil {
panic(err)
}
return filter
}

func ParseLabelFilter(input string) (LabelFilter, error) {
if DEBUG_LABEL_FILTER_PARSING {
fmt.Println("\n==============")
fmt.Println("Input: ", input)
fmt.Print("Tokens: ")
}
if input == "" {
return func(_ []string) bool { return true }, nil
}
nextToken := tokenize(input)

root := &treeNode{token: lfTokenRoot}
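MustParseLabelFilter is a thin panic-on-error wrapper around ParseLabelFilter; a LabelFilter is just a func([]string) bool over a spec's labels. A standalone sketch (the filter expression is illustrative):

	package main

	import (
		"fmt"

		"github.com/onsi/ginkgo/v2/types"
	)

	func main() {
		filter := types.MustParseLabelFilter("integration && !slow")
		fmt.Println(filter([]string{"integration"}))         // true
		fmt.Println(filter([]string{"integration", "slow"})) // false
	}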
14
vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
generated
vendored
14
vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
generated
vendored
@ -6,8 +6,8 @@ import (
"time"
)

//ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports
//and across the network connection when running in parallel
// ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports
// and across the network connection when running in parallel
type ReportEntryValue struct {
raw interface{} //unexported to prevent gob from freaking out about unregistered structs
AsJSON string
@ -85,10 +85,12 @@ func (rev *ReportEntryValue) GobDecode(data []byte) error {
type ReportEntry struct {
// Visibility captures the visibility policy for this ReportEntry
Visibility ReportEntryVisibility
// Time captures the time the AddReportEntry was called
Time time.Time
// Location captures the location of the AddReportEntry call
Location CodeLocation

Time time.Time //need this for backwards compatibility
TimelineLocation TimelineLocation

// Name captures the name of this report
Name string
// Value captures the (optional) object passed into AddReportEntry - this can be
@ -120,7 +122,9 @@ func (entry ReportEntry) GetRawValue() interface{} {
return entry.Value.GetRawValue()
}

func (entry ReportEntry) GetTimelineLocation() TimelineLocation {
return entry.TimelineLocation
}

type ReportEntries []ReportEntry
310
vendor/github.com/onsi/ginkgo/v2/types/types.go
generated
vendored
310
vendor/github.com/onsi/ginkgo/v2/types/types.go
generated
vendored
@ -2,6 +2,8 @@ package types

import (
"encoding/json"
"fmt"
"sort"
"strings"
"time"
)
@ -56,19 +58,20 @@ type Report struct {
SuiteConfig SuiteConfig

//SpecReports is a list of all SpecReports generated by this test run
//It is empty when the SuiteReport is provided to ReportBeforeSuite
SpecReports SpecReports
}

//PreRunStats contains a set of stats captured before the test run begins. This is primarily used
//by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs)
//and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters.
// PreRunStats contains a set of stats captured before the test run begins. This is primarily used
// by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs)
// and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters.
type PreRunStats struct {
TotalSpecs int
SpecsThatWillRun int
}

//Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports form individual parallel processes
//to form a complete final report.
// Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports from individual parallel processes
// to form a complete final report.
func (report Report) Add(other Report) Report {
report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded

@ -147,6 +150,9 @@ type SpecReport struct {
// ParallelProcess captures the parallel process that this spec ran on
ParallelProcess int

// RunningInParallel captures whether this spec is part of a suite that ran in parallel
RunningInParallel bool

//Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip())
//It includes detailed information about the Failure
Failure Failure
@ -178,6 +184,9 @@ type SpecReport struct {

// AdditionalFailures contains any failures that occurred after the initial spec failure. These typically occur in cleanup nodes after the initial failure and are only emitted when running in verbose mode.
AdditionalFailures []AdditionalFailure

// SpecEvents capture additional events that occur during the spec run
SpecEvents SpecEvents
}

func (report SpecReport) MarshalJSON() ([]byte, error) {
@ -204,6 +213,7 @@ func (report SpecReport) MarshalJSON() ([]byte, error) {
ReportEntries ReportEntries `json:",omitempty"`
ProgressReports []ProgressReport `json:",omitempty"`
AdditionalFailures []AdditionalFailure `json:",omitempty"`
SpecEvents SpecEvents `json:",omitempty"`
}{
ContainerHierarchyTexts: report.ContainerHierarchyTexts,
ContainerHierarchyLocations: report.ContainerHierarchyLocations,
@ -238,6 +248,9 @@ func (report SpecReport) MarshalJSON() ([]byte, error) {
if len(report.AdditionalFailures) > 0 {
out.AdditionalFailures = report.AdditionalFailures
}
if len(report.SpecEvents) > 0 {
out.SpecEvents = report.SpecEvents
}

return json.Marshal(out)
}
@ -255,13 +268,13 @@ func (report SpecReport) CombinedOutput() string {
return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput
}

//Failed returns true if report.State is one of the SpecStateFailureStates
// Failed returns true if report.State is one of the SpecStateFailureStates
// (SpecStateFailed, SpecStatePanicked, SpecStateInterrupted, SpecStateAborted)
func (report SpecReport) Failed() bool {
|
||||
return report.State.Is(SpecStateFailureStates)
|
||||
}
|
||||
|
||||
//FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText
|
||||
// FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText
|
||||
func (report SpecReport) FullText() string {
|
||||
texts := []string{}
|
||||
texts = append(texts, report.ContainerHierarchyTexts...)
|
||||
@ -271,7 +284,7 @@ func (report SpecReport) FullText() string {
|
||||
return strings.Join(texts, " ")
|
||||
}
|
||||
|
||||
//Labels returns a deduped set of all the spec's Labels.
|
||||
// Labels returns a deduped set of all the spec's Labels.
|
||||
func (report SpecReport) Labels() []string {
|
||||
out := []string{}
|
||||
seen := map[string]bool{}
|
||||
@ -293,7 +306,7 @@ func (report SpecReport) Labels() []string {
|
||||
return out
|
||||
}
|
||||
|
||||
//MatchesLabelFilter returns true if the spec satisfies the passed in label filter query
|
||||
// MatchesLabelFilter returns true if the spec satisfies the passed in label filter query
|
||||
func (report SpecReport) MatchesLabelFilter(query string) (bool, error) {
|
||||
filter, err := ParseLabelFilter(query)
|
||||
if err != nil {
|
||||
@ -302,29 +315,54 @@ func (report SpecReport) MatchesLabelFilter(query string) (bool, error) {
|
||||
return filter(report.Labels()), nil
|
||||
}
|
||||
|
||||
//FileName() returns the name of the file containing the spec
|
||||
// FileName() returns the name of the file containing the spec
|
||||
func (report SpecReport) FileName() string {
|
||||
return report.LeafNodeLocation.FileName
|
||||
}
|
||||
|
||||
//LineNumber() returns the line number of the leaf node
|
||||
// LineNumber() returns the line number of the leaf node
|
||||
func (report SpecReport) LineNumber() int {
|
||||
return report.LeafNodeLocation.LineNumber
|
||||
}
|
||||
|
||||
//FailureMessage() returns the failure message (or empty string if the test hasn't failed)
|
||||
// FailureMessage() returns the failure message (or empty string if the test hasn't failed)
|
||||
func (report SpecReport) FailureMessage() string {
|
||||
return report.Failure.Message
|
||||
}
|
||||
|
||||
//FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed)
|
||||
// FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed)
|
||||
func (report SpecReport) FailureLocation() CodeLocation {
|
||||
return report.Failure.Location
|
||||
}
|
||||
|
||||
// Timeline() returns a timeline view of the report
|
||||
func (report SpecReport) Timeline() Timeline {
|
||||
timeline := Timeline{}
|
||||
if !report.Failure.IsZero() {
|
||||
timeline = append(timeline, report.Failure)
|
||||
if report.Failure.AdditionalFailure != nil {
|
||||
timeline = append(timeline, *(report.Failure.AdditionalFailure))
|
||||
}
|
||||
}
|
||||
for _, additionalFailure := range report.AdditionalFailures {
|
||||
timeline = append(timeline, additionalFailure)
|
||||
}
|
||||
for _, reportEntry := range report.ReportEntries {
|
||||
timeline = append(timeline, reportEntry)
|
||||
}
|
||||
for _, progressReport := range report.ProgressReports {
|
||||
timeline = append(timeline, progressReport)
|
||||
}
|
||||
for _, specEvent := range report.SpecEvents {
|
||||
timeline = append(timeline, specEvent)
|
||||
}
|
||||
sort.Sort(timeline)
|
||||
return timeline
|
||||
}

type SpecReports []SpecReport

//WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes
// WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes
func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports {
	count := 0
	for i := range reports {
@ -344,7 +382,7 @@ func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports {
	return out
}

//WithState returns the subset of SpecReports with State matching one of the requested SpecStates
// WithState returns the subset of SpecReports with State matching one of the requested SpecStates
func (reports SpecReports) WithState(states SpecState) SpecReports {
	count := 0
	for i := range reports {
@ -363,7 +401,7 @@ func (reports SpecReports) WithState(states SpecState) SpecReports {
	return out
}

//CountWithState returns the number of SpecReports with State matching one of the requested SpecStates
// CountWithState returns the number of SpecReports with State matching one of the requested SpecStates
func (reports SpecReports) CountWithState(states SpecState) int {
	n := 0
	for i := range reports {
@ -374,7 +412,7 @@ func (reports SpecReports) CountWithState(states SpecState) int {
	return n
}

//If the Spec passes, CountOfFlakedSpecs returns the number of SpecReports that failed after multiple attempts.
// If the Spec passes, CountOfFlakedSpecs returns the number of SpecReports that failed after multiple attempts.
func (reports SpecReports) CountOfFlakedSpecs() int {
	n := 0
	for i := range reports {
@ -385,7 +423,7 @@ func (reports SpecReports) CountOfFlakedSpecs() int {
	return n
}

//If the Spec fails, CountOfRepeatedSpecs returns the number of SpecReports that passed after multiple attempts
// If the Spec fails, CountOfRepeatedSpecs returns the number of SpecReports that passed after multiple attempts
func (reports SpecReports) CountOfRepeatedSpecs() int {
	n := 0
	for i := range reports {
@ -396,6 +434,53 @@ func (reports SpecReports) CountOfRepeatedSpecs() int {
	return n
}
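These filters compose naturally. A minimal sketch (not part of the vendored diff; the "summary" label is hypothetical, and `SpecStateFailureStates` is the failure-state mask referenced later in this file):

```go
// Minimal sketch: aggregating suite results from a ReportAfterSuite node.
package example

import (
	"fmt"

	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/types"
)

var _ = ReportAfterSuite("summary", func(report Report) {
	// Only look at leaf It nodes, then bucket them by state.
	its := report.SpecReports.WithLeafNodeType(types.NodeTypeIt)
	fmt.Printf("%d failed, %d passed, %d flaked\n",
		its.CountWithState(types.SpecStateFailureStates),
		its.CountWithState(types.SpecStatePassed),
		its.CountOfFlakedSpecs())
})
```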

// TimelineLocation captures the location of an event in the spec's timeline
type TimelineLocation struct {
	//Offset is the offset (in bytes) of the event relative to the GinkgoWriter stream
	Offset int `json:",omitempty"`

	//Order is the order of the event with respect to other events. The absolute value of Order
	//is irrelevant. All that matters is that an event with a lower Order occurs before an event with a higher Order
	Order int `json:",omitempty"`

	Time time.Time
}

// TimelineEvent represents an event on the timeline
// consumers of Timeline will need to check the concrete type of each entry to determine how to handle it
type TimelineEvent interface {
	GetTimelineLocation() TimelineLocation
}

type Timeline []TimelineEvent

func (t Timeline) Len() int { return len(t) }
func (t Timeline) Less(i, j int) bool {
	return t[i].GetTimelineLocation().Order < t[j].GetTimelineLocation().Order
}
func (t Timeline) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t Timeline) WithoutHiddenReportEntries() Timeline {
	out := Timeline{}
	for _, event := range t {
		if reportEntry, isReportEntry := event.(ReportEntry); isReportEntry && reportEntry.Visibility == ReportEntryVisibilityNever {
			continue
		}
		out = append(out, event)
	}
	return out
}

func (t Timeline) WithoutVeryVerboseSpecEvents() Timeline {
	out := Timeline{}
	for _, event := range t {
		if specEvent, isSpecEvent := event.(SpecEvent); isSpecEvent && specEvent.IsOnlyVisibleAtVeryVerbose() {
			continue
		}
		out = append(out, event)
	}
	return out
}
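The two filters above chain, and `sort.Sort` on a `Timeline` keys off `Order` alone. A minimal sketch (not part of the vendored diff; `printTimeline` is a hypothetical helper):

```go
// Minimal sketch: rendering a timeline the way a normal-verbosity
// reporter might, using only the API shown above.
package example

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

func printTimeline(report types.SpecReport) {
	timeline := report.Timeline().
		WithoutHiddenReportEntries().
		WithoutVeryVerboseSpecEvents()
	for _, event := range timeline {
		loc := event.GetTimelineLocation()
		// Order gives stable event sequencing; Offset is the byte position
		// of the event within the GinkgoWriter stream.
		fmt.Printf("#%d (offset %d): %T\n", loc.Order, loc.Offset, event)
	}
}
```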

// Failure captures failure information for an individual test
type Failure struct {
	// Message - the failure message passed into Fail(...). When using a matcher library
@ -408,6 +493,8 @@ type Failure struct {
	// This CodeLocation will include a fully-populated StackTrace
	Location CodeLocation

	TimelineLocation TimelineLocation

	// ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked)
	// then ForwardedPanic will be populated with a string representation of the captured panic.
	ForwardedPanic string `json:",omitempty"`
@ -420,19 +507,29 @@ type Failure struct {
	// FailureNodeType will contain the NodeType of the node in which the failure occurred.
	// FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred.
	// If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred.
	FailureNodeContext FailureNodeContext
	FailureNodeType NodeType
	FailureNodeLocation CodeLocation
	FailureNodeContainerIndex int
	FailureNodeContext FailureNodeContext `json:",omitempty"`

	FailureNodeType NodeType `json:",omitempty"`

	FailureNodeLocation CodeLocation `json:",omitempty"`

	FailureNodeContainerIndex int `json:",omitempty"`

	//ProgressReport is populated if the spec was interrupted or timed out
	ProgressReport ProgressReport
	ProgressReport ProgressReport `json:",omitempty"`

	//AdditionalFailure is non-nil if a follow-on failure occurred within the same node after the primary failure. This only happens when a node has timed out or been interrupted. In such cases the AdditionalFailure can include information about where/why the spec was stuck.
	AdditionalFailure *AdditionalFailure `json:",omitempty"`
}

func (f Failure) IsZero() bool {
	return f.Message == "" && (f.Location == CodeLocation{})
}

func (f Failure) GetTimelineLocation() TimelineLocation {
	return f.TimelineLocation
}
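A minimal sketch of inspecting a `Failure` (not part of the vendored diff; `describeFailure` is a hypothetical helper). `IsZero` is the guard before reading failure details, and `AdditionalFailure` carries the follow-on failure described in the struct comment above:

```go
// Minimal sketch: summarizing a spec's failure, if any.
package example

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

func describeFailure(report types.SpecReport) string {
	if report.Failure.IsZero() {
		return "spec did not fail"
	}
	f := report.Failure
	out := fmt.Sprintf("%s in %s at %s", f.Message, f.FailureNodeType, f.Location)
	if f.AdditionalFailure != nil {
		// Present only when the node timed out or was interrupted.
		out += fmt.Sprintf(" (follow-on: %s)", f.AdditionalFailure.Failure.Message)
	}
	return out
}
```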

// FailureNodeContext captures the location context for the node containing the failing line of code
type FailureNodeContext uint

@ -471,6 +568,10 @@ type AdditionalFailure struct {
	Failure Failure
}

func (f AdditionalFailure) GetTimelineLocation() TimelineLocation {
	return f.Failure.TimelineLocation
}

// SpecState captures the state of a spec
// To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)`
type SpecState uint
@ -503,6 +604,9 @@ var ssEnumSupport = NewEnumSupport(map[uint]string{
func (ss SpecState) String() string {
	return ssEnumSupport.String(uint(ss))
}
func (ss SpecState) GomegaString() string {
	return ssEnumSupport.String(uint(ss))
}
func (ss *SpecState) UnmarshalJSON(b []byte) error {
	out, err := ssEnumSupport.UnmarshJSON(b)
	*ss = SpecState(out)
@ -520,38 +624,40 @@ func (ss SpecState) Is(states SpecState) bool {

// ProgressReport captures the progress of the current spec. It is, effectively, a structured Ginkgo-aware stack trace
type ProgressReport struct {
	Message string
	ParallelProcess int
	RunningInParallel bool
	Message string `json:",omitempty"`
	ParallelProcess int `json:",omitempty"`
	RunningInParallel bool `json:",omitempty"`

	Time time.Time
	ContainerHierarchyTexts []string `json:",omitempty"`
	LeafNodeText string `json:",omitempty"`
	LeafNodeLocation CodeLocation `json:",omitempty"`
	SpecStartTime time.Time `json:",omitempty"`

	ContainerHierarchyTexts []string
	LeafNodeText string
	LeafNodeLocation CodeLocation
	SpecStartTime time.Time
	CurrentNodeType NodeType `json:",omitempty"`
	CurrentNodeText string `json:",omitempty"`
	CurrentNodeLocation CodeLocation `json:",omitempty"`
	CurrentNodeStartTime time.Time `json:",omitempty"`

	CurrentNodeType NodeType
	CurrentNodeText string
	CurrentNodeLocation CodeLocation
	CurrentNodeStartTime time.Time
	CurrentStepText string `json:",omitempty"`
	CurrentStepLocation CodeLocation `json:",omitempty"`
	CurrentStepStartTime time.Time `json:",omitempty"`

	CurrentStepText string
	CurrentStepLocation CodeLocation
	CurrentStepStartTime time.Time
	AdditionalReports []string `json:",omitempty"`

	AdditionalReports []string
	CapturedGinkgoWriterOutput string `json:",omitempty"`
	TimelineLocation TimelineLocation `json:",omitempty"`

	CapturedGinkgoWriterOutput string `json:",omitempty"`
	GinkgoWriterOffset int

	Goroutines []Goroutine
	Goroutines []Goroutine `json:",omitempty"`
}

func (pr ProgressReport) IsZero() bool {
	return pr.CurrentNodeType == NodeTypeInvalid
}

func (pr ProgressReport) Time() time.Time {
	return pr.TimelineLocation.Time
}

func (pr ProgressReport) SpecGoroutine() Goroutine {
	for _, goroutine := range pr.Goroutines {
		if goroutine.IsSpecGoroutine {
@ -589,6 +695,22 @@ func (pr ProgressReport) WithoutCapturedGinkgoWriterOutput() ProgressReport {
	return out
}

func (pr ProgressReport) WithoutOtherGoroutines() ProgressReport {
	out := pr
	filteredGoroutines := []Goroutine{}
	for _, goroutine := range pr.Goroutines {
		if goroutine.IsSpecGoroutine || goroutine.HasHighlights() {
			filteredGoroutines = append(filteredGoroutines, goroutine)
		}
	}
	out.Goroutines = filteredGoroutines
	return out
}

func (pr ProgressReport) GetTimelineLocation() TimelineLocation {
	return pr.TimelineLocation
}
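The two reducers shown here chain, since each returns a copy of the `ProgressReport`. A minimal sketch (not part of the vendored diff; `compactProgress` is a hypothetical helper):

```go
// Minimal sketch: trimming a ProgressReport before display, dropping the
// (potentially large) GinkgoWriter capture and any goroutines that are
// neither the spec goroutine nor highlighted.
package example

import "github.com/onsi/ginkgo/v2/types"

func compactProgress(pr types.ProgressReport) types.ProgressReport {
	return pr.WithoutCapturedGinkgoWriterOutput().WithoutOtherGoroutines()
}
```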

type Goroutine struct {
	ID uint64
	State string
@ -643,6 +765,7 @@ const (

	NodeTypeReportBeforeEach
	NodeTypeReportAfterEach
	NodeTypeReportBeforeSuite
	NodeTypeReportAfterSuite

	NodeTypeCleanupInvalid
@ -652,9 +775,9 @@ const (
)

var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt
var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite
var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite
var NodeTypesAllowedDuringCleanupInterrupt = NodeTypeAfterEach | NodeTypeJustAfterEach | NodeTypeAfterAll | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeCleanupAfterEach | NodeTypeCleanupAfterAll | NodeTypeCleanupAfterSuite
var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | NodeTypeReportAfterSuite
var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite

var ntEnumSupport = NewEnumSupport(map[uint]string{
	uint(NodeTypeInvalid): "INVALID NODE TYPE",
@ -672,6 +795,7 @@ var ntEnumSupport = NewEnumSupport(map[uint]string{
	uint(NodeTypeSynchronizedAfterSuite): "SynchronizedAfterSuite",
	uint(NodeTypeReportBeforeEach): "ReportBeforeEach",
	uint(NodeTypeReportAfterEach): "ReportAfterEach",
	uint(NodeTypeReportBeforeSuite): "ReportBeforeSuite",
	uint(NodeTypeReportAfterSuite): "ReportAfterSuite",
	uint(NodeTypeCleanupInvalid): "DeferCleanup",
	uint(NodeTypeCleanupAfterEach): "DeferCleanup (Each)",
@ -694,3 +818,99 @@ func (nt NodeType) MarshalJSON() ([]byte, error) {
func (nt NodeType) Is(nodeTypes NodeType) bool {
	return nt&nodeTypes != 0
}
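Because `NodeType` values are bit flags, `Is` answers "is this any of…" questions against the masks defined above. A minimal sketch (not part of the vendored diff; `isSuiteLevel` is a hypothetical helper):

```go
// Minimal sketch: bitmask membership tests with NodeType.Is.
package example

import "github.com/onsi/ginkgo/v2/types"

// isSuiteLevel reports whether nt is one of the suite-level node types,
// which as of this change includes NodeTypeReportBeforeSuite.
func isSuiteLevel(nt types.NodeType) bool {
	return nt.Is(types.NodeTypesForSuiteLevelNodes)
}
```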

/*
SpecEvent captures a variety of events that can occur when specs run. See SpecEventType for the list of available events.
*/
type SpecEvent struct {
	SpecEventType SpecEventType

	CodeLocation CodeLocation
	TimelineLocation TimelineLocation

	Message string `json:",omitempty"`
	Duration time.Duration `json:",omitempty"`
	NodeType NodeType `json:",omitempty"`
	Attempt int `json:",omitempty"`
}

func (se SpecEvent) GetTimelineLocation() TimelineLocation {
	return se.TimelineLocation
}

func (se SpecEvent) IsOnlyVisibleAtVeryVerbose() bool {
	return se.SpecEventType.Is(SpecEventByEnd | SpecEventNodeStart | SpecEventNodeEnd)
}

func (se SpecEvent) GomegaString() string {
	out := &strings.Builder{}
	out.WriteString("[" + se.SpecEventType.String() + " SpecEvent] ")
	if se.Message != "" {
		out.WriteString("Message=")
		out.WriteString(`"` + se.Message + `",`)
	}
	if se.Duration != 0 {
		out.WriteString("Duration=" + se.Duration.String() + ",")
	}
	if se.NodeType != NodeTypeInvalid {
		out.WriteString("NodeType=" + se.NodeType.String() + ",")
	}
	if se.Attempt != 0 {
		out.WriteString(fmt.Sprintf("Attempt=%d", se.Attempt) + ",")
	}
	out.WriteString("CL=" + se.CodeLocation.String() + ",")
	out.WriteString(fmt.Sprintf("TL.Offset=%d", se.TimelineLocation.Offset))

	return out.String()
}

type SpecEvents []SpecEvent

func (se SpecEvents) WithType(seType SpecEventType) SpecEvents {
	out := SpecEvents{}
	for _, event := range se {
		if event.SpecEventType.Is(seType) {
			out = append(out, event)
		}
	}
	return out
}
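A minimal sketch of `SpecEvents.WithType` (not part of the vendored diff; `byMessages` is a hypothetical helper, and it assumes `SpecReport` exposes its events via a `SpecEvents` field as appended in `Timeline()` above):

```go
// Minimal sketch: collecting the messages of all By(...) start events.
package example

import "github.com/onsi/ginkgo/v2/types"

func byMessages(report types.SpecReport) []string {
	messages := []string{}
	for _, event := range report.SpecEvents.WithType(types.SpecEventByStart) {
		messages = append(messages, event.Message)
	}
	return messages
}
```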

type SpecEventType uint

const (
	SpecEventInvalid SpecEventType = 0

	SpecEventByStart SpecEventType = 1 << iota
	SpecEventByEnd
	SpecEventNodeStart
	SpecEventNodeEnd
	SpecEventSpecRepeat
	SpecEventSpecRetry
)

var seEnumSupport = NewEnumSupport(map[uint]string{
	uint(SpecEventInvalid): "INVALID SPEC EVENT",
	uint(SpecEventByStart): "By",
	uint(SpecEventByEnd): "By (End)",
	uint(SpecEventNodeStart): "Node",
	uint(SpecEventNodeEnd): "Node (End)",
	uint(SpecEventSpecRepeat): "Repeat",
	uint(SpecEventSpecRetry): "Retry",
})

func (se SpecEventType) String() string {
	return seEnumSupport.String(uint(se))
}
func (se *SpecEventType) UnmarshalJSON(b []byte) error {
	out, err := seEnumSupport.UnmarshJSON(b)
	*se = SpecEventType(out)
	return err
}
func (se SpecEventType) MarshalJSON() ([]byte, error) {
	return seEnumSupport.MarshJSON(uint(se))
}

func (se SpecEventType) Is(specEventTypes SpecEventType) bool {
	return se&specEventTypes != 0
}
2
vendor/github.com/onsi/ginkgo/v2/types/version.go
generated
vendored
@ -1,3 +1,3 @@
package types

const VERSION = "2.4.0"
const VERSION = "2.9.5"