mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-06-14 10:53:34 +00:00
rebase: bump github.com/onsi/ginkgo/v2 from 2.4.0 to 2.8.0
Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.4.0 to 2.8.0. - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.4.0...v2.8.0) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
committed by
mergify[bot]
parent
5a460e1d03
commit
4c4c170439
50
vendor/github.com/onsi/ginkgo/v2/internal/group.go
generated
vendored
50
vendor/github.com/onsi/ginkgo/v2/internal/group.go
generated
vendored
@ -94,15 +94,19 @@ type group struct {
|
||||
runOncePairs map[uint]runOncePairs
|
||||
runOnceTracker map[runOncePair]types.SpecState
|
||||
|
||||
succeeded bool
|
||||
succeeded bool
|
||||
failedInARunOnceBefore bool
|
||||
continueOnFailure bool
|
||||
}
|
||||
|
||||
func newGroup(suite *Suite) *group {
|
||||
return &group{
|
||||
suite: suite,
|
||||
runOncePairs: map[uint]runOncePairs{},
|
||||
runOnceTracker: map[runOncePair]types.SpecState{},
|
||||
succeeded: true,
|
||||
suite: suite,
|
||||
runOncePairs: map[uint]runOncePairs{},
|
||||
runOnceTracker: map[runOncePair]types.SpecState{},
|
||||
succeeded: true,
|
||||
failedInARunOnceBefore: false,
|
||||
continueOnFailure: false,
|
||||
}
|
||||
}
|
||||
|
||||
@ -116,6 +120,7 @@ func (g *group) initialReportForSpec(spec Spec) types.SpecReport {
|
||||
LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
|
||||
LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
|
||||
ParallelProcess: g.suite.config.ParallelProcess,
|
||||
RunningInParallel: g.suite.isRunningInParallel(),
|
||||
IsSerial: spec.Nodes.HasNodeMarkedSerial(),
|
||||
IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
|
||||
MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(),
|
||||
@ -136,10 +141,14 @@ func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) {
|
||||
if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) {
|
||||
return types.SpecStateSkipped, types.Failure{}
|
||||
}
|
||||
if !g.succeeded {
|
||||
if !g.succeeded && !g.continueOnFailure {
|
||||
return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
|
||||
"Spec skipped because an earlier spec in an ordered container failed")
|
||||
}
|
||||
if g.failedInARunOnceBefore && g.continueOnFailure {
|
||||
return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
|
||||
"Spec skipped because a BeforeAll node failed")
|
||||
}
|
||||
beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach)
|
||||
for _, pair := range beforeOncePairs {
|
||||
if g.runOnceTracker[pair].Is(types.SpecStateSkipped) {
|
||||
@ -167,7 +176,8 @@ func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool {
|
||||
return lastSpecID == specID
|
||||
}
|
||||
|
||||
func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
|
||||
func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) bool {
|
||||
failedInARunOnceBefore := false
|
||||
pairs := g.runOncePairs[spec.SubjectID()]
|
||||
|
||||
nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll)
|
||||
@ -193,6 +203,7 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
|
||||
}
|
||||
if g.suite.currentSpecReport.State != types.SpecStatePassed {
|
||||
terminatingNode, terminatingPair = node, oncePair
|
||||
failedInARunOnceBefore = !terminatingPair.isZero()
|
||||
break
|
||||
}
|
||||
}
|
||||
@ -215,7 +226,7 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
|
||||
//this node has already been run on this attempt, don't rerun it
|
||||
return false
|
||||
}
|
||||
pair := runOncePair{}
|
||||
var pair runOncePair
|
||||
switch node.NodeType {
|
||||
case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll:
|
||||
// check if we were generated in an AfterNode that has already run
|
||||
@ -245,9 +256,13 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
|
||||
if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel {
|
||||
return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run
|
||||
}
|
||||
case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed...
|
||||
case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: // the spec has failed...
|
||||
if isFinalAttempt {
|
||||
return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run
|
||||
if g.continueOnFailure {
|
||||
return isLastSpecWithPair || failedInARunOnceBefore //...we're configured to continue on failures - so we should only run if we're the last spec for this pair or if we failed in a runOnceBefore (which means we _are_ the last spec to run)
|
||||
} else {
|
||||
return true //...this was the last attempt and continueOnFailure is false therefore we are the last spec to run and so the AfterNode should run
|
||||
}
|
||||
}
|
||||
if !terminatingPair.isZero() { // ...and it failed in a run-once. which will be running again
|
||||
if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) {
|
||||
@ -280,10 +295,12 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
|
||||
includeDeferCleanups = true
|
||||
}
|
||||
|
||||
return failedInARunOnceBefore
|
||||
}
|
||||
|
||||
func (g *group) run(specs Specs) {
|
||||
g.specs = specs
|
||||
g.continueOnFailure = specs[0].Nodes.FirstNodeMarkedOrdered().MarkedContinueOnFailure
|
||||
for _, spec := range g.specs {
|
||||
g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec)
|
||||
}
|
||||
@ -300,8 +317,8 @@ func (g *group) run(specs Specs) {
|
||||
skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending)
|
||||
|
||||
g.suite.currentSpecReport.StartTime = time.Now()
|
||||
failedInARunOnceBefore := false
|
||||
if !skip {
|
||||
|
||||
var maxAttempts = 1
|
||||
|
||||
if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
|
||||
@ -319,14 +336,14 @@ func (g *group) run(specs Specs) {
|
||||
g.suite.outputInterceptor.StartInterceptingOutput()
|
||||
if attempt > 0 {
|
||||
if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
|
||||
fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Passed. Repeating...\n", attempt)
|
||||
g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRepeat, Attempt: attempt})
|
||||
}
|
||||
if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
|
||||
fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt)
|
||||
g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRetry, Attempt: attempt})
|
||||
}
|
||||
}
|
||||
|
||||
g.attemptSpec(attempt == maxAttempts-1, spec)
|
||||
failedInARunOnceBefore = g.attemptSpec(attempt == maxAttempts-1, spec)
|
||||
|
||||
g.suite.currentSpecReport.EndTime = time.Now()
|
||||
g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime)
|
||||
@ -341,6 +358,10 @@ func (g *group) run(specs Specs) {
|
||||
if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
|
||||
if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
|
||||
break
|
||||
} else if attempt < maxAttempts-1 {
|
||||
af := types.AdditionalFailure{State: g.suite.currentSpecReport.State, Failure: g.suite.currentSpecReport.Failure}
|
||||
af.Failure.Message = fmt.Sprintf("Failure recorded during attempt %d:\n%s", attempt+1, af.Failure.Message)
|
||||
g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, af)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -350,6 +371,7 @@ func (g *group) run(specs Specs) {
|
||||
g.suite.processCurrentSpecReport()
|
||||
if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
|
||||
g.succeeded = false
|
||||
g.failedInARunOnceBefore = g.failedInARunOnceBefore || failedInARunOnceBefore
|
||||
}
|
||||
g.suite.selectiveLock.Lock()
|
||||
g.suite.currentSpecReport = types.SpecReport{}
|
||||
|
62
vendor/github.com/onsi/ginkgo/v2/internal/node.go
generated
vendored
62
vendor/github.com/onsi/ginkgo/v2/internal/node.go
generated
vendored
@ -44,23 +44,23 @@ type Node struct {
|
||||
SynchronizedAfterSuiteProc1Body func(SpecContext)
|
||||
SynchronizedAfterSuiteProc1BodyHasContext bool
|
||||
|
||||
ReportEachBody func(types.SpecReport)
|
||||
ReportAfterSuiteBody func(types.Report)
|
||||
ReportEachBody func(types.SpecReport)
|
||||
ReportSuiteBody func(types.Report)
|
||||
|
||||
MarkedFocus bool
|
||||
MarkedPending bool
|
||||
MarkedSerial bool
|
||||
MarkedOrdered bool
|
||||
MarkedOncePerOrdered bool
|
||||
MarkedSuppressProgressReporting bool
|
||||
FlakeAttempts int
|
||||
MustPassRepeatedly int
|
||||
Labels Labels
|
||||
PollProgressAfter time.Duration
|
||||
PollProgressInterval time.Duration
|
||||
NodeTimeout time.Duration
|
||||
SpecTimeout time.Duration
|
||||
GracePeriod time.Duration
|
||||
MarkedFocus bool
|
||||
MarkedPending bool
|
||||
MarkedSerial bool
|
||||
MarkedOrdered bool
|
||||
MarkedContinueOnFailure bool
|
||||
MarkedOncePerOrdered bool
|
||||
FlakeAttempts int
|
||||
MustPassRepeatedly int
|
||||
Labels Labels
|
||||
PollProgressAfter time.Duration
|
||||
PollProgressInterval time.Duration
|
||||
NodeTimeout time.Duration
|
||||
SpecTimeout time.Duration
|
||||
GracePeriod time.Duration
|
||||
|
||||
NodeIDWhereCleanupWasGenerated uint
|
||||
}
|
||||
@ -70,6 +70,7 @@ type focusType bool
|
||||
type pendingType bool
|
||||
type serialType bool
|
||||
type orderedType bool
|
||||
type continueOnFailureType bool
|
||||
type honorsOrderedType bool
|
||||
type suppressProgressReporting bool
|
||||
|
||||
@ -77,6 +78,7 @@ const Focus = focusType(true)
|
||||
const Pending = pendingType(true)
|
||||
const Serial = serialType(true)
|
||||
const Ordered = orderedType(true)
|
||||
const ContinueOnFailure = continueOnFailureType(true)
|
||||
const OncePerOrdered = honorsOrderedType(true)
|
||||
const SuppressProgressReporting = suppressProgressReporting(true)
|
||||
|
||||
@ -91,6 +93,10 @@ type NodeTimeout time.Duration
|
||||
type SpecTimeout time.Duration
|
||||
type GracePeriod time.Duration
|
||||
|
||||
func (l Labels) MatchesLabelFilter(query string) bool {
|
||||
return types.MustParseLabelFilter(query)(l)
|
||||
}
|
||||
|
||||
func UnionOfLabels(labels ...Labels) Labels {
|
||||
out := Labels{}
|
||||
seen := map[string]bool{}
|
||||
@ -134,6 +140,8 @@ func isDecoration(arg interface{}) bool {
|
||||
return true
|
||||
case t == reflect.TypeOf(Ordered):
|
||||
return true
|
||||
case t == reflect.TypeOf(ContinueOnFailure):
|
||||
return true
|
||||
case t == reflect.TypeOf(OncePerOrdered):
|
||||
return true
|
||||
case t == reflect.TypeOf(SuppressProgressReporting):
|
||||
@ -242,16 +250,18 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
|
||||
if !nodeType.Is(types.NodeTypeContainer) {
|
||||
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered"))
|
||||
}
|
||||
case t == reflect.TypeOf(ContinueOnFailure):
|
||||
node.MarkedContinueOnFailure = bool(arg.(continueOnFailureType))
|
||||
if !nodeType.Is(types.NodeTypeContainer) {
|
||||
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "ContinueOnFailure"))
|
||||
}
|
||||
case t == reflect.TypeOf(OncePerOrdered):
|
||||
node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType))
|
||||
if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
|
||||
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered"))
|
||||
}
|
||||
case t == reflect.TypeOf(SuppressProgressReporting):
|
||||
node.MarkedSuppressProgressReporting = bool(arg.(suppressProgressReporting))
|
||||
if nodeType.Is(types.NodeTypeContainer) {
|
||||
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SuppressProgressReporting"))
|
||||
}
|
||||
deprecationTracker.TrackDeprecation(types.Deprecations.SuppressProgressReporting())
|
||||
case t == reflect.TypeOf(FlakeAttempts(0)):
|
||||
node.FlakeAttempts = int(arg.(FlakeAttempts))
|
||||
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
|
||||
@ -321,9 +331,9 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
|
||||
trackedFunctionError = true
|
||||
break
|
||||
}
|
||||
} else if nodeType.Is(types.NodeTypeReportAfterSuite) {
|
||||
if node.ReportAfterSuiteBody == nil {
|
||||
node.ReportAfterSuiteBody = arg.(func(types.Report))
|
||||
} else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
|
||||
if node.ReportSuiteBody == nil {
|
||||
node.ReportSuiteBody = arg.(func(types.Report))
|
||||
} else {
|
||||
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
|
||||
trackedFunctionError = true
|
||||
@ -390,13 +400,17 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
|
||||
appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
|
||||
}
|
||||
|
||||
if node.MarkedContinueOnFailure && !node.MarkedOrdered {
|
||||
appendError(types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation))
|
||||
}
|
||||
|
||||
hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext
|
||||
|
||||
if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 {
|
||||
appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType))
|
||||
}
|
||||
|
||||
if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError {
|
||||
if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportBeforeSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError {
|
||||
appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
|
||||
}
|
||||
|
||||
|
77
vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
generated
vendored
77
vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
generated
vendored
@ -7,6 +7,58 @@ import (
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
type SortableSpecs struct {
|
||||
Specs Specs
|
||||
Indexes []int
|
||||
}
|
||||
|
||||
func NewSortableSpecs(specs Specs) *SortableSpecs {
|
||||
indexes := make([]int, len(specs))
|
||||
for i := range specs {
|
||||
indexes[i] = i
|
||||
}
|
||||
return &SortableSpecs{
|
||||
Specs: specs,
|
||||
Indexes: indexes,
|
||||
}
|
||||
}
|
||||
func (s *SortableSpecs) Len() int { return len(s.Indexes) }
|
||||
func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[j], s.Indexes[i] }
|
||||
func (s *SortableSpecs) Less(i, j int) bool {
|
||||
a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]]
|
||||
|
||||
firstOrderedA := a.Nodes.FirstNodeMarkedOrdered()
|
||||
firstOrderedB := b.Nodes.FirstNodeMarkedOrdered()
|
||||
if firstOrderedA.ID == firstOrderedB.ID && !firstOrderedA.IsZero() {
|
||||
// strictly preserve order in ordered containers. ID will track this as IDs are generated monotonically
|
||||
return a.FirstNodeWithType(types.NodeTypeIt).ID < b.FirstNodeWithType(types.NodeTypeIt).ID
|
||||
}
|
||||
|
||||
aCLs := a.Nodes.WithType(types.NodeTypesForContainerAndIt).CodeLocations()
|
||||
bCLs := b.Nodes.WithType(types.NodeTypesForContainerAndIt).CodeLocations()
|
||||
for i := 0; i < len(aCLs) && i < len(bCLs); i++ {
|
||||
aCL, bCL := aCLs[i], bCLs[i]
|
||||
if aCL.FileName < bCL.FileName {
|
||||
return true
|
||||
} else if aCL.FileName > bCL.FileName {
|
||||
return false
|
||||
}
|
||||
if aCL.LineNumber < bCL.LineNumber {
|
||||
return true
|
||||
} else if aCL.LineNumber > bCL.LineNumber {
|
||||
return false
|
||||
}
|
||||
}
|
||||
// either everything is equal or we have different lengths of CLs
|
||||
if len(aCLs) < len(bCLs) {
|
||||
return true
|
||||
} else if len(aCLs) > len(bCLs) {
|
||||
return false
|
||||
}
|
||||
// ok, now we are sure everything was equal. so we use the spec text to break ties
|
||||
return a.Text() < b.Text()
|
||||
}
|
||||
|
||||
type GroupedSpecIndices []SpecIndices
|
||||
type SpecIndices []int
|
||||
|
||||
@ -28,12 +80,17 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
|
||||
// Seed a new random source based on thee configured random seed.
|
||||
r := rand.New(rand.NewSource(suiteConfig.RandomSeed))
|
||||
|
||||
// first break things into execution groups
|
||||
// first, we sort the entire suite to ensure a deterministic order. the sort is performed by filename, then line number, and then spec text. this ensures every parallel process has the exact same spec order and is only necessary to cover the edge case where the user iterates over a map to generate specs.
|
||||
sortableSpecs := NewSortableSpecs(specs)
|
||||
sort.Sort(sortableSpecs)
|
||||
|
||||
// then we break things into execution groups
|
||||
// a group represents a single unit of execution and is a collection of SpecIndices
|
||||
// usually a group is just a single spec, however ordered containers must be preserved as a single group
|
||||
executionGroupIDs := []uint{}
|
||||
executionGroups := map[uint]SpecIndices{}
|
||||
for idx, spec := range specs {
|
||||
for _, idx := range sortableSpecs.Indexes {
|
||||
spec := specs[idx]
|
||||
groupNode := spec.Nodes.FirstNodeMarkedOrdered()
|
||||
if groupNode.IsZero() {
|
||||
groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt)
|
||||
@ -48,7 +105,6 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
|
||||
// we shuffle outermost containers. so we need to form shufflable groupings of GroupIDs
|
||||
shufflableGroupingIDs := []uint{}
|
||||
shufflableGroupingIDToGroupIDs := map[uint][]uint{}
|
||||
shufflableGroupingsIDToSortKeys := map[uint]string{}
|
||||
|
||||
// for each execution group we're going to have to pick a node to represent how the
|
||||
// execution group is grouped for shuffling:
|
||||
@ -57,7 +113,7 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
|
||||
nodeTypesToShuffle = types.NodeTypeIt
|
||||
}
|
||||
|
||||
//so, fo reach execution group:
|
||||
//so, for each execution group:
|
||||
for _, groupID := range executionGroupIDs {
|
||||
// pick out a representative spec
|
||||
representativeSpec := specs[executionGroups[groupID][0]]
|
||||
@ -72,22 +128,9 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
|
||||
if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 {
|
||||
// record the shuffleable group ID
|
||||
shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID)
|
||||
// and record the sort key to use
|
||||
shufflableGroupingsIDToSortKeys[shufflableGroupingNode.ID] = shufflableGroupingNode.CodeLocation.String()
|
||||
}
|
||||
}
|
||||
|
||||
// now we sort the shufflable groups by the sort key. We use the shufflable group nodes code location and break ties using its node id
|
||||
sort.SliceStable(shufflableGroupingIDs, func(i, j int) bool {
|
||||
keyA := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[i]]
|
||||
keyB := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[j]]
|
||||
if keyA == keyB {
|
||||
return shufflableGroupingIDs[i] < shufflableGroupingIDs[j]
|
||||
} else {
|
||||
return keyA < keyB
|
||||
}
|
||||
})
|
||||
|
||||
// now we permute the sorted shufflable grouping IDs and build the ordered Groups
|
||||
orderedGroups := GroupedSpecIndices{}
|
||||
permutation := r.Perm(len(shufflableGroupingIDs))
|
||||
|
2
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
generated
vendored
2
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
generated
vendored
@ -42,6 +42,8 @@ type Client interface {
|
||||
PostSuiteWillBegin(report types.Report) error
|
||||
PostDidRun(report types.SpecReport) error
|
||||
PostSuiteDidEnd(report types.Report) error
|
||||
PostReportBeforeSuiteCompleted(state types.SpecState) error
|
||||
BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error)
|
||||
PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error
|
||||
BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error)
|
||||
BlockUntilNonprimaryProcsHaveFinished() error
|
||||
|
13
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
generated
vendored
13
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
generated
vendored
@ -98,6 +98,19 @@ func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) er
|
||||
return client.post("/progress-report", report)
|
||||
}
|
||||
|
||||
func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
|
||||
return client.post("/report-before-suite-completed", state)
|
||||
}
|
||||
|
||||
func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
|
||||
var state types.SpecState
|
||||
err := client.poll("/report-before-suite-state", &state)
|
||||
if err == ErrorGone {
|
||||
return types.SpecStateFailed, nil
|
||||
}
|
||||
return state, err
|
||||
}
|
||||
|
||||
func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
|
||||
beforeSuiteState := BeforeSuiteState{
|
||||
State: state,
|
||||
|
29
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
generated
vendored
29
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
generated
vendored
@ -26,7 +26,7 @@ type httpServer struct {
|
||||
handler *ServerHandler
|
||||
}
|
||||
|
||||
//Create a new server, automatically selecting a port
|
||||
// Create a new server, automatically selecting a port
|
||||
func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) {
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
@ -38,7 +38,7 @@ func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer,
|
||||
}, nil
|
||||
}
|
||||
|
||||
//Start the server. You don't need to `go s.Start()`, just `s.Start()`
|
||||
// Start the server. You don't need to `go s.Start()`, just `s.Start()`
|
||||
func (server *httpServer) Start() {
|
||||
httpServer := &http.Server{}
|
||||
mux := http.NewServeMux()
|
||||
@ -52,6 +52,8 @@ func (server *httpServer) Start() {
|
||||
mux.HandleFunc("/progress-report", server.emitProgressReport)
|
||||
|
||||
//synchronization endpoints
|
||||
mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted)
|
||||
mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState)
|
||||
mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted)
|
||||
mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState)
|
||||
mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished)
|
||||
@ -63,12 +65,12 @@ func (server *httpServer) Start() {
|
||||
go httpServer.Serve(server.listener)
|
||||
}
|
||||
|
||||
//Stop the server
|
||||
// Stop the server
|
||||
func (server *httpServer) Close() {
|
||||
server.listener.Close()
|
||||
}
|
||||
|
||||
//The address the server can be reached it. Pass this into the `ForwardingReporter`.
|
||||
// The address the server can be reached it. Pass this into the `ForwardingReporter`.
|
||||
func (server *httpServer) Address() string {
|
||||
return "http://" + server.listener.Addr().String()
|
||||
}
|
||||
@ -93,7 +95,7 @@ func (server *httpServer) RegisterAlive(node int, alive func() bool) {
|
||||
// Streaming Endpoints
|
||||
//
|
||||
|
||||
//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
|
||||
// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
|
||||
func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool {
|
||||
defer request.Body.Close()
|
||||
if json.NewDecoder(request.Body).Decode(object) != nil {
|
||||
@ -164,6 +166,23 @@ func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request
|
||||
server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer)
|
||||
}
|
||||
|
||||
func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
|
||||
var state types.SpecState
|
||||
if !server.decode(writer, request, &state) {
|
||||
return
|
||||
}
|
||||
|
||||
server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer)
|
||||
}
|
||||
|
||||
func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
|
||||
var state types.SpecState
|
||||
if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) {
|
||||
return
|
||||
}
|
||||
json.NewEncoder(writer).Encode(state)
|
||||
}
|
||||
|
||||
func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
|
||||
var beforeSuiteState BeforeSuiteState
|
||||
if !server.decode(writer, request, &beforeSuiteState) {
|
||||
|
13
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
generated
vendored
13
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
generated
vendored
@ -76,6 +76,19 @@ func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) err
|
||||
return client.client.Call("Server.EmitProgressReport", report, voidReceiver)
|
||||
}
|
||||
|
||||
func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
|
||||
return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver)
|
||||
}
|
||||
|
||||
func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
|
||||
var state types.SpecState
|
||||
err := client.poll("Server.ReportBeforeSuiteState", &state)
|
||||
if err == ErrorGone {
|
||||
return types.SpecStateFailed, nil
|
||||
}
|
||||
return state, err
|
||||
}
|
||||
|
||||
func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
|
||||
beforeSuiteState := BeforeSuiteState{
|
||||
State: state,
|
||||
|
55
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
generated
vendored
55
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
generated
vendored
@ -18,16 +18,17 @@ var voidSender Void
|
||||
// It handles all the business logic to avoid duplication between the two servers
|
||||
|
||||
type ServerHandler struct {
|
||||
done chan interface{}
|
||||
outputDestination io.Writer
|
||||
reporter reporters.Reporter
|
||||
alives []func() bool
|
||||
lock *sync.Mutex
|
||||
beforeSuiteState BeforeSuiteState
|
||||
parallelTotal int
|
||||
counter int
|
||||
counterLock *sync.Mutex
|
||||
shouldAbort bool
|
||||
done chan interface{}
|
||||
outputDestination io.Writer
|
||||
reporter reporters.Reporter
|
||||
alives []func() bool
|
||||
lock *sync.Mutex
|
||||
beforeSuiteState BeforeSuiteState
|
||||
reportBeforeSuiteState types.SpecState
|
||||
parallelTotal int
|
||||
counter int
|
||||
counterLock *sync.Mutex
|
||||
shouldAbort bool
|
||||
|
||||
numSuiteDidBegins int
|
||||
numSuiteDidEnds int
|
||||
@ -37,11 +38,12 @@ type ServerHandler struct {
|
||||
|
||||
func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler {
|
||||
return &ServerHandler{
|
||||
reporter: reporter,
|
||||
lock: &sync.Mutex{},
|
||||
counterLock: &sync.Mutex{},
|
||||
alives: make([]func() bool, parallelTotal),
|
||||
beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid},
|
||||
reporter: reporter,
|
||||
lock: &sync.Mutex{},
|
||||
counterLock: &sync.Mutex{},
|
||||
alives: make([]func() bool, parallelTotal),
|
||||
beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid},
|
||||
|
||||
parallelTotal: parallelTotal,
|
||||
outputDestination: os.Stdout,
|
||||
done: make(chan interface{}),
|
||||
@ -140,6 +142,29 @@ func (handler *ServerHandler) haveNonprimaryProcsFinished() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error {
|
||||
handler.lock.Lock()
|
||||
defer handler.lock.Unlock()
|
||||
handler.reportBeforeSuiteState = reportBeforeSuiteState
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error {
|
||||
proc1IsAlive := handler.procIsAlive(1)
|
||||
handler.lock.Lock()
|
||||
defer handler.lock.Unlock()
|
||||
if handler.reportBeforeSuiteState == types.SpecStateInvalid {
|
||||
if proc1IsAlive {
|
||||
return ErrorEarly
|
||||
} else {
|
||||
return ErrorGone
|
||||
}
|
||||
}
|
||||
*reportBeforeSuiteState = handler.reportBeforeSuiteState
|
||||
return nil
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error {
|
||||
handler.lock.Lock()
|
||||
defer handler.lock.Unlock()
|
||||
|
26
vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
generated
vendored
26
vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
generated
vendored
@ -48,13 +48,10 @@ type ProgressStepCursor struct {
|
||||
StartTime time.Time
|
||||
}
|
||||
|
||||
func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep ProgressStepCursor, gwOutput string, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) {
|
||||
func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep types.SpecEvent, gwOutput string, timelineLocation types.TimelineLocation, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) {
|
||||
pr := types.ProgressReport{
|
||||
ParallelProcess: report.ParallelProcess,
|
||||
RunningInParallel: isRunningInParallel,
|
||||
|
||||
Time: time.Now(),
|
||||
|
||||
ParallelProcess: report.ParallelProcess,
|
||||
RunningInParallel: isRunningInParallel,
|
||||
ContainerHierarchyTexts: report.ContainerHierarchyTexts,
|
||||
LeafNodeText: report.LeafNodeText,
|
||||
LeafNodeLocation: report.LeafNodeLocation,
|
||||
@ -65,14 +62,14 @@ func NewProgressReport(isRunningInParallel bool, report types.SpecReport, curren
|
||||
CurrentNodeLocation: currentNode.CodeLocation,
|
||||
CurrentNodeStartTime: currentNodeStartTime,
|
||||
|
||||
CurrentStepText: currentStep.Text,
|
||||
CurrentStepText: currentStep.Message,
|
||||
CurrentStepLocation: currentStep.CodeLocation,
|
||||
CurrentStepStartTime: currentStep.StartTime,
|
||||
CurrentStepStartTime: currentStep.TimelineLocation.Time,
|
||||
|
||||
AdditionalReports: additionalReports,
|
||||
|
||||
CapturedGinkgoWriterOutput: gwOutput,
|
||||
GinkgoWriterOffset: len(gwOutput),
|
||||
TimelineLocation: timelineLocation,
|
||||
}
|
||||
|
||||
goroutines, err := extractRunningGoroutines()
|
||||
@ -186,7 +183,6 @@ func extractRunningGoroutines() ([]types.Goroutine, error) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
r := bufio.NewReader(bytes.NewReader(stack))
|
||||
out := []types.Goroutine{}
|
||||
idx := -1
|
||||
@ -234,12 +230,12 @@ func extractRunningGoroutines() ([]types.Goroutine, error) {
|
||||
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function))
|
||||
}
|
||||
line = strings.TrimLeft(line, " \t")
|
||||
fields := strings.SplitN(line, ":", 2)
|
||||
if len(fields) != 2 {
|
||||
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename nad line number: %s", line))
|
||||
delimiterIdx := strings.LastIndex(line, ":")
|
||||
if delimiterIdx == -1 {
|
||||
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename and line number: %s", line))
|
||||
}
|
||||
functionCall.Filename = fields[0]
|
||||
line = strings.Split(fields[1], " ")[0]
|
||||
functionCall.Filename = line[:delimiterIdx]
|
||||
line = strings.Split(line[delimiterIdx+1:], " ")[0]
|
||||
lineNumber, err := strconv.ParseInt(line, 10, 64)
|
||||
functionCall.Line = int(lineNumber)
|
||||
if err != nil {
|
||||
|
21
vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
generated
vendored
21
vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
generated
vendored
@ -1,7 +1,6 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
@ -13,20 +12,20 @@ func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (Re
|
||||
out := ReportEntry{
|
||||
Visibility: types.ReportEntryVisibilityAlways,
|
||||
Name: name,
|
||||
Time: time.Now(),
|
||||
Location: cl,
|
||||
Time: time.Now(),
|
||||
}
|
||||
var didSetValue = false
|
||||
for _, arg := range args {
|
||||
switch reflect.TypeOf(arg) {
|
||||
case reflect.TypeOf(types.ReportEntryVisibilityAlways):
|
||||
out.Visibility = arg.(types.ReportEntryVisibility)
|
||||
case reflect.TypeOf(types.CodeLocation{}):
|
||||
out.Location = arg.(types.CodeLocation)
|
||||
case reflect.TypeOf(Offset(0)):
|
||||
out.Location = types.NewCodeLocation(2 + int(arg.(Offset)))
|
||||
case reflect.TypeOf(out.Time):
|
||||
out.Time = arg.(time.Time)
|
||||
switch x := arg.(type) {
|
||||
case types.ReportEntryVisibility:
|
||||
out.Visibility = x
|
||||
case types.CodeLocation:
|
||||
out.Location = x
|
||||
case Offset:
|
||||
out.Location = types.NewCodeLocation(2 + int(x))
|
||||
case time.Time:
|
||||
out.Time = x
|
||||
default:
|
||||
if didSetValue {
|
||||
return ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg)
|
||||
|
284
vendor/github.com/onsi/ginkgo/v2/internal/suite.go
generated
vendored
284
vendor/github.com/onsi/ginkgo/v2/internal/suite.go
generated
vendored
@ -44,7 +44,8 @@ type Suite struct {
|
||||
|
||||
currentSpecContext *specContext
|
||||
|
||||
progressStepCursor ProgressStepCursor
|
||||
currentByStep types.SpecEvent
|
||||
timelineOrder int
|
||||
|
||||
/*
|
||||
We don't need to lock around all operations. Just those that *could* happen concurrently.
|
||||
@ -128,7 +129,7 @@ func (suite *Suite) PushNode(node Node) error {
|
||||
return suite.pushCleanupNode(node)
|
||||
}
|
||||
|
||||
if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) {
|
||||
if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeBeforeSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
|
||||
return suite.pushSuiteNode(node)
|
||||
}
|
||||
|
||||
@ -150,6 +151,13 @@ func (suite *Suite) PushNode(node Node) error {
|
||||
}
|
||||
}
|
||||
|
||||
if node.MarkedContinueOnFailure {
|
||||
firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
|
||||
if !firstOrderedNode.IsZero() {
|
||||
return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)
|
||||
}
|
||||
}
|
||||
|
||||
if node.NodeType == types.NodeTypeContainer {
|
||||
// During PhaseBuildTopLevel we only track the top level containers without entering them
|
||||
// We only enter the top level container nodes during PhaseBuildTree
|
||||
@ -221,7 +229,7 @@ func (suite *Suite) pushCleanupNode(node Node) error {
|
||||
node.NodeType = types.NodeTypeCleanupAfterSuite
|
||||
case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
|
||||
node.NodeType = types.NodeTypeCleanupAfterAll
|
||||
case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite:
|
||||
case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite:
|
||||
return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
|
||||
case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
|
||||
return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
|
||||
@ -236,19 +244,69 @@ func (suite *Suite) pushCleanupNode(node Node) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
Pushing and popping the Step Cursor stack
|
||||
*/
|
||||
|
||||
func (suite *Suite) SetProgressStepCursor(cursor ProgressStepCursor) {
|
||||
func (suite *Suite) generateTimelineLocation() types.TimelineLocation {
|
||||
suite.selectiveLock.Lock()
|
||||
defer suite.selectiveLock.Unlock()
|
||||
|
||||
suite.progressStepCursor = cursor
|
||||
suite.timelineOrder += 1
|
||||
return types.TimelineLocation{
|
||||
Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(),
|
||||
Order: suite.timelineOrder,
|
||||
Time: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *Suite) handleSpecEvent(event types.SpecEvent) types.SpecEvent {
|
||||
event.TimelineLocation = suite.generateTimelineLocation()
|
||||
suite.selectiveLock.Lock()
|
||||
suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
|
||||
suite.selectiveLock.Unlock()
|
||||
suite.reporter.EmitSpecEvent(event)
|
||||
return event
|
||||
}
|
||||
|
||||
func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) {
|
||||
event := startEvent
|
||||
event.SpecEventType = eventType
|
||||
event.TimelineLocation = suite.generateTimelineLocation()
|
||||
event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time)
|
||||
suite.selectiveLock.Lock()
|
||||
suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
|
||||
suite.selectiveLock.Unlock()
|
||||
suite.reporter.EmitSpecEvent(event)
|
||||
}
|
||||
|
||||
func (suite *Suite) By(text string, callback ...func()) error {
|
||||
cl := types.NewCodeLocation(2)
|
||||
if suite.phase != PhaseRun {
|
||||
return types.GinkgoErrors.ByNotDuringRunPhase(cl)
|
||||
}
|
||||
|
||||
event := suite.handleSpecEvent(types.SpecEvent{
|
||||
SpecEventType: types.SpecEventByStart,
|
||||
CodeLocation: cl,
|
||||
Message: text,
|
||||
})
|
||||
suite.selectiveLock.Lock()
|
||||
suite.currentByStep = event
|
||||
suite.selectiveLock.Unlock()
|
||||
|
||||
if len(callback) == 1 {
|
||||
defer func() {
|
||||
suite.selectiveLock.Lock()
|
||||
suite.currentByStep = types.SpecEvent{}
|
||||
suite.selectiveLock.Unlock()
|
||||
suite.handleSpecEventEnd(types.SpecEventByEnd, event)
|
||||
}()
|
||||
callback[0]()
|
||||
} else if len(callback) > 1 {
|
||||
panic("just one callback per By, please")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
Spec Running methods - used during PhaseRun
|
||||
Spec Running methods - used during PhaseRun
|
||||
*/
|
||||
func (suite *Suite) CurrentSpecReport() types.SpecReport {
|
||||
suite.selectiveLock.Lock()
|
||||
@ -263,16 +321,20 @@ func (suite *Suite) CurrentSpecReport() types.SpecReport {
|
||||
}
|
||||
|
||||
func (suite *Suite) AddReportEntry(entry ReportEntry) error {
|
||||
suite.selectiveLock.Lock()
|
||||
defer suite.selectiveLock.Unlock()
|
||||
if suite.phase != PhaseRun {
|
||||
return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
|
||||
}
|
||||
entry.TimelineLocation = suite.generateTimelineLocation()
|
||||
entry.Time = entry.TimelineLocation.Time
|
||||
suite.selectiveLock.Lock()
|
||||
suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
|
||||
suite.selectiveLock.Unlock()
|
||||
suite.reporter.EmitReportEntry(entry)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport {
|
||||
timelineLocation := suite.generateTimelineLocation()
|
||||
suite.selectiveLock.Lock()
|
||||
defer suite.selectiveLock.Unlock()
|
||||
|
||||
@ -280,10 +342,8 @@ func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport
|
||||
if suite.currentSpecContext != nil {
|
||||
additionalReports = suite.currentSpecContext.QueryProgressReporters()
|
||||
}
|
||||
stepCursor := suite.progressStepCursor
|
||||
|
||||
gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes())
|
||||
pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, stepCursor, gwOutput, additionalReports, suite.config.SourceRoots, fullReport)
|
||||
pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport)
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error())
|
||||
@ -355,7 +415,13 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s
|
||||
}
|
||||
|
||||
suite.report.SuiteSucceeded = true
|
||||
suite.runBeforeSuite(numSpecsThatWillBeRun)
|
||||
|
||||
suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite)
|
||||
|
||||
ranBeforeSuite := suite.report.SuiteSucceeded
|
||||
if suite.report.SuiteSucceeded {
|
||||
suite.runBeforeSuite(numSpecsThatWillBeRun)
|
||||
}
|
||||
|
||||
if suite.report.SuiteSucceeded {
|
||||
groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
|
||||
@ -394,7 +460,9 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s
|
||||
}
|
||||
}
|
||||
|
||||
suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
|
||||
if ranBeforeSuite {
|
||||
suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
|
||||
}
|
||||
|
||||
interruptStatus := suite.interruptHandler.Status()
|
||||
if interruptStatus.Interrupted() {
|
||||
@ -408,9 +476,7 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s
|
||||
suite.report.SuiteSucceeded = false
|
||||
}
|
||||
|
||||
if suite.config.ParallelProcess == 1 {
|
||||
suite.runReportAfterSuite()
|
||||
}
|
||||
suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite)
|
||||
suite.reporter.SuiteDidEnd(suite.report)
|
||||
if suite.isRunningInParallel() {
|
||||
suite.client.PostSuiteDidEnd(suite.report)
|
||||
@ -424,9 +490,10 @@ func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
|
||||
if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
|
||||
suite.selectiveLock.Lock()
|
||||
suite.currentSpecReport = types.SpecReport{
|
||||
LeafNodeType: beforeSuiteNode.NodeType,
|
||||
LeafNodeLocation: beforeSuiteNode.CodeLocation,
|
||||
ParallelProcess: suite.config.ParallelProcess,
|
||||
LeafNodeType: beforeSuiteNode.NodeType,
|
||||
LeafNodeLocation: beforeSuiteNode.CodeLocation,
|
||||
ParallelProcess: suite.config.ParallelProcess,
|
||||
RunningInParallel: suite.isRunningInParallel(),
|
||||
}
|
||||
suite.selectiveLock.Unlock()
|
||||
|
||||
@ -445,9 +512,10 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
|
||||
if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
|
||||
suite.selectiveLock.Lock()
|
||||
suite.currentSpecReport = types.SpecReport{
|
||||
LeafNodeType: afterSuiteNode.NodeType,
|
||||
LeafNodeLocation: afterSuiteNode.CodeLocation,
|
||||
ParallelProcess: suite.config.ParallelProcess,
|
||||
LeafNodeType: afterSuiteNode.NodeType,
|
||||
LeafNodeLocation: afterSuiteNode.CodeLocation,
|
||||
ParallelProcess: suite.config.ParallelProcess,
|
||||
RunningInParallel: suite.isRunningInParallel(),
|
||||
}
|
||||
suite.selectiveLock.Unlock()
|
||||
|
||||
@ -461,9 +529,10 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
|
||||
for _, cleanupNode := range afterSuiteCleanup {
|
||||
suite.selectiveLock.Lock()
|
||||
suite.currentSpecReport = types.SpecReport{
|
||||
LeafNodeType: cleanupNode.NodeType,
|
||||
LeafNodeLocation: cleanupNode.CodeLocation,
|
||||
ParallelProcess: suite.config.ParallelProcess,
|
||||
LeafNodeType: cleanupNode.NodeType,
|
||||
LeafNodeLocation: cleanupNode.CodeLocation,
|
||||
ParallelProcess: suite.config.ParallelProcess,
|
||||
RunningInParallel: suite.isRunningInParallel(),
|
||||
}
|
||||
suite.selectiveLock.Unlock()
|
||||
|
||||
@ -474,23 +543,6 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *Suite) runReportAfterSuite() {
|
||||
for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) {
|
||||
suite.selectiveLock.Lock()
|
||||
suite.currentSpecReport = types.SpecReport{
|
||||
LeafNodeType: node.NodeType,
|
||||
LeafNodeLocation: node.CodeLocation,
|
||||
LeafNodeText: node.Text,
|
||||
ParallelProcess: suite.config.ParallelProcess,
|
||||
}
|
||||
suite.selectiveLock.Unlock()
|
||||
|
||||
suite.reporter.WillRun(suite.currentSpecReport)
|
||||
suite.runReportAfterSuiteNode(node, suite.report)
|
||||
suite.processCurrentSpecReport()
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
|
||||
nodes := spec.Nodes.WithType(nodeType)
|
||||
if nodeType == types.NodeTypeReportAfterEach {
|
||||
@ -608,39 +660,80 @@ func (suite *Suite) runSuiteNode(node Node) {
|
||||
|
||||
if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
|
||||
suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
|
||||
suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
|
||||
}
|
||||
|
||||
suite.currentSpecReport.EndTime = time.Now()
|
||||
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
|
||||
suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
|
||||
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) {
|
||||
func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) {
|
||||
nodes := suite.suiteNodes.WithType(nodeType)
|
||||
// only run ReportAfterSuite on proc 1
|
||||
if nodeType.Is(types.NodeTypeReportAfterSuite) && suite.config.ParallelProcess != 1 {
|
||||
return
|
||||
}
|
||||
// if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed
|
||||
if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 {
|
||||
state, err := suite.client.BlockUntilReportBeforeSuiteCompleted()
|
||||
if err != nil || state.Is(types.SpecStateFailed) {
|
||||
suite.report.SuiteSucceeded = false
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
suite.selectiveLock.Lock()
|
||||
suite.currentSpecReport = types.SpecReport{
|
||||
LeafNodeType: node.NodeType,
|
||||
LeafNodeLocation: node.CodeLocation,
|
||||
LeafNodeText: node.Text,
|
||||
ParallelProcess: suite.config.ParallelProcess,
|
||||
RunningInParallel: suite.isRunningInParallel(),
|
||||
}
|
||||
suite.selectiveLock.Unlock()
|
||||
|
||||
suite.reporter.WillRun(suite.currentSpecReport)
|
||||
suite.runReportSuiteNode(node, suite.report)
|
||||
suite.processCurrentSpecReport()
|
||||
}
|
||||
|
||||
// if we're running ReportBeforeSuite and we're running in parallel - we shuld tell the other procs that we're done
|
||||
if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 {
|
||||
if suite.report.SuiteSucceeded {
|
||||
suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed)
|
||||
} else {
|
||||
suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *Suite) runReportSuiteNode(node Node, report types.Report) {
|
||||
suite.writer.Truncate()
|
||||
suite.outputInterceptor.StartInterceptingOutput()
|
||||
suite.currentSpecReport.StartTime = time.Now()
|
||||
|
||||
if suite.config.ParallelTotal > 1 {
|
||||
// if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and
|
||||
// (b) always fetch the latest report as prior ReportAfterSuites will contribute to it
|
||||
if node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() {
|
||||
aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
|
||||
if err != nil {
|
||||
suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
|
||||
suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
|
||||
return
|
||||
}
|
||||
report = report.Add(aggregatedReport)
|
||||
}
|
||||
|
||||
node.Body = func(SpecContext) { node.ReportAfterSuiteBody(report) }
|
||||
node.Body = func(SpecContext) { node.ReportSuiteBody(report) }
|
||||
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
|
||||
|
||||
suite.currentSpecReport.EndTime = time.Now()
|
||||
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
|
||||
suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
|
||||
suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
|
||||
@ -662,7 +755,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
|
||||
suite.selectiveLock.Lock()
|
||||
suite.currentNode = node
|
||||
suite.currentNodeStartTime = time.Now()
|
||||
suite.progressStepCursor = ProgressStepCursor{}
|
||||
suite.currentByStep = types.SpecEvent{}
|
||||
suite.selectiveLock.Unlock()
|
||||
defer func() {
|
||||
suite.selectiveLock.Lock()
|
||||
@ -671,13 +764,18 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
|
||||
suite.selectiveLock.Unlock()
|
||||
}()
|
||||
|
||||
if suite.config.EmitSpecProgress && !node.MarkedSuppressProgressReporting {
|
||||
if text == "" {
|
||||
text = "TOP-LEVEL"
|
||||
}
|
||||
s := fmt.Sprintf("[%s] %s\n %s\n", node.NodeType.String(), text, node.CodeLocation.String())
|
||||
suite.writer.Write([]byte(s))
|
||||
if text == "" {
|
||||
text = "TOP-LEVEL"
|
||||
}
|
||||
event := suite.handleSpecEvent(types.SpecEvent{
|
||||
SpecEventType: types.SpecEventNodeStart,
|
||||
NodeType: node.NodeType,
|
||||
Message: text,
|
||||
CodeLocation: node.CodeLocation,
|
||||
})
|
||||
defer func() {
|
||||
suite.handleSpecEventEnd(types.SpecEventNodeEnd, event)
|
||||
}()
|
||||
|
||||
var failure types.Failure
|
||||
failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
|
||||
@ -697,18 +795,23 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
|
||||
|
||||
now := time.Now()
|
||||
deadline := suite.deadline
|
||||
timeoutInPlay := "suite"
|
||||
if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) {
|
||||
deadline = specDeadline
|
||||
timeoutInPlay = "spec"
|
||||
}
|
||||
if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) {
|
||||
deadline = now.Add(node.NodeTimeout)
|
||||
timeoutInPlay = "node"
|
||||
}
|
||||
if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
|
||||
//we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
|
||||
if node.NodeTimeout > 0 {
|
||||
deadline = now.Add(node.NodeTimeout)
|
||||
timeoutInPlay = "node"
|
||||
} else {
|
||||
deadline = now.Add(gracePeriod)
|
||||
timeoutInPlay = "grace period"
|
||||
}
|
||||
}
|
||||
|
||||
@ -743,6 +846,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
|
||||
}
|
||||
|
||||
outcomeFromRun, failureFromRun := suite.failer.Drain()
|
||||
failureFromRun.TimelineLocation = suite.generateTimelineLocation()
|
||||
outcomeC <- outcomeFromRun
|
||||
failureC <- failureFromRun
|
||||
}()
@ -772,23 +876,33 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
select {
case outcomeFromRun := <-outcomeC:
failureFromRun := <-failureC
if outcome == types.SpecStateInterrupted {
// we've already been interrupted. we just managed to actually exit
if outcome.Is(types.SpecStateInterrupted | types.SpecStateTimedout) {
// we've already been interrupted/timed out. we just managed to actually exit
// before the grace period elapsed
return outcome, failure
} else if outcome == types.SpecStateTimedout {
// we've already timed out. we just managed to actually exit
// before the grace period elapsed. if we have a failure message we should include it
// if we have a failure message we attach it as an additional failure
if outcomeFromRun != types.SpecStatePassed {
failure.Location, failure.ForwardedPanic = failureFromRun.Location, failureFromRun.ForwardedPanic
failure.Message = "This spec timed out and reported the following failure after the timeout:\n\n" + failureFromRun.Message
additionalFailure := types.AdditionalFailure{
State: outcomeFromRun,
Failure: failure, //we make a copy - this will include all the configuration set up above...
}
//...and then we update the failure with the details from failureFromRun
additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
additionalFailure.Failure.ProgressReport = types.ProgressReport{}
if outcome == types.SpecStateTimedout {
additionalFailure.Failure.Message = fmt.Sprintf("A %s timeout occurred and then the following failure was recorded in the timedout node before it exited:\n%s", timeoutInPlay, failureFromRun.Message)
} else {
additionalFailure.Failure.Message = fmt.Sprintf("An interrupt occurred and then the following failure was recorded in the interrupted node before it exited:\n%s", failureFromRun.Message)
}
suite.reporter.EmitFailure(additionalFailure.State, additionalFailure.Failure)
failure.AdditionalFailure = &additionalFailure
}
return outcome, failure
}
if outcomeFromRun.Is(types.SpecStatePassed) {
return outcomeFromRun, types.Failure{}
} else {
failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic
failure.Message, failure.Location, failure.ForwardedPanic, failure.TimelineLocation = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
suite.reporter.EmitFailure(outcomeFromRun, failure)
return outcomeFromRun, failure
}
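The reworked branch above now treats an interrupted node the same way as a timed-out one: if the node finally exits during the grace period and reports its own failure, that late failure is attached as an AdditionalFailure on the already-recorded timeout or interrupt failure instead of being dropped. A much-simplified, self-contained sketch of that select (state, fail and waitForNode are illustrative stand-ins, not the vendored types):

package main

import (
	"fmt"
	"time"
)

type state int

const (
	invalid state = iota
	passed
	failedState
	timedout
	interrupted
)

type fail struct {
	Message    string
	Additional *fail // stand-in for Ginkgo's AdditionalFailure
}

// waitForNode sketches the idea: if the node already timed out or was
// interrupted, keep that outcome and attach whatever the node reported on
// its way out as an additional failure; otherwise the node's outcome wins.
func waitForNode(outcome state, failure fail, outcomeC <-chan state, failureC <-chan fail, grace <-chan time.Time) (state, fail) {
	select {
	case fromRun := <-outcomeC:
		lateFailure := <-failureC
		if outcome == timedout || outcome == interrupted {
			if fromRun != passed {
				lateFailure.Message = "recorded after the timeout/interrupt: " + lateFailure.Message
				failure.Additional = &lateFailure
			}
			return outcome, failure
		}
		if fromRun == passed {
			return passed, fail{}
		}
		return fromRun, lateFailure
	case <-grace:
		// Grace period elapsed without the node exiting (simplification).
		return timedout, failure
	}
}

func main() {
	outcomeC := make(chan state, 1)
	failureC := make(chan fail, 1)
	outcomeC <- failedState
	failureC <- fail{Message: "assertion failed"}
	o, f := waitForNode(timedout, fail{Message: "A node timeout occurred"}, outcomeC, failureC, time.After(time.Second))
	fmt.Println(o, f.Message, f.Additional.Message)
}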
case <-gracePeriodChannel:
@ -801,10 +915,12 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
case <-deadlineChannel:
// we're out of time - the outcome is a timeout and we capture the failure and progress report
outcome = types.SpecStateTimedout
failure.Message, failure.Location = "Timedout", node.CodeLocation
failure.Message, failure.Location, failure.TimelineLocation = fmt.Sprintf("A %s timeout occurred", timeoutInPlay), node.CodeLocation, suite.generateTimelineLocation()
failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput()
failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the timeout occurred:{{/}}"
failure.ProgressReport.Message = fmt.Sprintf("{{bold}}This is the Progress Report generated when the %s timeout occurred:{{/}}", timeoutInPlay)
deadlineChannel = nil
suite.reporter.EmitFailure(outcome, failure)

// tell the spec to stop. it's important we generate the progress report first to make sure we capture where
// the spec is actually stuck
sc.cancel()
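When deadlineChannel fires, the new code records which timeout was in play, stamps a timeline location, captures a progress report, emits the failure, and only then cancels the node's context, so the progress report still shows where the spec was stuck. A rough sketch of that cancel-after-reporting ordering (all names below are stand-ins, not Ginkgo internals):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// A stand-in for the node's cancellable context; the real code cancels a
	// per-spec context so the running node can observe the timeout and exit.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	timeoutInPlay := "node"
	deadlineChannel := time.After(10 * time.Millisecond)

	<-deadlineChannel
	// Out of time: record which timeout fired and capture state first...
	fmt.Println(fmt.Sprintf("A %s timeout occurred", timeoutInPlay))
	// ...and only then tell the node to stop.
	cancel()

	<-ctx.Done()
	fmt.Println("node context cancelled:", ctx.Err())
}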
@ -814,36 +930,36 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
interruptStatus = suite.interruptHandler.Status()
deadlineChannel = nil // don't worry about deadlines, time's up now

failureTimelineLocation := suite.generateTimelineLocation()
progressReport := suite.generateProgressReport(true)

if outcome == types.SpecStateInvalid {
outcome = types.SpecStateInterrupted
failure.Message, failure.Location = interruptStatus.Message(), node.CodeLocation
failure.Message, failure.Location, failure.TimelineLocation = interruptStatus.Message(), node.CodeLocation, failureTimelineLocation
if interruptStatus.ShouldIncludeProgressReport() {
failure.ProgressReport = suite.generateProgressReport(true).WithoutCapturedGinkgoWriterOutput()
failure.ProgressReport = progressReport.WithoutCapturedGinkgoWriterOutput()
failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}"
}
suite.reporter.EmitFailure(outcome, failure)
}

var report types.ProgressReport
if interruptStatus.ShouldIncludeProgressReport() {
report = suite.generateProgressReport(false)
}

progressReport = progressReport.WithoutOtherGoroutines()
sc.cancel()

if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
if interruptStatus.ShouldIncludeProgressReport() {
report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
suite.emitProgressReport(report)
progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
suite.emitProgressReport(progressReport)
}
return outcome, failure
}
if interruptStatus.ShouldIncludeProgressReport() {
if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport {
report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
} else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly {
report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
}
suite.emitProgressReport(report)
suite.emitProgressReport(progressReport)
}
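The interrupt branch now reuses a single progressReport value and only varies its message with the interrupt level: the first interrupt keeps cleanup and reporting, the second keeps reporting only, and the final interrupt bails out immediately. A toy model of that escalation (the interruptLevel constants below are illustrative stand-ins, not the interrupt_handler package's values):

package main

import "fmt"

// Each successive interrupt narrows what will still run before exiting.
type interruptLevel int

const (
	levelCleanupAndReport interruptLevel = iota // first interrupt
	levelReportOnly                             // second interrupt
	levelBailOut                                // final interrupt
)

func messageFor(level interruptLevel) string {
	switch level {
	case levelCleanupAndReport:
		return "First interrupt received; running cleanup and reporting nodes, skipping remaining specs."
	case levelReportOnly:
		return "Second interrupt received; running reporting nodes only."
	default:
		return "Final interrupt received; terminating as soon as possible."
	}
}

func main() {
	for level := levelCleanupAndReport; level <= levelBailOut; level++ {
		fmt.Println(messageFor(level))
	}
}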
if gracePeriodChannel == nil {
@ -864,10 +980,12 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
}
}

// TODO: search for usages and consider if reporter.EmitFailure() is necessary
func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
return types.Failure{
Message: message,
Location: node.CodeLocation,
TimelineLocation: suite.generateTimelineLocation(),
FailureNodeContext: types.FailureNodeIsLeafNode,
FailureNodeType: node.NodeType,
FailureNodeLocation: node.CodeLocation,
2
vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
generated
vendored
@ -81,7 +81,7 @@ func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
}

func (t *ginkgoTestingTProxy) Helper() {
// No-op
types.MarkAsHelper(1)
}
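The only change to the testing.T proxy is that Helper() now calls types.MarkAsHelper(1) instead of being a no-op, so failure locations reported through the proxy can skip the helper's own stack frame. A toy sketch of the general idea, assuming a simple registry keyed by function name (markCallerAsHelper and everything else below are illustrative, not Ginkgo's implementation):

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// Helper registry: callers mark themselves so that a location-reporting
// routine could later skip frames that belong to registered helpers.
var (
	mu      sync.Mutex
	helpers = map[string]bool{}
)

func markCallerAsHelper(skip int) {
	pc, _, _, ok := runtime.Caller(skip + 1)
	if !ok {
		return
	}
	fn := runtime.FuncForPC(pc)
	if fn == nil {
		return
	}
	mu.Lock()
	defer mu.Unlock()
	helpers[fn.Name()] = true
}

func myAssertionHelper() {
	markCallerAsHelper(0) // marks myAssertionHelper itself
}

func main() {
	myAssertionHelper()
	fmt.Println(len(helpers), "helper(s) registered")
}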
func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
42
vendor/github.com/onsi/ginkgo/v2/internal/writer.go
generated
vendored
@ -22,24 +22,30 @@ type WriterInterface interface {
Truncate()
Bytes() []byte
Len() int
}

//Writer implements WriterInterface and GinkgoWriterInterface
// Writer implements WriterInterface and GinkgoWriterInterface
type Writer struct {
buffer *bytes.Buffer
outWriter io.Writer
lock *sync.Mutex
mode WriterMode

streamIndent []byte
indentNext bool

teeWriters []io.Writer
}

func NewWriter(outWriter io.Writer) *Writer {
return &Writer{
buffer: &bytes.Buffer{},
lock: &sync.Mutex{},
outWriter: outWriter,
mode: WriterModeStreamAndBuffer,
buffer: &bytes.Buffer{},
lock: &sync.Mutex{},
outWriter: outWriter,
mode: WriterModeStreamAndBuffer,
streamIndent: []byte(" "),
indentNext: true,
}
}

@ -49,6 +55,14 @@ func (w *Writer) SetMode(mode WriterMode) {
w.mode = mode
}

func (w *Writer) Len() int {
w.lock.Lock()
defer w.lock.Unlock()
return w.buffer.Len()
}

var newline = []byte("\n")

func (w *Writer) Write(b []byte) (n int, err error) {
w.lock.Lock()
defer w.lock.Unlock()
@ -58,7 +72,21 @@ func (w *Writer) Write(b []byte) (n int, err error) {
}

if w.mode == WriterModeStreamAndBuffer {
w.outWriter.Write(b)
line, remaining, found := []byte{}, b, false
for len(remaining) > 0 {
line, remaining, found = bytes.Cut(remaining, newline)
if len(line) > 0 {
if w.indentNext {
w.outWriter.Write(w.streamIndent)
w.indentNext = false
}
w.outWriter.Write(line)
}
if found {
w.outWriter.Write(newline)
w.indentNext = true
}
}
}
return w.buffer.Write(b)
}
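The new Write path above streams output line by line: bytes.Cut splits the incoming buffer on newlines, each non-empty line is prefixed with streamIndent, and indentNext is re-armed after every newline so continuation writes are not re-indented mid-line. A minimal standalone sketch of that behaviour (indentingWriter is an illustrative type, not Ginkgo's Writer):

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

var newline = []byte("\n")

// indentingWriter prefixes every forwarded line with an indent and re-arms
// the indent whenever a newline is written.
type indentingWriter struct {
	out        io.Writer
	indent     []byte
	indentNext bool
}

func (w *indentingWriter) Write(b []byte) (int, error) {
	remaining := b
	for len(remaining) > 0 {
		line, rest, found := bytes.Cut(remaining, newline)
		if len(line) > 0 {
			if w.indentNext {
				w.out.Write(w.indent)
				w.indentNext = false
			}
			w.out.Write(line)
		}
		if found {
			w.out.Write(newline)
			w.indentNext = true
		}
		remaining = rest
	}
	return len(b), nil
}

func main() {
	w := &indentingWriter{out: os.Stdout, indent: []byte("  "), indentNext: true}
	fmt.Fprint(w, "first line\nsecond line\npartial ")
	fmt.Fprintln(w, "line continues")
}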
@ -78,7 +106,7 @@ func (w *Writer) Bytes() []byte {
return copied
}

//GinkgoWriterInterface
// GinkgoWriterInterface
func (w *Writer) TeeTo(writer io.Writer) {
w.lock.Lock()
defer w.lock.Unlock()
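TeeTo registers additional writers that receive a copy of everything written to the GinkgoWriter while the buffer keeps its own copy for reports. A small sketch of that tee idea (teeBuffer is a stand-in, not the vendored implementation):

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"sync"
)

// teeBuffer buffers writes and also forwards them to any registered writers.
type teeBuffer struct {
	mu         sync.Mutex
	buffer     bytes.Buffer
	teeWriters []io.Writer
}

func (t *teeBuffer) TeeTo(w io.Writer) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.teeWriters = append(t.teeWriters, w)
}

func (t *teeBuffer) Write(b []byte) (int, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	for _, w := range t.teeWriters {
		w.Write(b)
	}
	return t.buffer.Write(b)
}

func main() {
	buf := &teeBuffer{}
	buf.TeeTo(os.Stdout)
	fmt.Fprintln(buf, "hello")
	fmt.Println("buffered:", buf.buffer.Len(), "bytes")
}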