rebase: update github.com/onsi/ginkgo to v2.9.5

There was a `replace` directive in `go.mod` that prevented Ginkgo from
being updated. Kubernetes 1.27 requires a newer Ginkgo version.
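
For background, the blocking pin would have looked roughly like the sketch
below; the pinned version and the exact directive are assumptions for
illustration only, since the real `go.mod` contents are not reproduced in
this message. With the `replace` line dropped, the normal update flow can
move Ginkgo forward.

    // go.mod (hypothetical sketch, not the real file)
    require github.com/onsi/ginkgo/v2 v2.9.5

    // before this change, a pin similar to this blocked the update:
    // replace github.com/onsi/ginkgo/v2 => github.com/onsi/ginkgo/v2 v2.4.0

    // with the replace directive removed, updating is just:
    //   go get github.com/onsi/ginkgo/v2@v2.9.5
    //   go mod tidy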

Signed-off-by: Niels de Vos <ndevos@ibm.com>

Author:    Niels de Vos
Date:      2023-06-02 17:01:18 +02:00
Committer: mergify[bot]
Parent:    b1a4590967
Commit:    6709cdd1d0
117 changed files with 14702 additions and 802 deletions


@ -9,6 +9,7 @@ import (
"github.com/onsi/ginkgo/v2/internal/parallel_support"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
"golang.org/x/net/context"
)
type Phase uint
@ -19,10 +20,14 @@ const (
PhaseRun
)
var PROGRESS_REPORTER_DEADLING = 5 * time.Second
type Suite struct {
tree *TreeNode
topLevelContainers Nodes
*ProgressReporterManager
phase Phase
suiteNodes Nodes
@ -44,7 +49,8 @@ type Suite struct {
currentSpecContext *specContext
progressStepCursor ProgressStepCursor
currentByStep types.SpecEvent
timelineOrder int
/*
We don't need to lock around all operations. Just those that *could* happen concurrently.
@ -63,8 +69,9 @@ type Suite struct {
func NewSuite() *Suite {
return &Suite{
tree: &TreeNode{},
phase: PhaseBuildTopLevel,
tree: &TreeNode{},
phase: PhaseBuildTopLevel,
ProgressReporterManager: NewProgressReporterManager(),
selectiveLock: &sync.Mutex{},
}
@ -128,7 +135,7 @@ func (suite *Suite) PushNode(node Node) error {
return suite.pushCleanupNode(node)
}
if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) {
if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeBeforeSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
return suite.pushSuiteNode(node)
}
@ -150,6 +157,13 @@ func (suite *Suite) PushNode(node Node) error {
}
}
if node.MarkedContinueOnFailure {
firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
if !firstOrderedNode.IsZero() {
return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)
}
}
if node.NodeType == types.NodeTypeContainer {
// During PhaseBuildTopLevel we only track the top level containers without entering them
// We only enter the top level container nodes during PhaseBuildTree
@ -221,7 +235,7 @@ func (suite *Suite) pushCleanupNode(node Node) error {
node.NodeType = types.NodeTypeCleanupAfterSuite
case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
node.NodeType = types.NodeTypeCleanupAfterAll
case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite:
case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite:
return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
@ -236,19 +250,69 @@ func (suite *Suite) pushCleanupNode(node Node) error {
return nil
}
/*
Pushing and popping the Step Cursor stack
*/
func (suite *Suite) SetProgressStepCursor(cursor ProgressStepCursor) {
func (suite *Suite) generateTimelineLocation() types.TimelineLocation {
suite.selectiveLock.Lock()
defer suite.selectiveLock.Unlock()
suite.progressStepCursor = cursor
suite.timelineOrder += 1
return types.TimelineLocation{
Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(),
Order: suite.timelineOrder,
Time: time.Now(),
}
}
func (suite *Suite) handleSpecEvent(event types.SpecEvent) types.SpecEvent {
event.TimelineLocation = suite.generateTimelineLocation()
suite.selectiveLock.Lock()
suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
suite.selectiveLock.Unlock()
suite.reporter.EmitSpecEvent(event)
return event
}
func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) {
event := startEvent
event.SpecEventType = eventType
event.TimelineLocation = suite.generateTimelineLocation()
event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time)
suite.selectiveLock.Lock()
suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
suite.selectiveLock.Unlock()
suite.reporter.EmitSpecEvent(event)
}
func (suite *Suite) By(text string, callback ...func()) error {
cl := types.NewCodeLocation(2)
if suite.phase != PhaseRun {
return types.GinkgoErrors.ByNotDuringRunPhase(cl)
}
event := suite.handleSpecEvent(types.SpecEvent{
SpecEventType: types.SpecEventByStart,
CodeLocation: cl,
Message: text,
})
suite.selectiveLock.Lock()
suite.currentByStep = event
suite.selectiveLock.Unlock()
if len(callback) == 1 {
defer func() {
suite.selectiveLock.Lock()
suite.currentByStep = types.SpecEvent{}
suite.selectiveLock.Unlock()
suite.handleSpecEventEnd(types.SpecEventByEnd, event)
}()
callback[0]()
} else if len(callback) > 1 {
panic("just one callback per By, please")
}
return nil
}
/*
Spec Running methods - used during PhaseRun
Spec Running methods - used during PhaseRun
*/
func (suite *Suite) CurrentSpecReport() types.SpecReport {
suite.selectiveLock.Lock()
@ -263,27 +327,32 @@ func (suite *Suite) CurrentSpecReport() types.SpecReport {
}
func (suite *Suite) AddReportEntry(entry ReportEntry) error {
suite.selectiveLock.Lock()
defer suite.selectiveLock.Unlock()
if suite.phase != PhaseRun {
return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
}
entry.TimelineLocation = suite.generateTimelineLocation()
entry.Time = entry.TimelineLocation.Time
suite.selectiveLock.Lock()
suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
suite.selectiveLock.Unlock()
suite.reporter.EmitReportEntry(entry)
return nil
}
func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport {
timelineLocation := suite.generateTimelineLocation()
suite.selectiveLock.Lock()
defer suite.selectiveLock.Unlock()
deadline, cancel := context.WithTimeout(context.Background(), PROGRESS_REPORTER_DEADLING)
defer cancel()
var additionalReports []string
if suite.currentSpecContext != nil {
additionalReports = suite.currentSpecContext.QueryProgressReporters()
additionalReports = append(additionalReports, suite.currentSpecContext.QueryProgressReporters(deadline, suite.failer)...)
}
stepCursor := suite.progressStepCursor
additionalReports = append(additionalReports, suite.QueryProgressReporters(deadline, suite.failer)...)
gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes())
pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, stepCursor, gwOutput, additionalReports, suite.config.SourceRoots, fullReport)
pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport)
if err != nil {
fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error())
@ -355,7 +424,13 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s
}
suite.report.SuiteSucceeded = true
suite.runBeforeSuite(numSpecsThatWillBeRun)
suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite)
ranBeforeSuite := suite.report.SuiteSucceeded
if suite.report.SuiteSucceeded {
suite.runBeforeSuite(numSpecsThatWillBeRun)
}
if suite.report.SuiteSucceeded {
groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
@ -394,7 +469,9 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s
}
}
suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
if ranBeforeSuite {
suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
}
interruptStatus := suite.interruptHandler.Status()
if interruptStatus.Interrupted() {
@ -408,9 +485,7 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s
suite.report.SuiteSucceeded = false
}
if suite.config.ParallelProcess == 1 {
suite.runReportAfterSuite()
}
suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite)
suite.reporter.SuiteDidEnd(suite.report)
if suite.isRunningInParallel() {
suite.client.PostSuiteDidEnd(suite.report)
@ -424,9 +499,10 @@ func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: beforeSuiteNode.NodeType,
LeafNodeLocation: beforeSuiteNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
LeafNodeType: beforeSuiteNode.NodeType,
LeafNodeLocation: beforeSuiteNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
RunningInParallel: suite.isRunningInParallel(),
}
suite.selectiveLock.Unlock()
@ -445,9 +521,10 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: afterSuiteNode.NodeType,
LeafNodeLocation: afterSuiteNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
LeafNodeType: afterSuiteNode.NodeType,
LeafNodeLocation: afterSuiteNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
RunningInParallel: suite.isRunningInParallel(),
}
suite.selectiveLock.Unlock()
@ -461,9 +538,10 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
for _, cleanupNode := range afterSuiteCleanup {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: cleanupNode.NodeType,
LeafNodeLocation: cleanupNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
LeafNodeType: cleanupNode.NodeType,
LeafNodeLocation: cleanupNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
RunningInParallel: suite.isRunningInParallel(),
}
suite.selectiveLock.Unlock()
@ -474,23 +552,6 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
}
}
func (suite *Suite) runReportAfterSuite() {
for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: node.NodeType,
LeafNodeLocation: node.CodeLocation,
LeafNodeText: node.Text,
ParallelProcess: suite.config.ParallelProcess,
}
suite.selectiveLock.Unlock()
suite.reporter.WillRun(suite.currentSpecReport)
suite.runReportAfterSuiteNode(node, suite.report)
suite.processCurrentSpecReport()
}
}
func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
nodes := spec.Nodes.WithType(nodeType)
if nodeType == types.NodeTypeReportAfterEach {
@ -608,39 +669,80 @@ func (suite *Suite) runSuiteNode(node Node) {
if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
}
suite.currentSpecReport.EndTime = time.Now()
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
return
}
func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) {
func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) {
nodes := suite.suiteNodes.WithType(nodeType)
// only run ReportAfterSuite on proc 1
if nodeType.Is(types.NodeTypeReportAfterSuite) && suite.config.ParallelProcess != 1 {
return
}
// if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed
if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 {
state, err := suite.client.BlockUntilReportBeforeSuiteCompleted()
if err != nil || state.Is(types.SpecStateFailed) {
suite.report.SuiteSucceeded = false
}
return
}
for _, node := range nodes {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: node.NodeType,
LeafNodeLocation: node.CodeLocation,
LeafNodeText: node.Text,
ParallelProcess: suite.config.ParallelProcess,
RunningInParallel: suite.isRunningInParallel(),
}
suite.selectiveLock.Unlock()
suite.reporter.WillRun(suite.currentSpecReport)
suite.runReportSuiteNode(node, suite.report)
suite.processCurrentSpecReport()
}
// if we're running ReportBeforeSuite and we're running in parallel - we should tell the other procs that we're done
if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 {
if suite.report.SuiteSucceeded {
suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed)
} else {
suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed)
}
}
}
func (suite *Suite) runReportSuiteNode(node Node, report types.Report) {
suite.writer.Truncate()
suite.outputInterceptor.StartInterceptingOutput()
suite.currentSpecReport.StartTime = time.Now()
if suite.config.ParallelTotal > 1 {
// if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and
// (b) always fetch the latest report as prior ReportAfterSuites will contribute to it
if node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() {
aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
if err != nil {
suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
return
}
report = report.Add(aggregatedReport)
}
node.Body = func(SpecContext) { node.ReportAfterSuiteBody(report) }
node.Body = func(SpecContext) { node.ReportSuiteBody(report) }
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
suite.currentSpecReport.EndTime = time.Now()
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()
return
}
func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
@ -662,7 +764,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
suite.selectiveLock.Lock()
suite.currentNode = node
suite.currentNodeStartTime = time.Now()
suite.progressStepCursor = ProgressStepCursor{}
suite.currentByStep = types.SpecEvent{}
suite.selectiveLock.Unlock()
defer func() {
suite.selectiveLock.Lock()
@ -671,13 +773,18 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
suite.selectiveLock.Unlock()
}()
if suite.config.EmitSpecProgress && !node.MarkedSuppressProgressReporting {
if text == "" {
text = "TOP-LEVEL"
}
s := fmt.Sprintf("[%s] %s\n %s\n", node.NodeType.String(), text, node.CodeLocation.String())
suite.writer.Write([]byte(s))
if text == "" {
text = "TOP-LEVEL"
}
event := suite.handleSpecEvent(types.SpecEvent{
SpecEventType: types.SpecEventNodeStart,
NodeType: node.NodeType,
Message: text,
CodeLocation: node.CodeLocation,
})
defer func() {
suite.handleSpecEventEnd(types.SpecEventNodeEnd, event)
}()
var failure types.Failure
failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
@ -697,18 +804,23 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
now := time.Now()
deadline := suite.deadline
timeoutInPlay := "suite"
if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) {
deadline = specDeadline
timeoutInPlay = "spec"
}
if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) {
deadline = now.Add(node.NodeTimeout)
timeoutInPlay = "node"
}
if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
//we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
if node.NodeTimeout > 0 {
deadline = now.Add(node.NodeTimeout)
timeoutInPlay = "node"
} else {
deadline = now.Add(gracePeriod)
timeoutInPlay = "grace period"
}
}
@ -743,6 +855,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
}
outcomeFromRun, failureFromRun := suite.failer.Drain()
failureFromRun.TimelineLocation = suite.generateTimelineLocation()
outcomeC <- outcomeFromRun
failureC <- failureFromRun
}()
@ -772,23 +885,33 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
select {
case outcomeFromRun := <-outcomeC:
failureFromRun := <-failureC
if outcome == types.SpecStateInterrupted {
// we've already been interrupted. we just managed to actually exit
if outcome.Is(types.SpecStateInterrupted | types.SpecStateTimedout) {
// we've already been interrupted/timed out. we just managed to actually exit
// before the grace period elapsed
return outcome, failure
} else if outcome == types.SpecStateTimedout {
// we've already timed out. we just managed to actually exit
// before the grace period elapsed. if we have a failure message we should include it
// if we have a failure message we attach it as an additional failure
if outcomeFromRun != types.SpecStatePassed {
failure.Location, failure.ForwardedPanic = failureFromRun.Location, failureFromRun.ForwardedPanic
failure.Message = "This spec timed out and reported the following failure after the timeout:\n\n" + failureFromRun.Message
additionalFailure := types.AdditionalFailure{
State: outcomeFromRun,
Failure: failure, //we make a copy - this will include all the configuration set up above...
}
//...and then we update the failure with the details from failureFromRun
additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
additionalFailure.Failure.ProgressReport = types.ProgressReport{}
if outcome == types.SpecStateTimedout {
additionalFailure.Failure.Message = fmt.Sprintf("A %s timeout occurred and then the following failure was recorded in the timedout node before it exited:\n%s", timeoutInPlay, failureFromRun.Message)
} else {
additionalFailure.Failure.Message = fmt.Sprintf("An interrupt occurred and then the following failure was recorded in the interrupted node before it exited:\n%s", failureFromRun.Message)
}
suite.reporter.EmitFailure(additionalFailure.State, additionalFailure.Failure)
failure.AdditionalFailure = &additionalFailure
}
return outcome, failure
}
if outcomeFromRun.Is(types.SpecStatePassed) {
return outcomeFromRun, types.Failure{}
} else {
failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic
failure.Message, failure.Location, failure.ForwardedPanic, failure.TimelineLocation = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
suite.reporter.EmitFailure(outcomeFromRun, failure)
return outcomeFromRun, failure
}
case <-gracePeriodChannel:
@ -801,10 +924,12 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
case <-deadlineChannel:
// we're out of time - the outcome is a timeout and we capture the failure and progress report
outcome = types.SpecStateTimedout
failure.Message, failure.Location = "Timedout", node.CodeLocation
failure.Message, failure.Location, failure.TimelineLocation = fmt.Sprintf("A %s timeout occurred", timeoutInPlay), node.CodeLocation, suite.generateTimelineLocation()
failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput()
failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the timeout occurred:{{/}}"
failure.ProgressReport.Message = fmt.Sprintf("{{bold}}This is the Progress Report generated when the %s timeout occurred:{{/}}", timeoutInPlay)
deadlineChannel = nil
suite.reporter.EmitFailure(outcome, failure)
// tell the spec to stop. it's important we generate the progress report first to make sure we capture where
// the spec is actually stuck
sc.cancel()
@ -812,38 +937,44 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
gracePeriodChannel = time.After(gracePeriod)
case <-interruptStatus.Channel:
interruptStatus = suite.interruptHandler.Status()
// ignore interruption from other process if we are cleaning up or reporting
if interruptStatus.Cause == interrupt_handler.InterruptCauseAbortByOtherProcess &&
node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) {
continue
}
deadlineChannel = nil // don't worry about deadlines, time's up now
failureTimelineLocation := suite.generateTimelineLocation()
progressReport := suite.generateProgressReport(true)
if outcome == types.SpecStateInvalid {
outcome = types.SpecStateInterrupted
failure.Message, failure.Location = interruptStatus.Message(), node.CodeLocation
failure.Message, failure.Location, failure.TimelineLocation = interruptStatus.Message(), node.CodeLocation, failureTimelineLocation
if interruptStatus.ShouldIncludeProgressReport() {
failure.ProgressReport = suite.generateProgressReport(true).WithoutCapturedGinkgoWriterOutput()
failure.ProgressReport = progressReport.WithoutCapturedGinkgoWriterOutput()
failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}"
}
suite.reporter.EmitFailure(outcome, failure)
}
var report types.ProgressReport
if interruptStatus.ShouldIncludeProgressReport() {
report = suite.generateProgressReport(false)
}
progressReport = progressReport.WithoutOtherGoroutines()
sc.cancel()
if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
if interruptStatus.ShouldIncludeProgressReport() {
report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
suite.emitProgressReport(report)
progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
suite.emitProgressReport(progressReport)
}
return outcome, failure
}
if interruptStatus.ShouldIncludeProgressReport() {
if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport {
report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
} else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly {
report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
}
suite.emitProgressReport(report)
suite.emitProgressReport(progressReport)
}
if gracePeriodChannel == nil {
@ -864,10 +995,12 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
}
}
// TODO: search for usages and consider if reporter.EmitFailure() is necessary
func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
return types.Failure{
Message: message,
Location: node.CodeLocation,
TimelineLocation: suite.generateTimelineLocation(),
FailureNodeContext: types.FailureNodeIsLeafNode,
FailureNodeType: node.NodeType,
FailureNodeLocation: node.CodeLocation,