package internal

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
	"github.com/onsi/ginkgo/v2/internal/parallel_support"
	"github.com/onsi/ginkgo/v2/reporters"
	"github.com/onsi/ginkgo/v2/types"
)
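
// Phase tracks where the suite is in its lifecycle: collecting top-level
// containers, building the spec tree, or running specs.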
type Phase uint
const (
PhaseBuildTopLevel Phase = iota
PhaseBuildTree
PhaseRun
)

var PROGRESS_REPORTER_DEADLINE = 5 * time.Second

type Suite struct {
	tree               *TreeNode
	topLevelContainers Nodes

	*ProgressReporterManager

	phase Phase

	suiteNodes   Nodes
	cleanupNodes Nodes

	failer            *Failer
	reporter          reporters.Reporter
	writer            WriterInterface
	outputInterceptor OutputInterceptor
	interruptHandler  interrupt_handler.InterruptHandlerInterface
	config            types.SuiteConfig
	deadline          time.Time

	skipAll              bool
	report               types.Report
	currentSpecReport    types.SpecReport
	currentNode          Node
	currentNodeStartTime time.Time

	currentSpecContext *specContext

	currentByStep types.SpecEvent
	timelineOrder int

	/*
		We don't need to lock around all operations.  Just those that *could* happen concurrently.

		Suite, generally, only runs one node at a time - and so the possibility for races is small.  In fact, the presence of a race usually indicates the user has launched a goroutine that has leaked past the node it was launched in.

		However, there are some operations that can happen concurrently:

		- AddReportEntry and CurrentSpecReport can be accessed at any point by the user - including in goroutines that outlive the node intentionally (see, e.g. #1020).  They both form a self-contained read-write pair and so a lock in them is sufficient.
		- generateProgressReport can be invoked at any point in time by an interrupt or a progress poll.  Moreover, it requires access to currentSpecReport, currentNode, currentNodeStartTime, and progressStepCursor.  To make it threadsafe we need to lock around generateProgressReport when we read those variables _and_ everywhere those variables are *written*.  In general we don't need to worry about all possible field writes to these variables as what `generateProgressReport` does with these variables is fairly selective (hence the name of the lock).  Specifically, we don't need to lock around state and failure message changes on `currentSpecReport` - just the setting of the variable itself.
	*/
	selectiveLock *sync.Mutex

	client parallel_support.Client
}
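
// NewSuite returns an empty Suite in PhaseBuildTopLevel, ready to accept nodes.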
func NewSuite() *Suite {
	return &Suite{
		tree:                    &TreeNode{},
		phase:                   PhaseBuildTopLevel,
		ProgressReporterManager: NewProgressReporterManager(),

		selectiveLock: &sync.Mutex{},
	}
}
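
// Clone returns a fresh Suite that shares the already-pushed top-level
// containers and suite nodes but none of the run state.  It is only valid
// before BuildTree has been called.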
func (suite *Suite) Clone() (*Suite, error) {
	if suite.phase != PhaseBuildTopLevel {
		return nil, fmt.Errorf("cannot clone suite after tree has been built")
	}

	return &Suite{
		tree:                    &TreeNode{},
		phase:                   PhaseBuildTopLevel,
		ProgressReporterManager: NewProgressReporterManager(),
		topLevelContainers:      suite.topLevelContainers.Clone(),
		suiteNodes:              suite.suiteNodes.Clone(),
		selectiveLock:           &sync.Mutex{},
	}, nil
}

func (suite *Suite) BuildTree() error {
	// During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelContainers but not entered.
	// We now enter PhaseBuildTree, where these top level containers are entered and added to the spec tree.
	suite.phase = PhaseBuildTree
	for _, topLevelContainer := range suite.topLevelContainers {
		err := suite.PushNode(topLevelContainer)
		if err != nil {
			return err
		}
	}
	return nil
}

func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) {
	if suite.phase != PhaseBuildTree {
		panic("cannot run before building the tree - call suite.BuildTree() first")
	}

	ApplyNestedFocusPolicyToTree(suite.tree)
	specs := GenerateSpecsFromTreeRoot(suite.tree)
	specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig)

	suite.phase = PhaseRun
	suite.client = client
	suite.failer = failer
	suite.reporter = reporter
	suite.writer = writer
	suite.outputInterceptor = outputInterceptor
	suite.interruptHandler = interruptHandler
	suite.config = suiteConfig

	if suite.config.Timeout > 0 {
		suite.deadline = time.Now().Add(suite.config.Timeout)
	}

	cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal)

	success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)

	cancelProgressHandler()

	return success, hasProgrammaticFocus
}

func (suite *Suite) InRunPhase() bool {
	return suite.phase == PhaseRun
}

/*
	Tree Construction methods

	PushNode is used during PhaseBuildTopLevel and PhaseBuildTree
*/

func (suite *Suite) PushNode(node Node) error {
	if node.NodeType.Is(types.NodeTypeCleanupInvalid | types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
		return suite.pushCleanupNode(node)
	}

	if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
		return suite.pushSuiteNode(node)
	}

	if suite.phase == PhaseRun {
		return types.GinkgoErrors.PushingNodeInRunPhase(node.NodeType, node.CodeLocation)
	}

	if node.MarkedSerial {
		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
		if !firstOrderedNode.IsZero() && !firstOrderedNode.MarkedSerial {
			return types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(node.CodeLocation, node.NodeType)
		}
	}

	if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
		if firstOrderedNode.IsZero() {
			return types.GinkgoErrors.SetupNodeNotInOrderedContainer(node.CodeLocation, node.NodeType)
		}
	}

	if node.MarkedContinueOnFailure {
		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
		if !firstOrderedNode.IsZero() {
			return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)
		}
	}

	if node.NodeType == types.NodeTypeContainer {
		// During PhaseBuildTopLevel we only track the top level containers without entering them
		// We only enter the top level container nodes during PhaseBuildTree
		//
		// This ensures the tree is only constructed after `go spec` has called `flag.Parse()` and gives
		// the user an opportunity to load suiteConfiguration information in the `TestX` go spec hook just before `RunSpecs`
		// is invoked.  This makes the lifecycle easier to reason about and solves issues like #693.
		if suite.phase == PhaseBuildTopLevel {
			suite.topLevelContainers = append(suite.topLevelContainers, node)
			return nil
		}
		if suite.phase == PhaseBuildTree {
			parentTree := suite.tree
			suite.tree = &TreeNode{Node: node}
			parentTree.AppendChild(suite.tree)
			err := func() (err error) {
				defer func() {
					if e := recover(); e != nil {
						err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation)
					}
				}()
				node.Body(nil)
				return err
			}()
			suite.tree = parentTree
			return err
		}
	} else {
		suite.tree.AppendChild(&TreeNode{Node: node})
		return nil
	}

	return nil
}

func (suite *Suite) pushSuiteNode(node Node) error {
	if suite.phase == PhaseBuildTree {
		return types.GinkgoErrors.SuiteNodeInNestedContext(node.NodeType, node.CodeLocation)
	}

	if suite.phase == PhaseRun {
		return types.GinkgoErrors.SuiteNodeDuringRunPhase(node.NodeType, node.CodeLocation)
	}

	switch node.NodeType {
	case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
		existingBefores := suite.suiteNodes.WithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
		if len(existingBefores) > 0 {
			return types.GinkgoErrors.MultipleBeforeSuiteNodes(node.NodeType, node.CodeLocation, existingBefores[0].NodeType, existingBefores[0].CodeLocation)
		}
	case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
		existingAfters := suite.suiteNodes.WithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
		if len(existingAfters) > 0 {
			return types.GinkgoErrors.MultipleAfterSuiteNodes(node.NodeType, node.CodeLocation, existingAfters[0].NodeType, existingAfters[0].CodeLocation)
		}
	}

	suite.suiteNodes = append(suite.suiteNodes, node)
	return nil
}
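
// pushCleanupNode maps a DeferCleanup-style node onto the appropriate cleanup
// NodeType based on the node currently running (e.g. cleanup registered in a
// BeforeSuite becomes a CleanupAfterSuite node) and queues it for later.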
func (suite *Suite) pushCleanupNode(node Node) error {
	if suite.phase != PhaseRun || suite.currentNode.IsZero() {
		return types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(node.CodeLocation)
	}

	switch suite.currentNode.NodeType {
	case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite, types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
		node.NodeType = types.NodeTypeCleanupAfterSuite
	case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
		node.NodeType = types.NodeTypeCleanupAfterAll
	case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite:
		return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
	case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
		return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
	default:
		node.NodeType = types.NodeTypeCleanupAfterEach
	}

	node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID
	node.NestingLevel = suite.currentNode.NestingLevel
	suite.selectiveLock.Lock()
	suite.cleanupNodes = append(suite.cleanupNodes, node)
	suite.selectiveLock.Unlock()

	return nil
}
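
// generateTimelineLocation returns a totally ordered location for timeline
// events: the current GinkgoWriter offset plus a monotonically increasing
// order counter, so events can be interleaved correctly when rendered.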
func (suite *Suite) generateTimelineLocation() types.TimelineLocation {
	suite.selectiveLock.Lock()
	defer suite.selectiveLock.Unlock()

	suite.timelineOrder += 1
	return types.TimelineLocation{
		Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(),
		Order:  suite.timelineOrder,
		Time:   time.Now(),
	}
}

func (suite *Suite) handleSpecEvent(event types.SpecEvent) types.SpecEvent {
	event.TimelineLocation = suite.generateTimelineLocation()
	suite.selectiveLock.Lock()
	suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
	suite.selectiveLock.Unlock()
	suite.reporter.EmitSpecEvent(event)
	return event
}

func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) {
	event := startEvent
	event.SpecEventType = eventType
	event.TimelineLocation = suite.generateTimelineLocation()
	event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time)
	suite.selectiveLock.Lock()
	suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
	suite.selectiveLock.Unlock()
	suite.reporter.EmitSpecEvent(event)
}
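
// By emits a ByStart timeline event for the given step and, when a callback
// is supplied, runs it immediately and emits a matching ByEnd event once it
// returns.  Typical usage in a spec:
//
//	By("seeding the database")
//	By("querying the API", func() { ... })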
func (suite *Suite) By(text string, callback ...func()) error {
	cl := types.NewCodeLocation(2)
	if suite.phase != PhaseRun {
		return types.GinkgoErrors.ByNotDuringRunPhase(cl)
	}

	event := suite.handleSpecEvent(types.SpecEvent{
		SpecEventType: types.SpecEventByStart,
		CodeLocation:  cl,
		Message:       text,
	})
	suite.selectiveLock.Lock()
	suite.currentByStep = event
	suite.selectiveLock.Unlock()

	if len(callback) == 1 {
		defer func() {
			suite.selectiveLock.Lock()
			suite.currentByStep = types.SpecEvent{}
			suite.selectiveLock.Unlock()
			suite.handleSpecEventEnd(types.SpecEventByEnd, event)
		}()
		callback[0]()
	} else if len(callback) > 1 {
		panic("just one callback per By, please")
	}
	return nil
}

/*
	Spec Running methods - used during PhaseRun
*/
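
// CurrentSpecReport returns a snapshot of the report for the currently
// running spec.  It is safe to call from user goroutines at any time; the
// ReportEntries slice is copied so callers cannot race with the suite.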
func (suite *Suite) CurrentSpecReport() types.SpecReport {
	suite.selectiveLock.Lock()
	defer suite.selectiveLock.Unlock()
	report := suite.currentSpecReport
	if suite.writer != nil {
		report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
	}
	report.ReportEntries = make([]ReportEntry, len(report.ReportEntries))
	copy(report.ReportEntries, suite.currentSpecReport.ReportEntries)
	return report
}

// Only valid in the preview context.  In general suite.report only includes
// the specs run by _this_ node - it is only at the end of the suite that
// the parallel reports are aggregated.  However in the preview context we run
// in series, and so suite.report contains all the specs.
func (suite *Suite) GetPreviewReport() types.Report {
	suite.selectiveLock.Lock()
	defer suite.selectiveLock.Unlock()
	return suite.report
}

func (suite *Suite) AddReportEntry(entry ReportEntry) error {
	if suite.phase != PhaseRun {
		return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
	}
	entry.TimelineLocation = suite.generateTimelineLocation()
	entry.Time = entry.TimelineLocation.Time
	suite.selectiveLock.Lock()
	suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
	suite.selectiveLock.Unlock()
	suite.reporter.EmitReportEntry(entry)
	return nil
}
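
// generateProgressReport snapshots the current node and spec state and asks
// any attached progress reporters for additional detail, bounded by
// PROGRESS_REPORTER_DEADLINE.  It can be invoked at any time (e.g. by a
// signal), which is why it takes the selectiveLock.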
func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport {
	timelineLocation := suite.generateTimelineLocation()
	suite.selectiveLock.Lock()
	defer suite.selectiveLock.Unlock()

	deadline, cancel := context.WithTimeout(context.Background(), PROGRESS_REPORTER_DEADLINE)
	defer cancel()

	var additionalReports []string
	if suite.currentSpecContext != nil {
		additionalReports = append(additionalReports, suite.currentSpecContext.QueryProgressReporters(deadline, suite.failer)...)
	}
	additionalReports = append(additionalReports, suite.QueryProgressReporters(deadline, suite.failer)...)
	gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes())
	pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport)

	if err != nil {
		fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error())
	}
	return pr
}

func (suite *Suite) handleProgressSignal() {
	report := suite.generateProgressReport(false)
	report.Message = "{{bold}}You've requested a progress report:{{/}}"
	suite.emitProgressReport(report)
}

func (suite *Suite) emitProgressReport(report types.ProgressReport) {
	suite.selectiveLock.Lock()
	suite.currentSpecReport.ProgressReports = append(suite.currentSpecReport.ProgressReports, report.WithoutCapturedGinkgoWriterOutput())
	suite.selectiveLock.Unlock()

	suite.reporter.EmitProgressReport(report)
	if suite.isRunningInParallel() {
		err := suite.client.PostEmitProgressReport(report)
		if err != nil {
			fmt.Println(err.Error())
		}
	}
}

func (suite *Suite) isRunningInParallel() bool {
	return suite.config.ParallelTotal > 1
}

func (suite *Suite) processCurrentSpecReport() {
	suite.reporter.DidRun(suite.currentSpecReport)
	if suite.isRunningInParallel() {
		suite.client.PostDidRun(suite.currentSpecReport)
	}
	suite.report.SpecReports = append(suite.report.SpecReports, suite.currentSpecReport)

	if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
		suite.report.SuiteSucceeded = false
		if suite.config.FailFast || suite.currentSpecReport.State.Is(types.SpecStateAborted) {
			suite.skipAll = true
			if suite.isRunningInParallel() {
				suite.client.PostAbort()
			}
		}
	}
}
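
// runSpecs drives the main run loop.  Specs are ordered into groups; when
// running in parallel the next group index is fetched from the parallel
// support server so groups are distributed across processes, with serial
// groups deferred until proc 1 is the only process still running.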
func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
	numSpecsThatWillBeRun := specs.CountWithoutSkip()

	suite.report = types.Report{
		SuitePath:                 suitePath,
		SuiteDescription:          description,
		SuiteLabels:               suiteLabels,
		SuiteConfig:               suite.config,
		SuiteHasProgrammaticFocus: hasProgrammaticFocus,
		PreRunStats: types.PreRunStats{
			TotalSpecs:       len(specs),
			SpecsThatWillRun: numSpecsThatWillBeRun,
		},
		StartTime: time.Now(),
	}

	suite.reporter.SuiteWillBegin(suite.report)
	if suite.isRunningInParallel() {
		suite.client.PostSuiteWillBegin(suite.report)
	}

	suite.report.SuiteSucceeded = true

	suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite)

	ranBeforeSuite := suite.report.SuiteSucceeded
	if suite.report.SuiteSucceeded {
		suite.runBeforeSuite(numSpecsThatWillBeRun)
	}

	if suite.report.SuiteSucceeded {
		groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
		nextIndex := MakeIncrementingIndexCounter()
		if suite.isRunningInParallel() {
			nextIndex = suite.client.FetchNextCounter
		}

		for {
			groupedSpecIdx, err := nextIndex()
			if err != nil {
				suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, fmt.Sprintf("Failed to iterate over specs:\n%s", err.Error()))
				suite.report.SuiteSucceeded = false
				break
			}

			if groupedSpecIdx >= len(groupedSpecIndices) {
				if suite.config.ParallelProcess == 1 && len(serialGroupedSpecIndices) > 0 {
					groupedSpecIndices, serialGroupedSpecIndices, nextIndex = serialGroupedSpecIndices, GroupedSpecIndices{}, MakeIncrementingIndexCounter()
					suite.client.BlockUntilNonprimaryProcsHaveFinished()
					continue
				}
				break
			}

			// the complexity for running groups of specs is very high because of Ordered containers and FlakeAttempts
			// we encapsulate that complexity in the notion of a Group that can run
			// Group is really just an extension of suite so it gets passed a suite and has access to all its internals
			// Note that group is stateful and intended for single use!
			newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx]))
		}

		if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending {
			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set")
			suite.report.SuiteSucceeded = false
		}
	}

	if ranBeforeSuite {
		suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
	}

	interruptStatus := suite.interruptHandler.Status()
	if interruptStatus.Interrupted() {
		suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String())
		suite.report.SuiteSucceeded = false
	}
	suite.report.EndTime = time.Now()
	suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime)

	if !suite.deadline.IsZero() && suite.report.EndTime.After(suite.deadline) {
		suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite Timeout Elapsed")
		suite.report.SuiteSucceeded = false
	}

	suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite)
	suite.reporter.SuiteDidEnd(suite.report)
	if suite.isRunningInParallel() {
		suite.client.PostSuiteDidEnd(suite.report)
	}

	return suite.report.SuiteSucceeded
}

func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
	beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
	if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
		suite.selectiveLock.Lock()
		suite.currentSpecReport = types.SpecReport{
			LeafNodeType:      beforeSuiteNode.NodeType,
			LeafNodeLocation:  beforeSuiteNode.CodeLocation,
			ParallelProcess:   suite.config.ParallelProcess,
			RunningInParallel: suite.isRunningInParallel(),
		}
		suite.selectiveLock.Unlock()

		suite.reporter.WillRun(suite.currentSpecReport)
		suite.runSuiteNode(beforeSuiteNode)
		if suite.currentSpecReport.State.Is(types.SpecStateSkipped) {
			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite")
			suite.skipAll = true
		}
		suite.processCurrentSpecReport()
	}
}

func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
	afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
	if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
		suite.selectiveLock.Lock()
		suite.currentSpecReport = types.SpecReport{
			LeafNodeType:      afterSuiteNode.NodeType,
			LeafNodeLocation:  afterSuiteNode.CodeLocation,
			ParallelProcess:   suite.config.ParallelProcess,
			RunningInParallel: suite.isRunningInParallel(),
		}
		suite.selectiveLock.Unlock()

		suite.reporter.WillRun(suite.currentSpecReport)
		suite.runSuiteNode(afterSuiteNode)
		suite.processCurrentSpecReport()
	}

	afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse()
	if len(afterSuiteCleanup) > 0 {
		for _, cleanupNode := range afterSuiteCleanup {
			suite.selectiveLock.Lock()
			suite.currentSpecReport = types.SpecReport{
				LeafNodeType:      cleanupNode.NodeType,
				LeafNodeLocation:  cleanupNode.CodeLocation,
				ParallelProcess:   suite.config.ParallelProcess,
				RunningInParallel: suite.isRunningInParallel(),
			}
			suite.selectiveLock.Unlock()

			suite.reporter.WillRun(suite.currentSpecReport)
			suite.runSuiteNode(cleanupNode)
			suite.processCurrentSpecReport()
		}
	}
}
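
// reportEach runs a spec's ReportBeforeEach/ReportAfterEach nodes, passing
// each a copy of the current spec report and folding any reporter failure
// (or abort) back into the spec's state.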
func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
	nodes := spec.Nodes.WithType(nodeType)
	if nodeType == types.NodeTypeReportAfterEach {
		nodes = nodes.SortedByDescendingNestingLevel()
	}
	if nodeType == types.NodeTypeReportBeforeEach {
		nodes = nodes.SortedByAscendingNestingLevel()
	}
	if len(nodes) == 0 {
		return
	}

	for i := range nodes {
		suite.writer.Truncate()
		suite.outputInterceptor.StartInterceptingOutput()
		report := suite.currentSpecReport
		nodes[i].Body = func(ctx SpecContext) {
			nodes[i].ReportEachBody(ctx, report)
		}
		state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i]))

		// If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state.
		// Also, if the reporter is ever aborted - always override the state to propagate the abort
		if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) {
			suite.currentSpecReport.State = state
			suite.currentSpecReport.Failure = failure
		}
		suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
		suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
	}
}
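
// runSuiteNode runs the suite-level nodes.  For SynchronizedBeforeSuite,
// proc 1 runs the proc-1 body and posts the resulting data to the parallel
// support server; other procs block until that data arrives before running
// the all-procs body.  SynchronizedAfterSuite is the mirror image: all procs
// run their body, then proc 1 waits for the others before running its own.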
func (suite *Suite) runSuiteNode(node Node) {
	if suite.config.DryRun {
		suite.currentSpecReport.State = types.SpecStatePassed
		return
	}

	suite.writer.Truncate()
	suite.outputInterceptor.StartInterceptingOutput()
	suite.currentSpecReport.StartTime = time.Now()

	var err error
	switch node.NodeType {
	case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite:
		suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
	case types.NodeTypeCleanupAfterSuite:
		if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 {
			err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
		}
		if err == nil {
			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
		}
	case types.NodeTypeSynchronizedBeforeSuite:
		var data []byte
		var runAllProcs bool
		if suite.config.ParallelProcess == 1 {
			if suite.config.ParallelTotal > 1 {
				suite.outputInterceptor.StopInterceptingAndReturnOutput()
				suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
			}
			node.Body = func(c SpecContext) { data = node.SynchronizedBeforeSuiteProc1Body(c) }
			node.HasContext = node.SynchronizedBeforeSuiteProc1BodyHasContext
			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
			if suite.config.ParallelTotal > 1 {
				suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
				suite.outputInterceptor.StartInterceptingOutput()
				if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
					err = suite.client.PostSynchronizedBeforeSuiteCompleted(types.SpecStatePassed, data)
				} else {
					err = suite.client.PostSynchronizedBeforeSuiteCompleted(suite.currentSpecReport.State, nil)
				}
			}
			runAllProcs = suite.currentSpecReport.State.Is(types.SpecStatePassed) && err == nil
		} else {
			var proc1State types.SpecState
			proc1State, data, err = suite.client.BlockUntilSynchronizedBeforeSuiteData()
			switch proc1State {
			case types.SpecStatePassed:
				runAllProcs = true
			case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout:
				err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1()
			case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped:
				suite.currentSpecReport.State = proc1State
			}
		}
		if runAllProcs {
			node.Body = func(c SpecContext) { node.SynchronizedBeforeSuiteAllProcsBody(c, data) }
			node.HasContext = node.SynchronizedBeforeSuiteAllProcsBodyHasContext
			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
		}
	case types.NodeTypeSynchronizedAfterSuite:
		node.Body = node.SynchronizedAfterSuiteAllProcsBody
		node.HasContext = node.SynchronizedAfterSuiteAllProcsBodyHasContext
		suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
		if suite.config.ParallelProcess == 1 {
			if suite.config.ParallelTotal > 1 {
				err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
			}
			if err == nil {
				if suite.config.ParallelTotal > 1 {
					suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
					suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
				}

				node.Body = node.SynchronizedAfterSuiteProc1Body
				node.HasContext = node.SynchronizedAfterSuiteProc1BodyHasContext
				state, failure := suite.runNode(node, time.Time{}, "")
				if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
					suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure
				}
			}
		}
	}

	if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
		suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
		suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
	}

	suite.currentSpecReport.EndTime = time.Now()
	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
	suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
}
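
// runReportSuiteNodesIfNeedBe runs ReportBeforeSuite/ReportAfterSuite nodes.
// ReportAfterSuite only ever runs on proc 1.  For ReportBeforeSuite, procs > 1
// don't run the nodes themselves - they block until proc 1 has finished and
// simply adopt its success/failure outcome.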
func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) {
	nodes := suite.suiteNodes.WithType(nodeType)
	// only run ReportAfterSuite on proc 1
	if nodeType.Is(types.NodeTypeReportAfterSuite) && suite.config.ParallelProcess != 1 {
		return
	}
	// if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed
	if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 {
		state, err := suite.client.BlockUntilReportBeforeSuiteCompleted()
		if err != nil || state.Is(types.SpecStateFailed) {
			suite.report.SuiteSucceeded = false
		}
		return
	}

	for _, node := range nodes {
		suite.selectiveLock.Lock()
		suite.currentSpecReport = types.SpecReport{
			LeafNodeType:      node.NodeType,
			LeafNodeLocation:  node.CodeLocation,
			LeafNodeText:      node.Text,
			ParallelProcess:   suite.config.ParallelProcess,
			RunningInParallel: suite.isRunningInParallel(),
		}
		suite.selectiveLock.Unlock()

		suite.reporter.WillRun(suite.currentSpecReport)
		suite.runReportSuiteNode(node, suite.report)
		suite.processCurrentSpecReport()
	}

	// if we're running ReportBeforeSuite and we're running in parallel - we should tell the other procs that we're done
	if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 {
		if suite.report.SuiteSucceeded {
			suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed)
		} else {
			suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed)
		}
	}
}

func (suite *Suite) runReportSuiteNode(node Node, report types.Report) {
	suite.writer.Truncate()
	suite.outputInterceptor.StartInterceptingOutput()
	suite.currentSpecReport.StartTime = time.Now()

	// if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and
	// (b) always fetch the latest report as prior ReportAfterSuites will contribute to it
	if node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() {
		aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
		if err != nil {
			suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
			suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
			return
		}
		report = report.Add(aggregatedReport)
	}

	node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) }
	suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")

	suite.currentSpecReport.EndTime = time.Now()
	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
	suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()
}
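
// runNode executes a single node's body in a goroutine while this method
// waits for an outcome, an interrupt, a deadline, or a progress poll.  It
// enforces the suite/spec deadlines, the node's NodeTimeout, and the grace
// period granted to timed-out or interrupted nodes before they are leaked.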
func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
	if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
		suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node)
	}

	interruptStatus := suite.interruptHandler.Status()
	if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
		return types.SpecStateSkipped, types.Failure{}
	}
	if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt) {
		return types.SpecStateSkipped, types.Failure{}
	}
	if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) {
		return types.SpecStateSkipped, types.Failure{}
	}

	suite.selectiveLock.Lock()
	suite.currentNode = node
	suite.currentNodeStartTime = time.Now()
	suite.currentByStep = types.SpecEvent{}
	suite.selectiveLock.Unlock()
	defer func() {
		suite.selectiveLock.Lock()
		suite.currentNode = Node{}
		suite.currentNodeStartTime = time.Time{}
		suite.selectiveLock.Unlock()
	}()

	if text == "" {
		text = "TOP-LEVEL"
	}
	event := suite.handleSpecEvent(types.SpecEvent{
		SpecEventType: types.SpecEventNodeStart,
		NodeType:      node.NodeType,
		Message:       text,
		CodeLocation:  node.CodeLocation,
	})
	defer func() {
		suite.handleSpecEventEnd(types.SpecEventNodeEnd, event)
	}()

	var failure types.Failure
	failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
	if node.NodeType.Is(types.NodeTypeIt) || node.NodeType.Is(types.NodeTypesForSuiteLevelNodes) {
		failure.FailureNodeContext = types.FailureNodeIsLeafNode
	} else if node.NestingLevel <= 0 {
		failure.FailureNodeContext = types.FailureNodeAtTopLevel
	} else {
		failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1
	}
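
	// Work out which deadline applies: the suite deadline, the spec deadline
	// (whichever fires sooner), or the node's own NodeTimeout if it would fire
	// before either.  timeoutInPlay records the winner for failure messages.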
	var outcome types.SpecState

	gracePeriod := suite.config.GracePeriod
	if node.GracePeriod >= 0 {
		gracePeriod = node.GracePeriod
	}

	now := time.Now()
	deadline := suite.deadline
	timeoutInPlay := "suite"
	if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) {
		deadline = specDeadline
		timeoutInPlay = "spec"
	}
	if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) {
		deadline = now.Add(node.NodeTimeout)
		timeoutInPlay = "node"
	}
	if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
		// we're out of time already.  let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
		if node.NodeTimeout > 0 {
			deadline = now.Add(node.NodeTimeout)
			timeoutInPlay = "node"
		} else {
			deadline = now.Add(gracePeriod)
			timeoutInPlay = "grace period"
		}
	}

	if !node.HasContext {
		// this maps onto the pre-context behavior:
		// - an interrupted node exits immediately.  with this, context-less nodes that are in a spec with a SpecTimeout and/or are interrupted by other means will simply exit immediately after the timeout/interrupt
		// - clean up nodes have up to GracePeriod (formerly hard-coded at 30s) to complete before they are interrupted
		gracePeriod = 0
	}

	sc := NewSpecContext(suite)
	defer sc.cancel(fmt.Errorf("spec has finished"))

	suite.selectiveLock.Lock()
	suite.currentSpecContext = sc
	suite.selectiveLock.Unlock()

	var deadlineChannel <-chan time.Time
	if !deadline.IsZero() {
		deadlineChannel = time.After(deadline.Sub(now))
	}
	var gracePeriodChannel <-chan time.Time
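
	// The node body runs in its own goroutine.  On exit (or panic) its deferred
	// handler drains the failer and sends the outcome followed by the failure,
	// so the select loop below always receives from outcomeC then failureC.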
	outcomeC := make(chan types.SpecState)
	failureC := make(chan types.Failure)

	go func() {
		finished := false
		defer func() {
			if e := recover(); e != nil || !finished {
				suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e)
			}

			outcomeFromRun, failureFromRun := suite.failer.Drain()
			failureFromRun.TimelineLocation = suite.generateTimelineLocation()
			outcomeC <- outcomeFromRun
			failureC <- failureFromRun
		}()

		node.Body(sc)
		finished = true
	}()

	// progress polling timer and channel
	var emitProgressNow <-chan time.Time
	var progressPoller *time.Timer
	var pollProgressAfter, pollProgressInterval = suite.config.PollProgressAfter, suite.config.PollProgressInterval
	if node.PollProgressAfter >= 0 {
		pollProgressAfter = node.PollProgressAfter
	}
	if node.PollProgressInterval >= 0 {
		pollProgressInterval = node.PollProgressInterval
	}
	if pollProgressAfter > 0 {
		progressPoller = time.NewTimer(pollProgressAfter)
		emitProgressNow = progressPoller.C
		defer progressPoller.Stop()
	}

	// now we wait for an outcome, an interrupt, a timeout, or a progress poll
	for {
		select {
		case outcomeFromRun := <-outcomeC:
			failureFromRun := <-failureC
			if outcome.Is(types.SpecStateInterrupted | types.SpecStateTimedout) {
				// we've already been interrupted/timed out.  we just managed to actually exit
				// before the grace period elapsed
				// if we have a failure message we attach it as an additional failure
				if outcomeFromRun != types.SpecStatePassed {
					additionalFailure := types.AdditionalFailure{
						State:   outcomeFromRun,
						Failure: failure, // we make a copy - this will include all the configuration set up above...
					}
					// ...and then we update the failure with the details from failureFromRun
					additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
					additionalFailure.Failure.ProgressReport = types.ProgressReport{}
					if outcome == types.SpecStateTimedout {
						additionalFailure.Failure.Message = fmt.Sprintf("A %s timeout occurred and then the following failure was recorded in the timedout node before it exited:\n%s", timeoutInPlay, failureFromRun.Message)
					} else {
						additionalFailure.Failure.Message = fmt.Sprintf("An interrupt occurred and then the following failure was recorded in the interrupted node before it exited:\n%s", failureFromRun.Message)
					}
					suite.reporter.EmitFailure(additionalFailure.State, additionalFailure.Failure)
					failure.AdditionalFailure = &additionalFailure
				}
				return outcome, failure
			}
			if outcomeFromRun.Is(types.SpecStatePassed) {
				return outcomeFromRun, types.Failure{}
			} else {
				failure.Message, failure.Location, failure.ForwardedPanic, failure.TimelineLocation = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
				suite.reporter.EmitFailure(outcomeFromRun, failure)
				return outcomeFromRun, failure
			}
		case <-gracePeriodChannel:
			if node.HasContext && outcome.Is(types.SpecStateTimedout) {
				report := suite.generateProgressReport(false)
				report.Message = "{{bold}}{{orange}}A running node failed to exit in time{{/}}\nGinkgo is moving on but a node has timed out and failed to exit before its grace period elapsed.  The node has now leaked and is running in the background.\nHere's a current progress report:"
				suite.emitProgressReport(report)
			}
			return outcome, failure
		case <-deadlineChannel:
			// we're out of time - the outcome is a timeout and we capture the failure and progress report
			outcome = types.SpecStateTimedout
			failure.Message, failure.Location, failure.TimelineLocation = fmt.Sprintf("A %s timeout occurred", timeoutInPlay), node.CodeLocation, suite.generateTimelineLocation()
			failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput()
			failure.ProgressReport.Message = fmt.Sprintf("{{bold}}This is the Progress Report generated when the %s timeout occurred:{{/}}", timeoutInPlay)
			deadlineChannel = nil
			suite.reporter.EmitFailure(outcome, failure)

			// tell the spec to stop.  it's important we generate the progress report first to make sure we capture where
			// the spec is actually stuck
			sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay))
			// and now we wait for the grace period
			gracePeriodChannel = time.After(gracePeriod)
		case <-interruptStatus.Channel:
			interruptStatus = suite.interruptHandler.Status()
			// ignore interruption from other process if we are cleaning up or reporting
			if interruptStatus.Cause == interrupt_handler.InterruptCauseAbortByOtherProcess &&
				node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) {
				continue
			}

			deadlineChannel = nil // don't worry about deadlines, time's up now

			failureTimelineLocation := suite.generateTimelineLocation()
			progressReport := suite.generateProgressReport(true)

			if outcome == types.SpecStateInvalid {
				outcome = types.SpecStateInterrupted
				failure.Message, failure.Location, failure.TimelineLocation = interruptStatus.Message(), node.CodeLocation, failureTimelineLocation
				if interruptStatus.ShouldIncludeProgressReport() {
					failure.ProgressReport = progressReport.WithoutCapturedGinkgoWriterOutput()
					failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}"
				}
				suite.reporter.EmitFailure(outcome, failure)
			}

			progressReport = progressReport.WithoutOtherGoroutines()
			sc.cancel(fmt.Errorf("%s", interruptStatus.Message()))

			if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
				if interruptStatus.ShouldIncludeProgressReport() {
					progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
					suite.emitProgressReport(progressReport)
				}
				return outcome, failure
			}
			if interruptStatus.ShouldIncludeProgressReport() {
				if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport {
					progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs.  {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
				} else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly {
					progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes.  {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
				}
				suite.emitProgressReport(progressReport)
			}

			if gracePeriodChannel == nil {
				// we haven't given grace yet... so let's
				gracePeriodChannel = time.After(gracePeriod)
			} else {
				// we've already given grace.  time's up.  now.
				return outcome, failure
			}
		case <-emitProgressNow:
			report := suite.generateProgressReport(false)
			report.Message = "{{bold}}Automatically polling progress:{{/}}"
			suite.emitProgressReport(report)
			if pollProgressInterval > 0 {
				progressPoller.Reset(pollProgressInterval)
			}
		}
	}
}

// TODO: search for usages and consider if reporter.EmitFailure() is necessary
func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
	return types.Failure{
		Message:             message,
		Location:            node.CodeLocation,
		TimelineLocation:    suite.generateTimelineLocation(),
		FailureNodeContext:  types.FailureNodeIsLeafNode,
		FailureNodeType:     node.NodeType,
		FailureNodeLocation: node.CodeLocation,
	}
}
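
// max is a local helper; presumably it predates the built-in max added in Go 1.21.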
func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}