Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 18:43:34 +00:00
rebase: update kubernetes to 1.26.1
update kubernetes and its dependencies to v1.26.1

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
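For context: a bump like this is made in ceph-csi's go.mod and then re-vendored, which is what rewrites files under vendor/ such as the ginkgo file below. A minimal, illustrative sketch of the kind of go.mod change involved (the real ceph-csi module set is much larger; the ginkgo downgrade to the v2.4.x line is inferred from the APIs removed in this diff, since kubernetes 1.26 still pins that older ginkgo):

    require (
        k8s.io/api v0.26.1          // k8s.io staging modules track v0.26.x for kubernetes 1.26.1
        k8s.io/apimachinery v0.26.1
        k8s.io/client-go v0.26.1
        github.com/onsi/ginkgo/v2 v2.4.0 // assumed version, inferred from the removed timeline APIs below
    )

Re-running go mod tidy and go mod vendor after such an edit regenerates the vendored sources, producing large mechanical diffs like the one that follows.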
committed by mergify[bot]
parent e9e33fb851
commit 9c8de9471e

vendor/github.com/onsi/ginkgo/v2/internal/suite.go (generated, vendored): 284 lines changed
@@ -44,8 +44,7 @@ type Suite struct {
 	currentSpecContext *specContext
 
-	currentByStep types.SpecEvent
-	timelineOrder int
+	progressStepCursor ProgressStepCursor
 
 	/*
 	We don't need to lock around all operations. Just those that *could* happen concurrently.
@@ -129,7 +128,7 @@ func (suite *Suite) PushNode(node Node) error {
 		return suite.pushCleanupNode(node)
 	}
 
-	if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeBeforeSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
+	if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) {
 		return suite.pushSuiteNode(node)
 	}
 
@@ -151,13 +150,6 @@ func (suite *Suite) PushNode(node Node) error {
 		}
 	}
 
-	if node.MarkedContinueOnFailure {
-		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
-		if !firstOrderedNode.IsZero() {
-			return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)
-		}
-	}
-
 	if node.NodeType == types.NodeTypeContainer {
 		// During PhaseBuildTopLevel we only track the top level containers without entering them
 		// We only enter the top level container nodes during PhaseBuildTree
@@ -229,7 +221,7 @@ func (suite *Suite) pushCleanupNode(node Node) error {
 		node.NodeType = types.NodeTypeCleanupAfterSuite
 	case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
 		node.NodeType = types.NodeTypeCleanupAfterAll
-	case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite:
+	case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite:
 		return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
 	case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
 		return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
@@ -244,69 +236,19 @@ func (suite *Suite) pushCleanupNode(node Node) error {
 	return nil
 }
 
-func (suite *Suite) generateTimelineLocation() types.TimelineLocation {
-	suite.selectiveLock.Lock()
-	defer suite.selectiveLock.Unlock()
-
-	suite.timelineOrder += 1
-	return types.TimelineLocation{
-		Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(),
-		Order:  suite.timelineOrder,
-		Time:   time.Now(),
-	}
-}
-
-func (suite *Suite) handleSpecEvent(event types.SpecEvent) types.SpecEvent {
-	event.TimelineLocation = suite.generateTimelineLocation()
-	suite.selectiveLock.Lock()
-	suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
-	suite.selectiveLock.Unlock()
-	suite.reporter.EmitSpecEvent(event)
-	return event
-}
-
-func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) {
-	event := startEvent
-	event.SpecEventType = eventType
-	event.TimelineLocation = suite.generateTimelineLocation()
-	event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time)
-	suite.selectiveLock.Lock()
-	suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
-	suite.selectiveLock.Unlock()
-	suite.reporter.EmitSpecEvent(event)
-}
-
-func (suite *Suite) By(text string, callback ...func()) error {
-	cl := types.NewCodeLocation(2)
-	if suite.phase != PhaseRun {
-		return types.GinkgoErrors.ByNotDuringRunPhase(cl)
-	}
-
-	event := suite.handleSpecEvent(types.SpecEvent{
-		SpecEventType: types.SpecEventByStart,
-		CodeLocation:  cl,
-		Message:       text,
-	})
-	suite.selectiveLock.Lock()
-	suite.currentByStep = event
-	suite.selectiveLock.Unlock()
-
-	if len(callback) == 1 {
-		defer func() {
-			suite.selectiveLock.Lock()
-			suite.currentByStep = types.SpecEvent{}
-			suite.selectiveLock.Unlock()
-			suite.handleSpecEventEnd(types.SpecEventByEnd, event)
-		}()
-		callback[0]()
-	} else if len(callback) > 1 {
-		panic("just one callback per By, please")
-	}
-	return nil
-}
+/*
+  Pushing and popping the Step Cursor stack
+*/
+
+func (suite *Suite) SetProgressStepCursor(cursor ProgressStepCursor) {
+	suite.selectiveLock.Lock()
+	defer suite.selectiveLock.Unlock()
+
+	suite.progressStepCursor = cursor
+}
 
 /*
 Spec Running methods - used during PhaseRun
 */
 func (suite *Suite) CurrentSpecReport() types.SpecReport {
 	suite.selectiveLock.Lock()
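The hunk above swaps ginkgo's newer timeline plumbing (generateTimelineLocation, handleSpecEvent, handleSpecEventEnd, and the event-emitting By) for the older progress-step cursor. For orientation, here is an illustrative sketch of how the user-facing By reaches the Suite.By shown in the removed lines; hypothetical test code, not part of this commit:

    package e2e_test

    import (
    	. "github.com/onsi/ginkgo/v2"
    )

    var _ = Describe("provisioning", func() {
    	It("creates a volume", func() {
    		// Without a callback, By simply records the step.
    		By("creating the claim")

    		// With exactly one callback, the removed Suite.By brackets it with
    		// ByStart/ByEnd spec events; passing more than one callback panics
    		// with "just one callback per By, please".
    		By("waiting for the claim to bind", func() {
    			// polling logic elided
    		})
    	})
    })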
@@ -321,20 +263,16 @@ func (suite *Suite) CurrentSpecReport() types.SpecReport {
 }
 
 func (suite *Suite) AddReportEntry(entry ReportEntry) error {
+	suite.selectiveLock.Lock()
+	defer suite.selectiveLock.Unlock()
 	if suite.phase != PhaseRun {
 		return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
 	}
-	entry.TimelineLocation = suite.generateTimelineLocation()
-	entry.Time = entry.TimelineLocation.Time
-	suite.selectiveLock.Lock()
 	suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
-	suite.selectiveLock.Unlock()
-	suite.reporter.EmitReportEntry(entry)
 	return nil
 }
 
 func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport {
-	timelineLocation := suite.generateTimelineLocation()
 	suite.selectiveLock.Lock()
 	defer suite.selectiveLock.Unlock()
 
@@ -342,8 +280,10 @@ func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport {
 	if suite.currentSpecContext != nil {
 		additionalReports = suite.currentSpecContext.QueryProgressReporters()
 	}
+	stepCursor := suite.progressStepCursor
+
 	gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes())
-	pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport)
+	pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, stepCursor, gwOutput, additionalReports, suite.config.SourceRoots, fullReport)
 
 	if err != nil {
 		fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error())
@@ -415,13 +355,7 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
 	}
 
 	suite.report.SuiteSucceeded = true
-
-	suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite)
-
-	ranBeforeSuite := suite.report.SuiteSucceeded
-	if suite.report.SuiteSucceeded {
-		suite.runBeforeSuite(numSpecsThatWillBeRun)
-	}
+	suite.runBeforeSuite(numSpecsThatWillBeRun)
 
 	if suite.report.SuiteSucceeded {
 		groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
@@ -460,9 +394,7 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
 		}
 	}
 
-	if ranBeforeSuite {
-		suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
-	}
+	suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
 
 	interruptStatus := suite.interruptHandler.Status()
 	if interruptStatus.Interrupted() {
@@ -476,7 +408,9 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
 		suite.report.SuiteSucceeded = false
 	}
 
-	suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite)
+	if suite.config.ParallelProcess == 1 {
+		suite.runReportAfterSuite()
+	}
 	suite.reporter.SuiteDidEnd(suite.report)
 	if suite.isRunningInParallel() {
 		suite.client.PostSuiteDidEnd(suite.report)
@@ -490,10 +424,9 @@ func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
 	if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
 		suite.selectiveLock.Lock()
 		suite.currentSpecReport = types.SpecReport{
-			LeafNodeType:      beforeSuiteNode.NodeType,
-			LeafNodeLocation:  beforeSuiteNode.CodeLocation,
-			ParallelProcess:   suite.config.ParallelProcess,
-			RunningInParallel: suite.isRunningInParallel(),
+			LeafNodeType:     beforeSuiteNode.NodeType,
+			LeafNodeLocation: beforeSuiteNode.CodeLocation,
+			ParallelProcess:  suite.config.ParallelProcess,
 		}
 		suite.selectiveLock.Unlock()
 
@@ -512,10 +445,9 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
 	if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
 		suite.selectiveLock.Lock()
 		suite.currentSpecReport = types.SpecReport{
-			LeafNodeType:      afterSuiteNode.NodeType,
-			LeafNodeLocation:  afterSuiteNode.CodeLocation,
-			ParallelProcess:   suite.config.ParallelProcess,
-			RunningInParallel: suite.isRunningInParallel(),
+			LeafNodeType:     afterSuiteNode.NodeType,
+			LeafNodeLocation: afterSuiteNode.CodeLocation,
+			ParallelProcess:  suite.config.ParallelProcess,
 		}
 		suite.selectiveLock.Unlock()
 
@@ -529,10 +461,9 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
 	for _, cleanupNode := range afterSuiteCleanup {
 		suite.selectiveLock.Lock()
 		suite.currentSpecReport = types.SpecReport{
-			LeafNodeType:      cleanupNode.NodeType,
-			LeafNodeLocation:  cleanupNode.CodeLocation,
-			ParallelProcess:   suite.config.ParallelProcess,
-			RunningInParallel: suite.isRunningInParallel(),
+			LeafNodeType:     cleanupNode.NodeType,
+			LeafNodeLocation: cleanupNode.CodeLocation,
+			ParallelProcess:  suite.config.ParallelProcess,
 		}
 		suite.selectiveLock.Unlock()
 
@@ -543,6 +474,23 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
 	}
 }
 
+func (suite *Suite) runReportAfterSuite() {
+	for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) {
+		suite.selectiveLock.Lock()
+		suite.currentSpecReport = types.SpecReport{
+			LeafNodeType:     node.NodeType,
+			LeafNodeLocation: node.CodeLocation,
+			LeafNodeText:     node.Text,
+			ParallelProcess:  suite.config.ParallelProcess,
+		}
+		suite.selectiveLock.Unlock()
+
+		suite.reporter.WillRun(suite.currentSpecReport)
+		suite.runReportAfterSuiteNode(node, suite.report)
+		suite.processCurrentSpecReport()
+	}
+}
+
 func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
 	nodes := spec.Nodes.WithType(nodeType)
 	if nodeType == types.NodeTypeReportAfterEach {
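The runReportAfterSuite added above iterates suite nodes registered through ginkgo's public ReportAfterSuite API (and, per the runSpecs hunk earlier, only on parallel process 1). An illustrative sketch of such a node from the test author's side, not part of this commit:

    package e2e_test

    import (
    	. "github.com/onsi/ginkgo/v2"
    	"github.com/onsi/ginkgo/v2/types"
    )

    // Runs once after the entire suite; in parallel runs the aggregated report
    // is fetched first (see runReportAfterSuiteNode further down in this diff).
    var _ = ReportAfterSuite("summarize failures", func(report Report) {
    	for _, spec := range report.SpecReports {
    		if spec.State.Is(types.SpecStateFailureStates) {
    			GinkgoWriter.Printf("FAILED: %s\n", spec.FullText())
    		}
    	}
    })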
@@ -660,80 +608,39 @@ func (suite *Suite) runSuiteNode(node Node) {
 
 	if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
 		suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
-		suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
 	}
 
 	suite.currentSpecReport.EndTime = time.Now()
 	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
 	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
 	suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
 
 	return
 }
 
-func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) {
-	nodes := suite.suiteNodes.WithType(nodeType)
-	// only run ReportAfterSuite on proc 1
-	if nodeType.Is(types.NodeTypeReportAfterSuite) && suite.config.ParallelProcess != 1 {
-		return
-	}
-	// if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed
-	if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 {
-		state, err := suite.client.BlockUntilReportBeforeSuiteCompleted()
-		if err != nil || state.Is(types.SpecStateFailed) {
-			suite.report.SuiteSucceeded = false
-		}
-		return
-	}
-
-	for _, node := range nodes {
-		suite.selectiveLock.Lock()
-		suite.currentSpecReport = types.SpecReport{
-			LeafNodeType:      node.NodeType,
-			LeafNodeLocation:  node.CodeLocation,
-			LeafNodeText:      node.Text,
-			ParallelProcess:   suite.config.ParallelProcess,
-			RunningInParallel: suite.isRunningInParallel(),
-		}
-		suite.selectiveLock.Unlock()
-
-		suite.reporter.WillRun(suite.currentSpecReport)
-		suite.runReportSuiteNode(node, suite.report)
-		suite.processCurrentSpecReport()
-	}
-
-	// if we're running ReportBeforeSuite and we're running in parallel - we shuld tell the other procs that we're done
-	if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 {
-		if suite.report.SuiteSucceeded {
-			suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed)
-		} else {
-			suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed)
-		}
-	}
-}
-
-func (suite *Suite) runReportSuiteNode(node Node, report types.Report) {
+func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) {
 	suite.writer.Truncate()
 	suite.outputInterceptor.StartInterceptingOutput()
 	suite.currentSpecReport.StartTime = time.Now()
 
 	// if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and
 	// (b) always fetch the latest report as prior ReportAfterSuites will contribute to it
-	if node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() {
+	if suite.config.ParallelTotal > 1 {
 		aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
 		if err != nil {
 			suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
-			suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
 			return
 		}
 		report = report.Add(aggregatedReport)
 	}
 
-	node.Body = func(SpecContext) { node.ReportSuiteBody(report) }
+	node.Body = func(SpecContext) { node.ReportAfterSuiteBody(report) }
 	suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
 
 	suite.currentSpecReport.EndTime = time.Now()
 	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
 	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
 	suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()
 
 	return
 }
 
 func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
@@ -755,7 +662,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
 	suite.selectiveLock.Lock()
 	suite.currentNode = node
 	suite.currentNodeStartTime = time.Now()
-	suite.currentByStep = types.SpecEvent{}
+	suite.progressStepCursor = ProgressStepCursor{}
 	suite.selectiveLock.Unlock()
 	defer func() {
 		suite.selectiveLock.Lock()
@@ -764,18 +671,13 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
 		suite.selectiveLock.Unlock()
 	}()
 
-	if text == "" {
-		text = "TOP-LEVEL"
-	}
-	event := suite.handleSpecEvent(types.SpecEvent{
-		SpecEventType: types.SpecEventNodeStart,
-		NodeType:      node.NodeType,
-		Message:       text,
-		CodeLocation:  node.CodeLocation,
-	})
-	defer func() {
-		suite.handleSpecEventEnd(types.SpecEventNodeEnd, event)
-	}()
+	if suite.config.EmitSpecProgress && !node.MarkedSuppressProgressReporting {
+		if text == "" {
+			text = "TOP-LEVEL"
+		}
+		s := fmt.Sprintf("[%s] %s\n  %s\n", node.NodeType.String(), text, node.CodeLocation.String())
+		suite.writer.Write([]byte(s))
+	}
 
 	var failure types.Failure
 	failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
@@ -795,23 +697,18 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
 
 	now := time.Now()
 	deadline := suite.deadline
-	timeoutInPlay := "suite"
 	if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) {
 		deadline = specDeadline
-		timeoutInPlay = "spec"
 	}
 	if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) {
 		deadline = now.Add(node.NodeTimeout)
-		timeoutInPlay = "node"
 	}
 	if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
 		//we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
 		if node.NodeTimeout > 0 {
 			deadline = now.Add(node.NodeTimeout)
-			timeoutInPlay = "node"
 		} else {
 			deadline = now.Add(gracePeriod)
-			timeoutInPlay = "grace period"
 		}
 	}
 
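The deadline logic above selects the tightest applicable deadline: the suite deadline, the per-spec deadline, or the node's own NodeTimeout, falling back to NodeTimeout or the grace period when time has already run out. From the test author's side this is driven by the timeout decorators; an illustrative sketch, not part of this commit:

    package e2e_test

    import (
    	"time"

    	. "github.com/onsi/ginkgo/v2"
    )

    // A node opts into interruption by accepting a SpecContext; ginkgo cancels
    // the context when whichever deadline computed above expires first.
    var _ = It("finishes before its deadline", func(ctx SpecContext) {
    	select {
    	case <-ctx.Done():
    		// deadline hit: runNode records a timeout outcome
    	case <-time.After(100 * time.Millisecond):
    		// normal completion
    	}
    }, NodeTimeout(10*time.Second))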
@@ -846,7 +743,6 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
 		}
 
 		outcomeFromRun, failureFromRun := suite.failer.Drain()
-		failureFromRun.TimelineLocation = suite.generateTimelineLocation()
 		outcomeC <- outcomeFromRun
 		failureC <- failureFromRun
 	}()
@@ -876,33 +772,23 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
 		select {
 		case outcomeFromRun := <-outcomeC:
 			failureFromRun := <-failureC
-			if outcome.Is(types.SpecStateInterrupted | types.SpecStateTimedout) {
-				// we've already been interrupted/timed out. we just managed to actually exit
+			if outcome == types.SpecStateInterrupted {
+				// we've already been interrupted. we just managed to actually exit
 				// before the grace period elapsed
-				// if we have a failure message we attach it as an additional failure
+				return outcome, failure
+			} else if outcome == types.SpecStateTimedout {
+				// we've already timed out. we just managed to actually exit
+				// before the grace period elapsed. if we have a failure message we should include it
 				if outcomeFromRun != types.SpecStatePassed {
-					additionalFailure := types.AdditionalFailure{
-						State:   outcomeFromRun,
-						Failure: failure, //we make a copy - this will include all the configuration set up above...
-					}
-					//...and then we update the failure with the details from failureFromRun
-					additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
-					additionalFailure.Failure.ProgressReport = types.ProgressReport{}
-					if outcome == types.SpecStateTimedout {
-						additionalFailure.Failure.Message = fmt.Sprintf("A %s timeout occurred and then the following failure was recorded in the timedout node before it exited:\n%s", timeoutInPlay, failureFromRun.Message)
-					} else {
-						additionalFailure.Failure.Message = fmt.Sprintf("An interrupt occurred and then the following failure was recorded in the interrupted node before it exited:\n%s", failureFromRun.Message)
-					}
-					suite.reporter.EmitFailure(additionalFailure.State, additionalFailure.Failure)
-					failure.AdditionalFailure = &additionalFailure
+					failure.Location, failure.ForwardedPanic = failureFromRun.Location, failureFromRun.ForwardedPanic
+					failure.Message = "This spec timed out and reported the following failure after the timeout:\n\n" + failureFromRun.Message
 				}
 				return outcome, failure
 			}
 			if outcomeFromRun.Is(types.SpecStatePassed) {
 				return outcomeFromRun, types.Failure{}
 			} else {
-				failure.Message, failure.Location, failure.ForwardedPanic, failure.TimelineLocation = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
-				suite.reporter.EmitFailure(outcomeFromRun, failure)
+				failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic
 				return outcomeFromRun, failure
 			}
 		case <-gracePeriodChannel:
@@ -915,12 +801,10 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
 		case <-deadlineChannel:
 			// we're out of time - the outcome is a timeout and we capture the failure and progress report
 			outcome = types.SpecStateTimedout
-			failure.Message, failure.Location, failure.TimelineLocation = fmt.Sprintf("A %s timeout occurred", timeoutInPlay), node.CodeLocation, suite.generateTimelineLocation()
+			failure.Message, failure.Location = "Timedout", node.CodeLocation
 			failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput()
-			failure.ProgressReport.Message = fmt.Sprintf("{{bold}}This is the Progress Report generated when the %s timeout occurred:{{/}}", timeoutInPlay)
+			failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the timeout occurred:{{/}}"
 			deadlineChannel = nil
-			suite.reporter.EmitFailure(outcome, failure)
 
 			// tell the spec to stop. it's important we generate the progress report first to make sure we capture where
 			// the spec is actually stuck
 			sc.cancel()
@@ -930,36 +814,36 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
 			interruptStatus = suite.interruptHandler.Status()
 			deadlineChannel = nil // don't worry about deadlines, time's up now
 
-			failureTimelineLocation := suite.generateTimelineLocation()
-			progressReport := suite.generateProgressReport(true)
-
 			if outcome == types.SpecStateInvalid {
 				outcome = types.SpecStateInterrupted
-				failure.Message, failure.Location, failure.TimelineLocation = interruptStatus.Message(), node.CodeLocation, failureTimelineLocation
+				failure.Message, failure.Location = interruptStatus.Message(), node.CodeLocation
 				if interruptStatus.ShouldIncludeProgressReport() {
-					failure.ProgressReport = progressReport.WithoutCapturedGinkgoWriterOutput()
+					failure.ProgressReport = suite.generateProgressReport(true).WithoutCapturedGinkgoWriterOutput()
 					failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}"
 				}
-				suite.reporter.EmitFailure(outcome, failure)
 			}
 
-			progressReport = progressReport.WithoutOtherGoroutines()
+			var report types.ProgressReport
+			if interruptStatus.ShouldIncludeProgressReport() {
+				report = suite.generateProgressReport(false)
+			}
+
 			sc.cancel()
 
 			if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
 				if interruptStatus.ShouldIncludeProgressReport() {
-					progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
-					suite.emitProgressReport(progressReport)
+					report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
+					suite.emitProgressReport(report)
 				}
 				return outcome, failure
 			}
 			if interruptStatus.ShouldIncludeProgressReport() {
 				if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport {
-					progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
+					report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
 				} else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly {
-					progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
+					report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
 				}
-				suite.emitProgressReport(progressReport)
+				suite.emitProgressReport(report)
 			}
 
 			if gracePeriodChannel == nil {
@@ -980,12 +864,10 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
 	}
 }
 
-// TODO: search for usages and consider if reporter.EmitFailure() is necessary
 func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
 	return types.Failure{
 		Message:             message,
 		Location:            node.CodeLocation,
-		TimelineLocation:    suite.generateTimelineLocation(),
 		FailureNodeContext:  types.FailureNodeIsLeafNode,
 		FailureNodeType:     node.NodeType,
 		FailureNodeLocation: node.CodeLocation,