rebase: bump github.com/onsi/ginkgo/v2 from 2.1.6 to 2.3.1

Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.1.6 to 2.3.1.
- [Release notes](https://github.com/onsi/ginkgo/releases)
- [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md)
- [Commits](https://github.com/onsi/ginkgo/compare/v2.1.6...v2.3.1)

---
updated-dependencies:
- dependency-name: github.com/onsi/ginkgo/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Author: dependabot[bot]
Date: 2022-10-17 17:38:08 +00:00
Committed by: mergify[bot]
Parent: 53bb28e0d9
Commit: 3a490a4df0
46 changed files with 2752 additions and 939 deletions

View File

@ -128,7 +128,10 @@ func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) {
if spec.Skip {
return types.SpecStateSkipped, types.Failure{}
}
if g.suite.interruptHandler.Status().Interrupted || g.suite.skipAll {
if g.suite.interruptHandler.Status().Interrupted() || g.suite.skipAll {
return types.SpecStateSkipped, types.Failure{}
}
if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) {
return types.SpecStateSkipped, types.Failure{}
}
if !g.succeeded {
@ -163,8 +166,6 @@ func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool {
}
func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
interruptStatus := g.suite.interruptHandler.Status()
pairs := g.runOncePairs[spec.SubjectID()]
nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll)
@ -173,12 +174,17 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
nodes = append(nodes, spec.Nodes.FirstNodeWithType(types.NodeTypeIt))
terminatingNode, terminatingPair := Node{}, runOncePair{}
deadline := time.Time{}
if spec.SpecTimeout() > 0 {
deadline = time.Now().Add(spec.SpecTimeout())
}
for _, node := range nodes {
oncePair := pairs.runOncePairFor(node.ID)
if !oncePair.isZero() && g.runOnceTracker[oncePair].Is(types.SpecStatePassed) {
continue
}
g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, interruptStatus.Channel, spec.Nodes.BestTextFor(node))
g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node))
g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
if !oncePair.isZero() {
g.runOnceTracker[oncePair] = g.suite.currentSpecReport.State
@ -260,11 +266,13 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
for _, node := range nodes {
afterNodeWasRun[node.ID] = true
state, failure := g.suite.runNode(node, g.suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(node))
state, failure := g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node))
g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
if g.suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted {
g.suite.currentSpecReport.State = state
g.suite.currentSpecReport.Failure = failure
} else if state.Is(types.SpecStateFailureStates) {
g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, types.AdditionalFailure{State: state, Failure: failure})
}
}
includeDeferCleanups = true
@ -279,7 +287,10 @@ func (g *group) run(specs Specs) {
}
for _, spec := range g.specs {
g.suite.selectiveLock.Lock()
g.suite.currentSpecReport = g.initialReportForSpec(spec)
g.suite.selectiveLock.Unlock()
g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec)
g.suite.reporter.WillRun(g.suite.currentSpecReport)
g.suite.reportEach(spec, types.NodeTypeReportBeforeEach)
@ -318,227 +329,8 @@ func (g *group) run(specs Specs) {
if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
g.succeeded = false
}
g.suite.selectiveLock.Lock()
g.suite.currentSpecReport = types.SpecReport{}
}
}
func (g *group) oldRun(specs Specs) {
var suite = g.suite
nodeState := map[uint]types.SpecState{}
groupSucceeded := true
indexOfLastSpecContainingNodeID := func(id uint) int {
lastIdx := -1
for idx := range specs {
if specs[idx].Nodes.ContainsNodeID(id) && !specs[idx].Skip {
lastIdx = idx
}
}
return lastIdx
}
for i, spec := range specs {
suite.currentSpecReport = types.SpecReport{
ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
LeafNodeType: types.NodeTypeIt,
LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
ParallelProcess: suite.config.ParallelProcess,
IsSerial: spec.Nodes.HasNodeMarkedSerial(),
IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
}
skip := spec.Skip
if spec.Nodes.HasNodeMarkedPending() {
skip = true
suite.currentSpecReport.State = types.SpecStatePending
} else {
if suite.interruptHandler.Status().Interrupted || suite.skipAll {
skip = true
}
if !groupSucceeded {
skip = true
suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
"Spec skipped because an earlier spec in an ordered container failed")
}
for _, node := range spec.Nodes.WithType(types.NodeTypeBeforeAll) {
if nodeState[node.ID] == types.SpecStateSkipped {
skip = true
suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
"Spec skipped because Skip() was called in BeforeAll")
break
}
}
if skip {
suite.currentSpecReport.State = types.SpecStateSkipped
}
}
if suite.config.DryRun && !skip {
skip = true
suite.currentSpecReport.State = types.SpecStatePassed
}
suite.reporter.WillRun(suite.currentSpecReport)
//send the spec report to any attached ReportBeforeEach blocks - this will update suite.currentSpecReport if failures occur in these blocks
suite.reportEach(spec, types.NodeTypeReportBeforeEach)
if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
//the reportEach failed, skip this spec
skip = true
}
suite.currentSpecReport.StartTime = time.Now()
maxAttempts := max(1, spec.FlakeAttempts())
if suite.config.FlakeAttempts > 0 {
maxAttempts = suite.config.FlakeAttempts
}
for attempt := 0; !skip && (attempt < maxAttempts); attempt++ {
suite.currentSpecReport.NumAttempts = attempt + 1
suite.writer.Truncate()
suite.outputInterceptor.StartInterceptingOutput()
if attempt > 0 {
fmt.Fprintf(suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt)
}
isFinalAttempt := (attempt == maxAttempts-1)
interruptStatus := suite.interruptHandler.Status()
deepestNestingLevelAttained := -1
var nodes = spec.Nodes.WithType(types.NodeTypeBeforeAll).Filter(func(n Node) bool {
return nodeState[n.ID] != types.SpecStatePassed
})
nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel()
nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...)
nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeIt)...)
var terminatingNode Node
for j := range nodes {
deepestNestingLevelAttained = max(deepestNestingLevelAttained, nodes[j].NestingLevel)
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(nodes[j], interruptStatus.Channel, spec.Nodes.BestTextFor(nodes[j]))
suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime)
nodeState[nodes[j].ID] = suite.currentSpecReport.State
if suite.currentSpecReport.State != types.SpecStatePassed {
terminatingNode = nodes[j]
break
}
}
afterAllNodesThatRan := map[uint]bool{}
// pull out some shared code so we aren't repeating ourselves down below. this just runs after and cleanup nodes
runAfterAndCleanupNodes := func(nodes Nodes) {
for j := range nodes {
state, failure := suite.runNode(nodes[j], suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(nodes[j]))
suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime)
nodeState[nodes[j].ID] = state
if suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted {
suite.currentSpecReport.State = state
suite.currentSpecReport.Failure = failure
if state != types.SpecStatePassed {
terminatingNode = nodes[j]
}
}
if nodes[j].NodeType.Is(types.NodeTypeAfterAll) {
afterAllNodesThatRan[nodes[j].ID] = true
}
}
}
// pull out a helper that captures the logic of whether or not we should run a given After node.
// there is complexity here stemming from the fact that we allow nested ordered contexts and flaky retries
shouldRunAfterNode := func(n Node) bool {
if n.NodeType.Is(types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
return true
}
var id uint
if n.NodeType.Is(types.NodeTypeAfterAll) {
id = n.ID
if afterAllNodesThatRan[id] { //we've already run on this attempt. don't run again.
return false
}
}
if n.NodeType.Is(types.NodeTypeCleanupAfterAll) {
id = n.NodeIDWhereCleanupWasGenerated
}
isLastSpecWithNode := indexOfLastSpecContainingNodeID(id) == i
switch suite.currentSpecReport.State {
case types.SpecStatePassed: //we've passed so far...
return isLastSpecWithNode //... and we're the last spec with this AfterNode, so we should run it
case types.SpecStateSkipped: //the spec was skipped by the user...
if isLastSpecWithNode {
return true //...we're the last spec, so we should run the AfterNode
}
if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) && terminatingNode.NestingLevel == n.NestingLevel {
return true //...or, a BeforeAll was skipped and it's at our nesting level, so our subgroup is going to skip
}
case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed...
if isFinalAttempt {
return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run
}
if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) {
//...we'll be rerunning a BeforeAll so we should cleanup after it if...
if n.NodeType.Is(types.NodeTypeAfterAll) && terminatingNode.NestingLevel == n.NestingLevel {
return true //we're at the same nesting level
}
if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated {
return true //we're a DeferCleanup generated by it
}
}
if terminatingNode.NodeType.Is(types.NodeTypeAfterAll) {
//...we'll be rerunning an AfterAll so we should cleanup after it if...
if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated {
return true //we're a DeferCleanup generated by it
}
}
case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted
return true //...that means the test run is over and we should clean up the stack. Run the AfterNode
}
return false
}
// first pass - run all the JustAfterEach, AfterEach, and AfterAlls. Our shouldRunAfterNode filter function will clean up the AfterAlls for us.
afterNodes := spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel()
afterNodes = afterNodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterEach).CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()...)
afterNodes = afterNodes.WithinNestingLevel(deepestNestingLevelAttained)
afterNodes = afterNodes.Filter(shouldRunAfterNode)
runAfterAndCleanupNodes(afterNodes)
// second-pass perhaps we didn't run the AfterAlls but a state change due to an AfterEach now requires us to run the AfterAlls:
afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode)
runAfterAndCleanupNodes(afterNodes)
// now we run any DeferCleanups
afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()
afterNodes = append(afterNodes, suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Filter(shouldRunAfterNode).Reverse()...)
runAfterAndCleanupNodes(afterNodes)
// third-pass, perhaps a DeferCleanup failed and now we need to run the AfterAlls.
afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode)
runAfterAndCleanupNodes(afterNodes)
// and finally - running AfterAlls may have generated some new DeferCleanup nodes, let's run them to finish up
afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse().Filter(shouldRunAfterNode)
runAfterAndCleanupNodes(afterNodes)
suite.currentSpecReport.EndTime = time.Now()
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
if suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
break
}
}
//send the spec report to any attached ReportAfterEach blocks - this will update suite.currentSpecReport if failures occur in these blocks
suite.reportEach(spec, types.NodeTypeReportAfterEach)
suite.processCurrentSpecReport()
if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
groupSucceeded = false
}
suite.currentSpecReport = types.SpecReport{}
g.suite.selectiveLock.Unlock()
}
}

View File

@ -1,39 +1,38 @@
package interrupt_handler
import (
"fmt"
"os"
"os/signal"
"runtime"
"sync"
"syscall"
"time"
"github.com/onsi/ginkgo/v2/formatter"
"github.com/onsi/ginkgo/v2/internal/parallel_support"
)
const TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION = 30 * time.Second
const TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT = 10
const ABORT_POLLING_INTERVAL = 500 * time.Millisecond
const ABORT_REPEAT_INTERRUPT_DURATION = 30 * time.Second
type InterruptCause uint
const (
InterruptCauseInvalid InterruptCause = iota
InterruptCauseSignal
InterruptCauseTimeout
InterruptCauseAbortByOtherProcess
)
type InterruptLevel uint
const (
InterruptLevelUninterrupted InterruptLevel = iota
InterruptLevelCleanupAndReport
InterruptLevelReportOnly
InterruptLevelBailOut
)
func (ic InterruptCause) String() string {
switch ic {
case InterruptCauseSignal:
return "Interrupted by User"
case InterruptCauseTimeout:
return "Interrupted by Timeout"
case InterruptCauseAbortByOtherProcess:
return "Interrupted by Other Ginkgo Process"
}
@ -41,37 +40,49 @@ func (ic InterruptCause) String() string {
}
type InterruptStatus struct {
Interrupted bool
Channel chan interface{}
Cause InterruptCause
Channel chan interface{}
Level InterruptLevel
Cause InterruptCause
}
func (s InterruptStatus) Interrupted() bool {
return s.Level != InterruptLevelUninterrupted
}
func (s InterruptStatus) Message() string {
return s.Cause.String()
}
func (s InterruptStatus) ShouldIncludeProgressReport() bool {
return s.Cause != InterruptCauseAbortByOtherProcess
}
type InterruptHandlerInterface interface {
Status() InterruptStatus
SetInterruptPlaceholderMessage(string)
ClearInterruptPlaceholderMessage()
InterruptMessageWithStackTraces() string
}
type InterruptHandler struct {
c chan interface{}
lock *sync.Mutex
interrupted bool
interruptPlaceholderMessage string
interruptCause InterruptCause
client parallel_support.Client
stop chan interface{}
c chan interface{}
lock *sync.Mutex
level InterruptLevel
cause InterruptCause
client parallel_support.Client
stop chan interface{}
signals []os.Signal
}
func NewInterruptHandler(timeout time.Duration, client parallel_support.Client) *InterruptHandler {
handler := &InterruptHandler{
c: make(chan interface{}),
lock: &sync.Mutex{},
interrupted: false,
stop: make(chan interface{}),
client: client,
func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler {
if len(signals) == 0 {
signals = []os.Signal{os.Interrupt, syscall.SIGTERM}
}
handler.registerForInterrupts(timeout)
handler := &InterruptHandler{
c: make(chan interface{}),
lock: &sync.Mutex{},
stop: make(chan interface{}),
client: client,
signals: signals,
}
handler.registerForInterrupts()
return handler
}
@ -79,30 +90,22 @@ func (handler *InterruptHandler) Stop() {
close(handler.stop)
}
func (handler *InterruptHandler) registerForInterrupts(timeout time.Duration) {
func (handler *InterruptHandler) registerForInterrupts() {
// os signal handling
signalChannel := make(chan os.Signal, 1)
signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM)
// timeout handling
var timeoutChannel <-chan time.Time
var timeoutTimer *time.Timer
if timeout > 0 {
timeoutTimer = time.NewTimer(timeout)
timeoutChannel = timeoutTimer.C
}
signal.Notify(signalChannel, handler.signals...)
// cross-process abort handling
var abortChannel chan bool
var abortChannel chan interface{}
if handler.client != nil {
abortChannel = make(chan bool)
abortChannel = make(chan interface{})
go func() {
pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL)
for {
select {
case <-pollTicker.C:
if handler.client.ShouldAbort() {
abortChannel <- true
close(abortChannel)
pollTicker.Stop()
return
}
@ -114,55 +117,37 @@ func (handler *InterruptHandler) registerForInterrupts(timeout time.Duration) {
}()
}
// listen for any interrupt signals
// note that some (timeouts, cross-process aborts) will only trigger once
// for these we set up a ticker to keep interrupting the suite until it ends
// this ensures any `AfterEach` or `AfterSuite`s that get stuck cleaning up
// get interrupted eventually
go func() {
go func(abortChannel chan interface{}) {
var interruptCause InterruptCause
var repeatChannel <-chan time.Time
var repeatTicker *time.Ticker
for {
select {
case <-signalChannel:
interruptCause = InterruptCauseSignal
case <-timeoutChannel:
interruptCause = InterruptCauseTimeout
repeatInterruptTimeout := timeout / time.Duration(TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT)
if repeatInterruptTimeout > TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION {
repeatInterruptTimeout = TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION
}
timeoutTimer.Stop()
repeatTicker = time.NewTicker(repeatInterruptTimeout)
repeatChannel = repeatTicker.C
case <-abortChannel:
interruptCause = InterruptCauseAbortByOtherProcess
repeatTicker = time.NewTicker(ABORT_REPEAT_INTERRUPT_DURATION)
repeatChannel = repeatTicker.C
case <-repeatChannel:
//do nothing, just interrupt again using the same interruptCause
case <-handler.stop:
if timeoutTimer != nil {
timeoutTimer.Stop()
}
if repeatTicker != nil {
repeatTicker.Stop()
}
signal.Stop(signalChannel)
return
}
abortChannel = nil
handler.lock.Lock()
handler.interruptCause = interruptCause
if handler.interruptPlaceholderMessage != "" {
fmt.Println(handler.interruptPlaceholderMessage)
oldLevel := handler.level
handler.cause = interruptCause
if handler.level == InterruptLevelUninterrupted {
handler.level = InterruptLevelCleanupAndReport
} else if handler.level == InterruptLevelCleanupAndReport {
handler.level = InterruptLevelReportOnly
} else if handler.level == InterruptLevelReportOnly {
handler.level = InterruptLevelBailOut
}
if handler.level != oldLevel {
close(handler.c)
handler.c = make(chan interface{})
}
handler.interrupted = true
close(handler.c)
handler.c = make(chan interface{})
handler.lock.Unlock()
}
}()
}(abortChannel)
}
func (handler *InterruptHandler) Status() InterruptStatus {
@ -170,43 +155,8 @@ func (handler *InterruptHandler) Status() InterruptStatus {
defer handler.lock.Unlock()
return InterruptStatus{
Interrupted: handler.interrupted,
Channel: handler.c,
Cause: handler.interruptCause,
Level: handler.level,
Channel: handler.c,
Cause: handler.cause,
}
}
func (handler *InterruptHandler) SetInterruptPlaceholderMessage(message string) {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.interruptPlaceholderMessage = message
}
func (handler *InterruptHandler) ClearInterruptPlaceholderMessage() {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.interruptPlaceholderMessage = ""
}
func (handler *InterruptHandler) InterruptMessageWithStackTraces() string {
handler.lock.Lock()
out := fmt.Sprintf("%s\n\n", handler.interruptCause.String())
defer handler.lock.Unlock()
if handler.interruptCause == InterruptCauseAbortByOtherProcess {
return out
}
out += "Here's a stack trace of all running goroutines:\n"
buf := make([]byte, 8192)
for {
n := runtime.Stack(buf, true)
if n < len(buf) {
buf = buf[:n]
break
}
buf = make([]byte, 2*len(buf))
}
out += formatter.Fi(1, "%s", string(buf))
return out
}
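The rewritten handler replaces the old `interrupted` boolean with an escalating `InterruptLevel`: each successive signal moves one rung down the ladder, and the status channel is only closed and replaced when the level actually changes. A minimal, self-contained sketch of that ladder (it mirrors the if/else chain in `registerForInterrupts` above; it is not the handler's actual API):

```go
package main

import "fmt"

type InterruptLevel uint

const (
	InterruptLevelUninterrupted InterruptLevel = iota
	InterruptLevelCleanupAndReport
	InterruptLevelReportOnly
	InterruptLevelBailOut
)

// escalate mirrors registerForInterrupts: the first interrupt triggers
// cleanup-and-report, the second skips cleanup, the third bails out.
func escalate(level InterruptLevel) InterruptLevel {
	switch level {
	case InterruptLevelUninterrupted:
		return InterruptLevelCleanupAndReport
	case InterruptLevelCleanupAndReport:
		return InterruptLevelReportOnly
	default:
		return InterruptLevelBailOut
	}
}

func main() {
	level := InterruptLevelUninterrupted
	for i := 0; i < 4; i++ {
		level = escalate(level)
		fmt.Println(level) // prints 1, 2, 3, 3; BailOut is terminal
	}
}
```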

View File

@ -1,9 +1,11 @@
package internal
import (
"context"
"fmt"
"reflect"
"sort"
"time"
"sync"
@ -27,15 +29,20 @@ type Node struct {
NodeType types.NodeType
Text string
Body func()
Body func(SpecContext)
CodeLocation types.CodeLocation
NestingLevel int
HasContext bool
SynchronizedBeforeSuiteProc1Body func() []byte
SynchronizedBeforeSuiteAllProcsBody func([]byte)
SynchronizedBeforeSuiteProc1Body func(SpecContext) []byte
SynchronizedBeforeSuiteProc1BodyHasContext bool
SynchronizedBeforeSuiteAllProcsBody func(SpecContext, []byte)
SynchronizedBeforeSuiteAllProcsBodyHasContext bool
SynchronizedAfterSuiteAllProcsBody func()
SynchronizedAfterSuiteProc1Body func()
SynchronizedAfterSuiteAllProcsBody func(SpecContext)
SynchronizedAfterSuiteAllProcsBodyHasContext bool
SynchronizedAfterSuiteProc1Body func(SpecContext)
SynchronizedAfterSuiteProc1BodyHasContext bool
ReportEachBody func(types.SpecReport)
ReportAfterSuiteBody func(types.Report)
@ -48,6 +55,11 @@ type Node struct {
MarkedSuppressProgressReporting bool
FlakeAttempts int
Labels Labels
PollProgressAfter time.Duration
PollProgressInterval time.Duration
NodeTimeout time.Duration
SpecTimeout time.Duration
GracePeriod time.Duration
NodeIDWhereCleanupWasGenerated uint
}
@ -71,6 +83,11 @@ type FlakeAttempts uint
type Offset uint
type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing
type Labels []string
type PollProgressInterval time.Duration
type PollProgressAfter time.Duration
type NodeTimeout time.Duration
type SpecTimeout time.Duration
type GracePeriod time.Duration
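These new decorator types carry the timeout and progress-polling configuration parsed in `NewNode` below. A hedged usage sketch follows; the `pingService` helper is hypothetical, while the decorators and the `func(SpecContext)` body signature are what this diff introduces:

```go
package foo_test

import (
	"context"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// pingService is a stand-in for real work that honors context cancellation.
func pingService(ctx context.Context) error {
	select {
	case <-time.After(100 * time.Millisecond): // pretend work
		return nil
	case <-ctx.Done():
		return ctx.Err() // fires when NodeTimeout/SpecTimeout elapses
	}
}

var _ = Describe("the service", func() {
	It("responds before the deadline", func(ctx SpecContext) {
		Expect(pingService(ctx)).NotTo(HaveOccurred())
	}, NodeTimeout(10*time.Second), SpecTimeout(30*time.Second), PollProgressAfter(5*time.Second))
})
```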
func UnionOfLabels(labels ...Labels) Labels {
out := Labels{}
@ -123,6 +140,16 @@ func isDecoration(arg interface{}) bool {
return true
case t == reflect.TypeOf(Labels{}):
return true
case t == reflect.TypeOf(PollProgressInterval(0)):
return true
case t == reflect.TypeOf(PollProgressAfter(0)):
return true
case t == reflect.TypeOf(NodeTimeout(0)):
return true
case t == reflect.TypeOf(SpecTimeout(0)):
return true
case t == reflect.TypeOf(GracePeriod(0)):
return true
case t.Kind() == reflect.Slice && isSliceOfDecorations(arg):
return true
default:
@ -143,16 +170,23 @@ func isSliceOfDecorations(slice interface{}) bool {
return true
}
var contextType = reflect.TypeOf(new(context.Context)).Elem()
var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) {
baseOffset := 2
node := Node{
ID: UniqueNodeID(),
NodeType: nodeType,
Text: text,
Labels: Labels{},
CodeLocation: types.NewCodeLocation(baseOffset),
NestingLevel: -1,
ID: UniqueNodeID(),
NodeType: nodeType,
Text: text,
Labels: Labels{},
CodeLocation: types.NewCodeLocation(baseOffset),
NestingLevel: -1,
PollProgressAfter: -1,
PollProgressInterval: -1,
GracePeriod: -1,
}
errors := []error{}
appendError := func(err error) {
if err != nil {
@ -219,6 +253,31 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "FlakeAttempts"))
}
case t == reflect.TypeOf(PollProgressAfter(0)):
node.PollProgressAfter = time.Duration(arg.(PollProgressAfter))
if nodeType.Is(types.NodeTypeContainer) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressAfter"))
}
case t == reflect.TypeOf(PollProgressInterval(0)):
node.PollProgressInterval = time.Duration(arg.(PollProgressInterval))
if nodeType.Is(types.NodeTypeContainer) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressInterval"))
}
case t == reflect.TypeOf(NodeTimeout(0)):
node.NodeTimeout = time.Duration(arg.(NodeTimeout))
if nodeType.Is(types.NodeTypeContainer) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "NodeTimeout"))
}
case t == reflect.TypeOf(SpecTimeout(0)):
node.SpecTimeout = time.Duration(arg.(SpecTimeout))
if !nodeType.Is(types.NodeTypeIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SpecTimeout"))
}
case t == reflect.TypeOf(GracePeriod(0)):
node.GracePeriod = time.Duration(arg.(GracePeriod))
if nodeType.Is(types.NodeTypeContainer) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod"))
}
case t == reflect.TypeOf(Labels{}):
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label"))
@ -232,35 +291,85 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
}
}
case t.Kind() == reflect.Func:
if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) {
if node.ReportEachBody != nil {
if nodeType.Is(types.NodeTypeContainer) {
if node.Body != nil {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
//we can trust that the function is valid because the compiler has our back here
node.ReportEachBody = arg.(func(types.SpecReport))
break
}
if node.Body != nil {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
isValid := (t.NumOut() == 0) && (t.NumIn() <= 1) && (t.NumIn() == 0 || t.In(0) == reflect.TypeOf(make(Done)))
if !isValid {
appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
if t.NumIn() == 0 {
node.Body = arg.(func())
if t.NumOut() > 0 || t.NumIn() > 0 {
appendError(types.GinkgoErrors.InvalidBodyTypeForContainer(t, node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
body := arg.(func())
node.Body = func(SpecContext) { body() }
} else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) {
if node.ReportEachBody == nil {
node.ReportEachBody = arg.(func(types.SpecReport))
} else {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
} else if nodeType.Is(types.NodeTypeReportAfterSuite) {
if node.ReportAfterSuiteBody == nil {
node.ReportAfterSuiteBody = arg.(func(types.Report))
} else {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
} else if nodeType.Is(types.NodeTypeSynchronizedBeforeSuite) {
if node.SynchronizedBeforeSuiteProc1Body != nil && node.SynchronizedBeforeSuiteAllProcsBody != nil {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
if node.SynchronizedBeforeSuiteProc1Body == nil {
body, hasContext := extractSynchronizedBeforeSuiteProc1Body(arg)
if body == nil {
appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t, node.CodeLocation))
trackedFunctionError = true
}
node.SynchronizedBeforeSuiteProc1Body, node.SynchronizedBeforeSuiteProc1BodyHasContext = body, hasContext
} else if node.SynchronizedBeforeSuiteAllProcsBody == nil {
body, hasContext := extractSynchronizedBeforeSuiteAllProcsBody(arg)
if body == nil {
appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t, node.CodeLocation))
trackedFunctionError = true
}
node.SynchronizedBeforeSuiteAllProcsBody, node.SynchronizedBeforeSuiteAllProcsBodyHasContext = body, hasContext
}
} else if nodeType.Is(types.NodeTypeSynchronizedAfterSuite) {
if node.SynchronizedAfterSuiteAllProcsBody != nil && node.SynchronizedAfterSuiteProc1Body != nil {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
body, hasContext := extractBodyFunction(deprecationTracker, node.CodeLocation, arg)
if body == nil {
appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
if node.SynchronizedAfterSuiteAllProcsBody == nil {
node.SynchronizedAfterSuiteAllProcsBody, node.SynchronizedAfterSuiteAllProcsBodyHasContext = body, hasContext
} else if node.SynchronizedAfterSuiteProc1Body == nil {
node.SynchronizedAfterSuiteProc1Body, node.SynchronizedAfterSuiteProc1BodyHasContext = body, hasContext
}
} else {
deprecationTracker.TrackDeprecation(types.Deprecations.Async(), node.CodeLocation)
deprecatedAsyncBody := arg.(func(Done))
node.Body = func() { deprecatedAsyncBody(make(Done)) }
if node.Body != nil {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
node.Body, node.HasContext = extractBodyFunction(deprecationTracker, node.CodeLocation, arg)
if node.Body == nil {
appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
}
default:
remainingArgs = append(remainingArgs, arg)
@ -272,9 +381,24 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
}
if node.Body == nil && node.ReportEachBody == nil && !node.MarkedPending && !trackedFunctionError {
hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext
if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 {
appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType))
}
if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError {
appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
}
if node.NodeType.Is(types.NodeTypeSynchronizedBeforeSuite) && !trackedFunctionError && (node.SynchronizedBeforeSuiteProc1Body == nil || node.SynchronizedBeforeSuiteAllProcsBody == nil) {
appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
}
if node.NodeType.Is(types.NodeTypeSynchronizedAfterSuite) && !trackedFunctionError && (node.SynchronizedAfterSuiteProc1Body == nil || node.SynchronizedAfterSuiteAllProcsBody == nil) {
appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
}
for _, arg := range remainingArgs {
appendError(types.GinkgoErrors.UnknownDecorator(node.CodeLocation, nodeType, arg))
}
@ -286,76 +410,151 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
return node, errors
}
func NewSynchronizedBeforeSuiteNode(proc1Body func() []byte, allProcsBody func([]byte), codeLocation types.CodeLocation) (Node, []error) {
return Node{
ID: UniqueNodeID(),
NodeType: types.NodeTypeSynchronizedBeforeSuite,
SynchronizedBeforeSuiteProc1Body: proc1Body,
SynchronizedBeforeSuiteAllProcsBody: allProcsBody,
CodeLocation: codeLocation,
}, nil
}
var doneType = reflect.TypeOf(make(Done))
func NewSynchronizedAfterSuiteNode(allProcsBody func(), proc1Body func(), codeLocation types.CodeLocation) (Node, []error) {
return Node{
ID: UniqueNodeID(),
NodeType: types.NodeTypeSynchronizedAfterSuite,
SynchronizedAfterSuiteAllProcsBody: allProcsBody,
SynchronizedAfterSuiteProc1Body: proc1Body,
CodeLocation: codeLocation,
}, nil
}
func NewReportAfterSuiteNode(text string, body func(types.Report), codeLocation types.CodeLocation) (Node, []error) {
return Node{
ID: UniqueNodeID(),
Text: text,
NodeType: types.NodeTypeReportAfterSuite,
ReportAfterSuiteBody: body,
CodeLocation: codeLocation,
}, nil
}
func NewCleanupNode(fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) {
baseOffset := 2
node := Node{
ID: UniqueNodeID(),
NodeType: types.NodeTypeCleanupInvalid,
CodeLocation: types.NewCodeLocation(baseOffset),
NestingLevel: -1,
func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg interface{}) (func(SpecContext), bool) {
t := reflect.TypeOf(arg)
if t.NumOut() > 0 || t.NumIn() > 1 {
return nil, false
}
remainingArgs := []interface{}{}
for _, arg := range args {
if t.NumIn() == 1 {
if t.In(0) == doneType {
deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl)
deprecatedAsyncBody := arg.(func(Done))
return func(SpecContext) { deprecatedAsyncBody(make(Done)) }, false
} else if t.In(0).Implements(specContextType) {
return arg.(func(SpecContext)), true
} else if t.In(0).Implements(contextType) {
body := arg.(func(context.Context))
return func(c SpecContext) { body(c) }, true
}
return nil, false
}
body := arg.(func())
return func(SpecContext) { body() }, false
}
var byteType = reflect.TypeOf([]byte{})
func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) []byte, bool) {
t := reflect.TypeOf(arg)
v := reflect.ValueOf(arg)
if t.NumOut() > 1 || t.NumIn() > 1 {
return nil, false
} else if t.NumOut() == 1 && t.Out(0) != byteType {
return nil, false
} else if t.NumIn() == 1 && !t.In(0).Implements(contextType) {
return nil, false
}
hasContext := t.NumIn() == 1
return func(c SpecContext) []byte {
var out []reflect.Value
if hasContext {
out = v.Call([]reflect.Value{reflect.ValueOf(c)})
} else {
out = v.Call([]reflect.Value{})
}
if len(out) == 1 {
return (out[0].Interface()).([]byte)
} else {
return []byte{}
}
}, hasContext
}
func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecContext, []byte), bool) {
t := reflect.TypeOf(arg)
v := reflect.ValueOf(arg)
hasContext, hasByte := false, false
if t.NumOut() > 0 || t.NumIn() > 2 {
return nil, false
} else if t.NumIn() == 2 && t.In(0).Implements(contextType) && t.In(1) == byteType {
hasContext, hasByte = true, true
} else if t.NumIn() == 1 && t.In(0).Implements(contextType) {
hasContext = true
} else if t.NumIn() == 1 && t.In(0) == byteType {
hasByte = true
} else if t.NumIn() != 0 {
return nil, false
}
return func(c SpecContext, b []byte) {
in := []reflect.Value{}
if hasContext {
in = append(in, reflect.ValueOf(c))
}
if hasByte {
in = append(in, reflect.ValueOf(b))
}
v.Call(in)
}, hasContext
}
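Taken together, these extractors let `SynchronizedBeforeSuite` accept any mix of context and `[]byte` parameters. A sketch of the fully context-aware spelling (the endpoint value is made up; simpler signatures such as `func() []byte` and `func([]byte)` also pass the reflection checks above):

```go
package foo_test

import . "github.com/onsi/ginkgo/v2"

var sharedEndpoint string

var _ = SynchronizedBeforeSuite(func(ctx SpecContext) []byte {
	// runs on parallel process #1 only; the returned bytes are shipped to all procs
	return []byte("service.local:9090")
}, func(ctx SpecContext, data []byte) {
	// runs on every parallel process with proc #1's data
	sharedEndpoint = string(data)
})
```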
func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) {
decorations, remainingArgs := PartitionDecorations(args...)
baseOffset := 2
cl := types.NewCodeLocation(baseOffset)
finalArgs := []interface{}{}
for _, arg := range decorations {
switch t := reflect.TypeOf(arg); {
case t == reflect.TypeOf(Offset(0)):
node.CodeLocation = types.NewCodeLocation(baseOffset + int(arg.(Offset)))
cl = types.NewCodeLocation(baseOffset + int(arg.(Offset)))
case t == reflect.TypeOf(types.CodeLocation{}):
node.CodeLocation = arg.(types.CodeLocation)
cl = arg.(types.CodeLocation)
default:
remainingArgs = append(remainingArgs, arg)
finalArgs = append(finalArgs, arg)
}
}
finalArgs = append(finalArgs, cl)
if len(remainingArgs) == 0 {
return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(node.CodeLocation)}
return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)}
}
callback := reflect.ValueOf(remainingArgs[0])
if !(callback.Kind() == reflect.Func && callback.Type().NumOut() <= 1) {
return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(node.CodeLocation)}
return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)}
}
callArgs := []reflect.Value{}
for _, arg := range remainingArgs[1:] {
callArgs = append(callArgs, reflect.ValueOf(arg))
}
cl := node.CodeLocation
node.Body = func() {
out := callback.Call(callArgs)
hasContext := false
t := callback.Type()
if t.NumIn() > 0 {
if t.In(0).Implements(specContextType) {
hasContext = true
} else if t.In(0).Implements(contextType) && (len(callArgs) == 0 || !callArgs[0].Type().Implements(contextType)) {
hasContext = true
}
}
handleFailure := func(out []reflect.Value) {
if len(out) == 1 && !out[0].IsNil() {
fail(fmt.Sprintf("DeferCleanup callback returned error: %v", out[0]), cl)
}
}
return node, nil
if hasContext {
finalArgs = append(finalArgs, func(c SpecContext) {
out := callback.Call(append([]reflect.Value{reflect.ValueOf(c)}, callArgs...))
handleFailure(out)
})
} else {
finalArgs = append(finalArgs, func() {
out := callback.Call(callArgs)
handleFailure(out)
})
}
return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs...)
}
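The rebuilt `NewCleanupNode` detects context-taking callbacks and reports a returned error through the `fail` callback. A hedged sketch of what that enables at the DSL level (the `fakeDB` type is invented for illustration; decorator support comes from routing `finalArgs` through `NewNode`):

```go
package foo_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
)

type fakeDB struct{}

// Teardown stands in for a real cleanup step that honors cancellation.
func (fakeDB) Teardown(ctx SpecContext) error { return ctx.Err() }

var _ = It("provisions a database", func(ctx SpecContext) {
	db := fakeDB{} // stand-in for a real provisioning step

	DeferCleanup(func(ctx SpecContext) error {
		// the hasContext detection above prepends the SpecContext to the call args;
		// a non-nil return is surfaced as "DeferCleanup callback returned error: ..."
		return db.Teardown(ctx)
	}, NodeTimeout(10*time.Second))
})
```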
func (n Node) IsZero() bool {

View File

@ -143,7 +143,7 @@ func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown)
}
// Now we make a pipe, we'll use this to redirect the input to the 1 and 2 file descriptors (this is how everything else in the world is tring to log to stdout and stderr)
// Now we make a pipe, we'll use this to redirect the input to the 1 and 2 file descriptors (this is how everything else in the world is trying to log to stdout and stderr)
// we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running
interceptor.pipe = <-interceptor.pipeChannel

View File

@ -28,7 +28,7 @@ func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.Fil
// And then wrap the clone file descriptors in files.
// One benefit of this (that we don't use yet) is that we can actually write
// to these files to emit output to the console evne though we're intercepting output
// to these files to emit output to the console even though we're intercepting output
stdoutClone := os.NewFile(uintptr(stdoutCloneFD), "stdout-clone")
stderrClone := os.NewFile(uintptr(stderrCloneFD), "stderr-clone")

View File

@ -49,6 +49,7 @@ type Client interface {
FetchNextCounter() (int, error)
PostAbort() error
ShouldAbort() bool
PostEmitProgressReport(report types.ProgressReport) error
Write(p []byte) (int, error)
}

View File

@ -94,6 +94,10 @@ func (client *httpClient) PostSuiteDidEnd(report types.Report) error {
return client.post("/suite-did-end", report)
}
func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) error {
return client.post("/progress-report", report)
}
func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
beforeSuiteState := BeforeSuiteState{
State: state,

View File

@ -49,6 +49,7 @@ func (server *httpServer) Start() {
mux.HandleFunc("/did-run", server.didRun)
mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd)
mux.HandleFunc("/emit-output", server.emitOutput)
mux.HandleFunc("/progress-report", server.emitProgressReport)
//synchronization endpoints
mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted)
@ -155,6 +156,14 @@ func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.R
server.handleError(server.handler.EmitOutput(output, &n), writer)
}
func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) {
var report types.ProgressReport
if !server.decode(writer, request, &report) {
return
}
server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer)
}
func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
var beforeSuiteState BeforeSuiteState
if !server.decode(writer, request, &beforeSuiteState) {

View File

@ -72,6 +72,10 @@ func (client *rpcClient) Write(p []byte) (int, error) {
return n, err
}
func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error {
return client.client.Call("Server.EmitProgressReport", report, voidReceiver)
}
func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
beforeSuiteState := BeforeSuiteState{
State: state,

View File

@ -108,6 +108,13 @@ func (handler *ServerHandler) EmitOutput(output []byte, n *int) error {
return err
}
func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.reporter.EmitProgressReport(report)
return nil
}
func (handler *ServerHandler) registerAlive(proc int, alive func() bool) {
handler.lock.Lock()
defer handler.lock.Unlock()

View File

@ -0,0 +1,291 @@
package internal
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"os"
"os/signal"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/onsi/ginkgo/v2/types"
)
var _SOURCE_CACHE = map[string][]string{}
type ProgressSignalRegistrar func(func()) context.CancelFunc
func RegisterForProgressSignal(handler func()) context.CancelFunc {
signalChannel := make(chan os.Signal, 1)
if len(PROGRESS_SIGNALS) > 0 {
signal.Notify(signalChannel, PROGRESS_SIGNALS...)
}
ctx, cancel := context.WithCancel(context.Background())
go func() {
for {
select {
case <-signalChannel:
handler()
case <-ctx.Done():
signal.Stop(signalChannel)
return
}
}
}()
return cancel
}
type ProgressStepCursor struct {
Text string
CodeLocation types.CodeLocation
StartTime time.Time
}
func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep ProgressStepCursor, gwOutput string, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) {
pr := types.ProgressReport{
ParallelProcess: report.ParallelProcess,
RunningInParallel: isRunningInParallel,
Time: time.Now(),
ContainerHierarchyTexts: report.ContainerHierarchyTexts,
LeafNodeText: report.LeafNodeText,
LeafNodeLocation: report.LeafNodeLocation,
SpecStartTime: report.StartTime,
CurrentNodeType: currentNode.NodeType,
CurrentNodeText: currentNode.Text,
CurrentNodeLocation: currentNode.CodeLocation,
CurrentNodeStartTime: currentNodeStartTime,
CurrentStepText: currentStep.Text,
CurrentStepLocation: currentStep.CodeLocation,
CurrentStepStartTime: currentStep.StartTime,
AdditionalReports: additionalReports,
CapturedGinkgoWriterOutput: gwOutput,
GinkgoWriterOffset: len(gwOutput),
}
goroutines, err := extractRunningGoroutines()
if err != nil {
return pr, err
}
pr.Goroutines = goroutines
// now we want to try to find goroutines of interest. these will be goroutines that have any function calls with code in packagesOfInterest:
packagesOfInterest := map[string]bool{}
packageFromFilename := func(filename string) string {
return filepath.Dir(filename)
}
addPackageFor := func(filename string) {
if filename != "" {
packagesOfInterest[packageFromFilename(filename)] = true
}
}
isPackageOfInterest := func(filename string) bool {
stackPackage := packageFromFilename(filename)
for packageOfInterest := range packagesOfInterest {
if strings.HasPrefix(stackPackage, packageOfInterest) {
return true
}
}
return false
}
for _, location := range report.ContainerHierarchyLocations {
addPackageFor(location.FileName)
}
addPackageFor(report.LeafNodeLocation.FileName)
addPackageFor(currentNode.CodeLocation.FileName)
addPackageFor(currentStep.CodeLocation.FileName)
//First, we find the SpecGoroutine - this will be the goroutine that includes `runNode`
specGoRoutineIdx := -1
runNodeFunctionCallIdx := -1
OUTER:
for goroutineIdx, goroutine := range pr.Goroutines {
for functionCallIdx, functionCall := range goroutine.Stack {
if strings.Contains(functionCall.Function, "ginkgo/v2/internal.(*Suite).runNode.func") {
specGoRoutineIdx = goroutineIdx
runNodeFunctionCallIdx = functionCallIdx
break OUTER
}
}
}
//Now, we find the first non-Ginkgo function call
if specGoRoutineIdx > -1 {
for runNodeFunctionCallIdx >= 0 {
fn := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function
file := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename
// these are all things that could potentially happen from within ginkgo
if strings.Contains(fn, "ginkgo/v2/internal") || strings.Contains(fn, "reflect.Value") || strings.Contains(file, "ginkgo/table_dsl") || strings.Contains(file, "ginkgo/core_dsl") {
runNodeFunctionCallIdx--
continue
}
//found it! let's add its package of interest
addPackageFor(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename)
break
}
}
ginkgoEntryPointIdx := -1
OUTER_GINKGO_ENTRY_POINT:
for goroutineIdx, goroutine := range pr.Goroutines {
for _, functionCall := range goroutine.Stack {
if strings.Contains(functionCall.Function, "ginkgo/v2.RunSpecs") {
ginkgoEntryPointIdx = goroutineIdx
break OUTER_GINKGO_ENTRY_POINT
}
}
}
// Now we go through all goroutines and highlight any lines with packages in `packagesOfInterest`
// Any goroutines with highlighted lines end up in the HighlightGoRoutines
for goroutineIdx, goroutine := range pr.Goroutines {
if goroutineIdx == ginkgoEntryPointIdx {
continue
}
if goroutineIdx == specGoRoutineIdx {
pr.Goroutines[goroutineIdx].IsSpecGoroutine = true
}
for functionCallIdx, functionCall := range goroutine.Stack {
if isPackageOfInterest(functionCall.Filename) {
goroutine.Stack[functionCallIdx].Highlight = true
goroutine.Stack[functionCallIdx].Source, goroutine.Stack[functionCallIdx].SourceHighlight = fetchSource(functionCall.Filename, functionCall.Line, 2, sourceRoots)
}
}
}
if !includeAll {
goroutines := []types.Goroutine{pr.SpecGoroutine()}
goroutines = append(goroutines, pr.HighlightedGoroutines()...)
pr.Goroutines = goroutines
}
return pr, nil
}
func extractRunningGoroutines() ([]types.Goroutine, error) {
var stack []byte
for size := 64 * 1024; ; size *= 2 {
stack = make([]byte, size)
if n := runtime.Stack(stack, true); n < size {
stack = stack[:n]
break
}
}
r := bufio.NewReader(bytes.NewReader(stack))
out := []types.Goroutine{}
idx := -1
for {
line, err := r.ReadString('\n')
if err == io.EOF {
break
}
line = strings.TrimSuffix(line, "\n")
//skip blank lines
if line == "" {
continue
}
//parse headers for new goroutine frames
if strings.HasPrefix(line, "goroutine") {
out = append(out, types.Goroutine{})
idx = len(out) - 1
line = strings.TrimPrefix(line, "goroutine ")
line = strings.TrimSuffix(line, ":")
fields := strings.SplitN(line, " ", 2)
if len(fields) != 2 {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine frame header: %s", line))
}
out[idx].ID, err = strconv.ParseUint(fields[0], 10, 64)
if err != nil {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine ID: %s", fields[1]))
}
out[idx].State = strings.TrimSuffix(strings.TrimPrefix(fields[1], "["), "]")
continue
}
//if we are here we must be at a function call entry in the stack
functionCall := types.FunctionCall{
Function: strings.TrimPrefix(line, "created by "), // no need to track 'created by'
}
line, err = r.ReadString('\n')
line = strings.TrimSuffix(line, "\n")
if err == io.EOF {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function))
}
line = strings.TrimLeft(line, " \t")
fields := strings.SplitN(line, ":", 2)
if len(fields) != 2 {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename nad line number: %s", line))
}
functionCall.Filename = fields[0]
line = strings.Split(fields[1], " ")[0]
lineNumber, err := strconv.ParseInt(line, 10, 64)
functionCall.Line = int(lineNumber)
if err != nil {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error()))
}
out[idx].Stack = append(out[idx].Stack, functionCall)
}
return out, nil
}
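`extractRunningGoroutines` assumes the textual layout produced by `runtime.Stack`. For reference, the shape being parsed looks like this (illustrative, not from a real run):

```
goroutine 7 [chan receive]:
github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3()
	/home/user/go/src/suite.go:712 +0x9c
created by github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
	/home/user/go/src/suite.go:698 +0x4c5
```

Each `goroutine N [state]:` header starts a new frame; every subsequent pair of lines is a function name followed by an indented `file:line +offset` entry, which is why the parser reads lines two at a time and strips the `created by ` prefix.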
func fetchSource(filename string, lineNumber int, span int, configuredSourceRoots []string) ([]string, int) {
if filename == "" {
return []string{}, 0
}
var lines []string
var ok bool
if lines, ok = _SOURCE_CACHE[filename]; !ok {
sourceRoots := []string{""}
sourceRoots = append(sourceRoots, configuredSourceRoots...)
var data []byte
var err error
var found bool
for _, root := range sourceRoots {
data, err = os.ReadFile(filepath.Join(root, filename))
if err == nil {
found = true
break
}
}
if !found {
return []string{}, 0
}
lines = strings.Split(string(data), "\n")
_SOURCE_CACHE[filename] = lines
}
startIndex := lineNumber - span - 1
endIndex := startIndex + span + span + 1
if startIndex < 0 {
startIndex = 0
}
if endIndex > len(lines) {
endIndex = len(lines)
}
highlightIndex := lineNumber - 1 - startIndex
return lines[startIndex:endIndex], highlightIndex
}
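A quick worked example of the windowing arithmetic in `fetchSource`: with `span = 2` and `lineNumber = 10`, `startIndex = 10 - 2 - 1 = 7` and `endIndex = 7 + 2 + 2 + 1 = 12`, so the slice covers lines 8 through 12 of the file, and `highlightIndex = 10 - 1 - 7 = 2` points back at line 10.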

View File

@ -0,0 +1,11 @@
//go:build freebsd || openbsd || netbsd || darwin || dragonfly
// +build freebsd openbsd netbsd darwin dragonfly
package internal
import (
"os"
"syscall"
)
var PROGRESS_SIGNALS = []os.Signal{syscall.SIGINFO, syscall.SIGUSR1}

View File

@ -0,0 +1,11 @@
//go:build linux || solaris
// +build linux solaris
package internal
import (
"os"
"syscall"
)
var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1}
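These build-tagged files pick the signals that trigger an on-demand progress report (none are available on Windows, per the file below). A sketch of poking a running suite from Go on a unix platform, equivalent to `kill -USR1 <pid>` from a shell; supplying the right PID is up to you:

```go
package main

import (
	"os"
	"strconv"
	"syscall"
)

func main() {
	// Usage: progress-ping <pid-of-running-ginkgo-suite>
	pid, err := strconv.Atoi(os.Args[1])
	if err != nil {
		panic(err)
	}
	proc, err := os.FindProcess(pid)
	if err != nil {
		panic(err)
	}
	// SIGUSR1 is in PROGRESS_SIGNALS on linux/solaris and the BSDs/darwin above
	if err := proc.Signal(syscall.SIGUSR1); err != nil {
		panic(err)
	}
}
```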

View File

@ -0,0 +1,8 @@
//go:build windows
// +build windows
package internal
import "os"
var PROGRESS_SIGNALS = []os.Signal{}

View File

@ -2,6 +2,7 @@ package internal
import (
"strings"
"time"
"github.com/onsi/ginkgo/v2/types"
)
@ -40,6 +41,10 @@ func (s Spec) FlakeAttempts() int {
return flakeAttempts
}
func (s Spec) SpecTimeout() time.Duration {
return s.FirstNodeWithType(types.NodeTypeIt).SpecTimeout
}
type Specs []Spec
func (s Specs) HasAnySpecsMarkedPending() bool {

View File

@ -0,0 +1,90 @@
package internal
import (
"context"
"sort"
"sync"
"github.com/onsi/ginkgo/v2/types"
)
type SpecContext interface {
context.Context
SpecReport() types.SpecReport
AttachProgressReporter(func() string) func()
}
type specContext struct {
context.Context
cancel context.CancelFunc
lock *sync.Mutex
progressReporters map[int]func() string
prCounter int
suite *Suite
}
/*
SpecContext includes a reference to `suite` and embeds itself in the context it wraps as a "GINKGO_SPEC_CONTEXT" value. This allows users to create child Contexts without having downstream consumers (e.g. Gomega) lose access to the SpecContext and its methods, and it lets us build extensions on top of Ginkgo that simply take an all-encompassing context.
Note that while SpecContext is used by Ginkgo to enforce deadlines, it is not configured as a context.WithDeadline. Instead, Ginkgo owns responsibility for canceling the context when the deadline elapses.
This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation.
*/
func NewSpecContext(suite *Suite) *specContext {
ctx, cancel := context.WithCancel(context.Background())
sc := &specContext{
cancel: cancel,
suite: suite,
lock: &sync.Mutex{},
prCounter: 0,
progressReporters: map[int]func() string{},
}
ctx = context.WithValue(ctx, "GINKGO_SPEC_CONTEXT", sc) //yes, yes, the go docs say don't use a string for a key... but we'd rather avoid a circular dependency between Gomega and Ginkgo
sc.Context = ctx //thank goodness for garbage collectors that can handle circular dependencies
return sc
}
func (sc *specContext) SpecReport() types.SpecReport {
return sc.suite.CurrentSpecReport()
}
func (sc *specContext) AttachProgressReporter(reporter func() string) func() {
sc.lock.Lock()
defer sc.lock.Unlock()
sc.prCounter += 1
prCounter := sc.prCounter
sc.progressReporters[prCounter] = reporter
return func() {
sc.lock.Lock()
defer sc.lock.Unlock()
delete(sc.progressReporters, prCounter)
}
}
func (sc *specContext) QueryProgressReporters() []string {
sc.lock.Lock()
keys := []int{}
for key := range sc.progressReporters {
keys = append(keys, key)
}
sort.Ints(keys)
reporters := []func() string{}
for _, key := range keys {
reporters = append(reporters, sc.progressReporters[key])
}
sc.lock.Unlock()
if len(reporters) == 0 {
return nil
}
out := []string{}
for _, reporter := range reporters {
out = append(out, reporter())
}
return out
}
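`AttachProgressReporter` lets a spec contribute extra lines to any progress report emitted while it runs; the returned closure detaches the reporter again. A sketch (the warmup loop is made up; the atomic counter is there because the reporter may be queried from another goroutine):

```go
package foo_test

import (
	"fmt"
	"sync/atomic"
	"time"

	. "github.com/onsi/ginkgo/v2"
)

var _ = It("waits for the cache to warm", func(ctx SpecContext) {
	var warmed int64
	detach := ctx.AttachProgressReporter(func() string {
		// invoked via QueryProgressReporters whenever a progress report is built
		return fmt.Sprintf("cache warmup: %d/100 shards", atomic.LoadInt64(&warmed))
	})
	defer detach()

	for atomic.LoadInt64(&warmed) < 100 {
		select {
		case <-ctx.Done():
			Fail("timed out warming the cache")
		case <-time.After(10 * time.Millisecond):
			atomic.AddInt64(&warmed, 1) // pretend work
		}
	}
}, SpecTimeout(30*time.Second))
```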

View File

@ -5,7 +5,6 @@ import (
"sync"
"time"
"github.com/onsi/ginkgo/v2/formatter"
"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
"github.com/onsi/ginkgo/v2/internal/parallel_support"
"github.com/onsi/ginkgo/v2/reporters"
@ -35,21 +34,39 @@ type Suite struct {
outputInterceptor OutputInterceptor
interruptHandler interrupt_handler.InterruptHandlerInterface
config types.SuiteConfig
deadline time.Time
skipAll bool
report types.Report
currentSpecReport types.SpecReport
currentSpecReportUserAccessLock *sync.Mutex
currentNode Node
skipAll bool
report types.Report
currentSpecReport types.SpecReport
currentNode Node
currentNodeStartTime time.Time
currentSpecContext *specContext
progressStepCursor ProgressStepCursor
/*
We don't need to lock around all operations. Just those that *could* happen concurrently.
Suite, generally, only runs one node at a time - and so the possibility for races is small. In fact, the presence of a race usually indicates the user has launched a goroutine that has leaked past the node it was launched in.
However, there are some operations that can happen concurrently:
- AddReportEntry and CurrentSpecReport can be accessed at any point by the user - including in goroutines that outlive the node intentionally (see, e.g. #1020). They both form a self-contained read-write pair and so a lock in them is sufficient.
- generateProgressReport can be invoked at any point in time by an interrupt or a progress poll. Moreover, it requires access to currentSpecReport, currentNode, currentNodeStartTime, and progressStepCursor. To make it thread-safe we need to lock around generateProgressReport when we read those variables _and_ everywhere those variables are *written*. In general we don't need to worry about all possible field writes to these variables as what `generateProgressReport` does with these variables is fairly selective (hence the name of the lock). Specifically, we don't need to lock around state and failure message changes on `currentSpecReport` - just the setting of the variable itself.
*/
selectiveLock *sync.Mutex
client parallel_support.Client
}
func NewSuite() *Suite {
return &Suite{
tree: &TreeNode{},
phase: PhaseBuildTopLevel,
currentSpecReportUserAccessLock: &sync.Mutex{},
tree: &TreeNode{},
phase: PhaseBuildTopLevel,
selectiveLock: &sync.Mutex{},
}
}
@ -66,7 +83,7 @@ func (suite *Suite) BuildTree() error {
return nil
}
func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, suiteConfig types.SuiteConfig) (bool, bool) {
func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) {
if suite.phase != PhaseBuildTree {
panic("cannot run before building the tree = call suite.BuildTree() first")
}
@ -83,8 +100,16 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string
suite.interruptHandler = interruptHandler
suite.config = suiteConfig
if suite.config.Timeout > 0 {
suite.deadline = time.Now().Add(suite.config.Timeout)
}
cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal)
success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)
cancelProgressHandler()
return success, hasProgrammaticFocus
}
@ -146,7 +171,7 @@ func (suite *Suite) PushNode(node Node) error {
err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation)
}
}()
node.Body()
node.Body(nil)
return err
}()
suite.tree = parentTree
@ -211,12 +236,23 @@ func (suite *Suite) pushCleanupNode(node Node) error {
return nil
}
/*
Setting the Progress Step Cursor
*/
func (suite *Suite) SetProgressStepCursor(cursor ProgressStepCursor) {
suite.selectiveLock.Lock()
defer suite.selectiveLock.Unlock()
suite.progressStepCursor = cursor
}
/*
Spec Running methods - used during PhaseRun
*/
func (suite *Suite) CurrentSpecReport() types.SpecReport {
suite.currentSpecReportUserAccessLock.Lock()
defer suite.currentSpecReportUserAccessLock.Unlock()
suite.selectiveLock.Lock()
defer suite.selectiveLock.Unlock()
report := suite.currentSpecReport
if suite.writer != nil {
report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
@ -227,8 +263,8 @@ func (suite *Suite) CurrentSpecReport() types.SpecReport {
}
func (suite *Suite) AddReportEntry(entry ReportEntry) error {
suite.currentSpecReportUserAccessLock.Lock()
defer suite.currentSpecReportUserAccessLock.Unlock()
suite.selectiveLock.Lock()
defer suite.selectiveLock.Unlock()
if suite.phase != PhaseRun {
return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
}
@ -236,6 +272,45 @@ func (suite *Suite) AddReportEntry(entry ReportEntry) error {
return nil
}
func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport {
suite.selectiveLock.Lock()
defer suite.selectiveLock.Unlock()
var additionalReports []string
if suite.currentSpecContext != nil {
additionalReports = suite.currentSpecContext.QueryProgressReporters()
}
stepCursor := suite.progressStepCursor
gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes())
pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, stepCursor, gwOutput, additionalReports, suite.config.SourceRoots, fullReport)
if err != nil {
fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error())
}
return pr
}
func (suite *Suite) handleProgressSignal() {
report := suite.generateProgressReport(false)
report.Message = "{{bold}}You've requested a progress report:{{/}}"
suite.emitProgressReport(report)
}
func (suite *Suite) emitProgressReport(report types.ProgressReport) {
suite.selectiveLock.Lock()
suite.currentSpecReport.ProgressReports = append(suite.currentSpecReport.ProgressReports, report.WithoutCapturedGinkgoWriterOutput())
suite.selectiveLock.Unlock()
suite.reporter.EmitProgressReport(report)
if suite.isRunningInParallel() {
err := suite.client.PostEmitProgressReport(report)
if err != nil {
fmt.Println(err.Error())
}
}
}
func (suite *Suite) isRunningInParallel() bool {
return suite.config.ParallelTotal > 1
}
@ -322,12 +397,16 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s
suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
interruptStatus := suite.interruptHandler.Status()
if interruptStatus.Interrupted {
if interruptStatus.Interrupted() {
suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String())
suite.report.SuiteSucceeded = false
}
suite.report.EndTime = time.Now()
suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime)
if !suite.deadline.IsZero() && suite.report.EndTime.After(suite.deadline) {
suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite Timeout Elapsed")
suite.report.SuiteSucceeded = false
}
if suite.config.ParallelProcess == 1 {
suite.runReportAfterSuite()
@ -341,16 +420,18 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s
}
func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
interruptStatus := suite.interruptHandler.Status()
beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
if !beforeSuiteNode.IsZero() && !interruptStatus.Interrupted && numSpecsThatWillBeRun > 0 {
if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: beforeSuiteNode.NodeType,
LeafNodeLocation: beforeSuiteNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
}
suite.selectiveLock.Unlock()
suite.reporter.WillRun(suite.currentSpecReport)
suite.runSuiteNode(beforeSuiteNode, interruptStatus.Channel)
suite.runSuiteNode(beforeSuiteNode)
if suite.currentSpecReport.State.Is(types.SpecStateSkipped) {
suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite")
suite.skipAll = true
@ -362,26 +443,32 @@ func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: afterSuiteNode.NodeType,
LeafNodeLocation: afterSuiteNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
}
suite.selectiveLock.Unlock()
suite.reporter.WillRun(suite.currentSpecReport)
suite.runSuiteNode(afterSuiteNode, suite.interruptHandler.Status().Channel)
suite.runSuiteNode(afterSuiteNode)
suite.processCurrentSpecReport()
}
afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse()
if len(afterSuiteCleanup) > 0 {
for _, cleanupNode := range afterSuiteCleanup {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: cleanupNode.NodeType,
LeafNodeLocation: cleanupNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
}
suite.selectiveLock.Unlock()
suite.reporter.WillRun(suite.currentSpecReport)
suite.runSuiteNode(cleanupNode, suite.interruptHandler.Status().Channel)
suite.runSuiteNode(cleanupNode)
suite.processCurrentSpecReport()
}
}
@ -389,12 +476,15 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
func (suite *Suite) runReportAfterSuite() {
for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: node.NodeType,
LeafNodeLocation: node.CodeLocation,
LeafNodeText: node.Text,
ParallelProcess: suite.config.ParallelProcess,
}
suite.selectiveLock.Unlock()
suite.reporter.WillRun(suite.currentSpecReport)
suite.runReportAfterSuiteNode(node, suite.report)
suite.processCurrentSpecReport()
@ -417,16 +507,11 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
suite.writer.Truncate()
suite.outputInterceptor.StartInterceptingOutput()
report := suite.currentSpecReport
nodes[i].Body = func() {
nodes[i].Body = func(SpecContext) {
nodes[i].ReportEachBody(report)
}
suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS,
"{{yellow}}Ginkgo received an interrupt signal but is currently running a %s node. To avoid an invalid report the %s node will not be interrupted however subsequent tests will be skipped.{{/}}\n\n{{bold}}The running %s node is at:\n%s.{{/}}",
nodeType, nodeType, nodeType,
nodes[i].CodeLocation,
))
state, failure := suite.runNode(nodes[i], nil, spec.Nodes.BestTextFor(nodes[i]))
suite.interruptHandler.ClearInterruptPlaceholderMessage()
state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i]))
// If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state.
// Also, if the reporter is ever aborted - always override the state to propagate the abort
if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) {
@ -438,7 +523,7 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
}
}
func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) {
func (suite *Suite) runSuiteNode(node Node) {
if suite.config.DryRun {
suite.currentSpecReport.State = types.SpecStatePassed
return
@ -451,13 +536,13 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) {
var err error
switch node.NodeType {
case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite:
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
case types.NodeTypeCleanupAfterSuite:
if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 {
err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
}
if err == nil {
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
}
case types.NodeTypeSynchronizedBeforeSuite:
var data []byte
@ -467,8 +552,9 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) {
suite.outputInterceptor.StopInterceptingAndReturnOutput()
suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
}
node.Body = func() { data = node.SynchronizedBeforeSuiteProc1Body() }
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
node.Body = func(c SpecContext) { data = node.SynchronizedBeforeSuiteProc1Body(c) }
node.HasContext = node.SynchronizedBeforeSuiteProc1BodyHasContext
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
if suite.config.ParallelTotal > 1 {
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
suite.outputInterceptor.StartInterceptingOutput()
@ -485,19 +571,21 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) {
switch proc1State {
case types.SpecStatePassed:
runAllProcs = true
case types.SpecStateFailed, types.SpecStatePanicked:
case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout:
err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1()
case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped:
suite.currentSpecReport.State = proc1State
}
}
if runAllProcs {
node.Body = func() { node.SynchronizedBeforeSuiteAllProcsBody(data) }
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
node.Body = func(c SpecContext) { node.SynchronizedBeforeSuiteAllProcsBody(c, data) }
node.HasContext = node.SynchronizedBeforeSuiteAllProcsBodyHasContext
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
}
case types.NodeTypeSynchronizedAfterSuite:
node.Body = node.SynchronizedAfterSuiteAllProcsBody
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
node.HasContext = node.SynchronizedAfterSuiteAllProcsBodyHasContext
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
if suite.config.ParallelProcess == 1 {
if suite.config.ParallelTotal > 1 {
err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
@ -509,7 +597,8 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) {
}
node.Body = node.SynchronizedAfterSuiteProc1Body
state, failure := suite.runNode(node, interruptChannel, "")
node.HasContext = node.SynchronizedAfterSuiteProc1BodyHasContext
state, failure := suite.runNode(node, time.Time{}, "")
if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure
}
@ -543,13 +632,8 @@ func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) {
report = report.Add(aggregatedReport)
}
node.Body = func() { node.ReportAfterSuiteBody(report) }
suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS,
"{{yellow}}Ginkgo received an interrupt signal but is currently running a ReportAfterSuite node. To avoid an invalid report the ReportAfterSuite node will not be interrupted.{{/}}\n\n{{bold}}The running ReportAfterSuite node is at:\n%s.{{/}}",
node.CodeLocation,
))
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, nil, "")
suite.interruptHandler.ClearInterruptPlaceholderMessage()
node.Body = func(SpecContext) { node.ReportAfterSuiteBody(report) }
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
suite.currentSpecReport.EndTime = time.Now()
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
@ -559,14 +643,32 @@ func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) {
return
}
func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text string) (types.SpecState, types.Failure) {
func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node)
}
interruptStatus := suite.interruptHandler.Status()
if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
return types.SpecStateSkipped, types.Failure{}
}
if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt) {
return types.SpecStateSkipped, types.Failure{}
}
if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) {
return types.SpecStateSkipped, types.Failure{}
}
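
These three gates implement an escalation ladder: the first interrupt still allows cleanup and reporting nodes, the second allows reporting only, and the third bails out entirely. A self-contained distillation of that rule - local stand-in types here, whereas the real code consults types.NodeType masks as shown above:

```go
package main

import "fmt"

type InterruptLevel int

const (
	InterruptLevelUninterrupted InterruptLevel = iota
	InterruptLevelCleanupAndReport
	InterruptLevelReportOnly
	InterruptLevelBailOut
)

// mayRun decides whether a node may still run at the given interrupt level.
func mayRun(level InterruptLevel, isReportingNode, isCleanupNode bool) bool {
	switch level {
	case InterruptLevelBailOut:
		return false
	case InterruptLevelReportOnly:
		return isReportingNode
	case InterruptLevelCleanupAndReport:
		return isReportingNode || isCleanupNode
	default:
		return true
	}
}

func main() {
	fmt.Println(mayRun(InterruptLevelCleanupAndReport, false, true)) // true
	fmt.Println(mayRun(InterruptLevelReportOnly, false, true))       // false
}
```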
suite.selectiveLock.Lock()
suite.currentNode = node
suite.currentNodeStartTime = time.Now()
suite.progressStepCursor = ProgressStepCursor{}
suite.selectiveLock.Unlock()
defer func() {
suite.selectiveLock.Lock()
suite.currentNode = Node{}
suite.currentNodeStartTime = time.Time{}
suite.selectiveLock.Unlock()
}()
if suite.config.EmitSpecProgress && !node.MarkedSuppressProgressReporting {
@ -586,6 +688,49 @@ func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text s
} else {
failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1
}
var outcome types.SpecState
gracePeriod := suite.config.GracePeriod
if node.GracePeriod >= 0 {
gracePeriod = node.GracePeriod
}
now := time.Now()
deadline := suite.deadline
if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) {
deadline = specDeadline
}
if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) {
deadline = now.Add(node.NodeTimeout)
}
if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
// we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
if node.NodeTimeout > 0 {
deadline = now.Add(node.NodeTimeout)
} else {
deadline = now.Add(gracePeriod)
}
}
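
The precedence encoded above - the spec deadline tightens the suite deadline, a NodeTimeout tightens both, and an already-expired deadline or pending interrupt still grants NodeTimeout (or GracePeriod) so the node can exit cleanly - can be distilled into a standalone helper. A sketch under those assumptions, with an illustrative name:

```go
package main

import (
	"fmt"
	"time"
)

// effectiveDeadline returns the tightest applicable deadline for a node.
func effectiveDeadline(suiteDeadline, specDeadline time.Time, nodeTimeout, gracePeriod time.Duration, interrupted bool, now time.Time) time.Time {
	deadline := suiteDeadline
	if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) {
		deadline = specDeadline
	}
	if nodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > nodeTimeout) {
		deadline = now.Add(nodeTimeout)
	}
	if (!deadline.IsZero() && deadline.Before(now)) || interrupted {
		// out of time already: still allow NodeTimeout (or GracePeriod) for a clean exit
		if nodeTimeout > 0 {
			deadline = now.Add(nodeTimeout)
		} else {
			deadline = now.Add(gracePeriod)
		}
	}
	return deadline
}

func main() {
	now := time.Now()
	// a 1s NodeTimeout beats a 10s suite deadline
	d := effectiveDeadline(now.Add(10*time.Second), time.Time{}, time.Second, 30*time.Second, false, now)
	fmt.Println(d.Sub(now)) // 1s
}
```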
if !node.HasContext {
// this maps onto the pre-context behavior:
// - an interrupted node exits immediately. with this, context-less nodes that are in a spec with a SpecTimeout and/or are interrupted by other means will simply exit immediately after the timeout/interrupt
// - cleanup nodes have up to GracePeriod (formerly hard-coded at 30s) to complete before they are interrupted
gracePeriod = 0
}
sc := NewSpecContext(suite)
defer sc.cancel()
suite.selectiveLock.Lock()
suite.currentSpecContext = sc
suite.selectiveLock.Unlock()
var deadlineChannel <-chan time.Time
if !deadline.IsZero() {
deadlineChannel = time.After(deadline.Sub(now))
}
var gracePeriodChannel <-chan time.Time
outcomeC := make(chan types.SpecState)
failureC := make(chan types.Failure)
@ -597,26 +742,125 @@ func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text s
suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e)
}
outcome, failureFromRun := suite.failer.Drain()
outcomeC <- outcome
outcomeFromRun, failureFromRun := suite.failer.Drain()
outcomeC <- outcomeFromRun
failureC <- failureFromRun
}()
node.Body()
node.Body(sc)
finished = true
}()
select {
case outcome := <-outcomeC:
failureFromRun := <-failureC
if outcome == types.SpecStatePassed {
return outcome, types.Failure{}
// progress polling timer and channel
var emitProgressNow <-chan time.Time
var progressPoller *time.Timer
var pollProgressAfter, pollProgressInterval = suite.config.PollProgressAfter, suite.config.PollProgressInterval
if node.PollProgressAfter >= 0 {
pollProgressAfter = node.PollProgressAfter
}
if node.PollProgressInterval >= 0 {
pollProgressInterval = node.PollProgressInterval
}
if pollProgressAfter > 0 {
progressPoller = time.NewTimer(pollProgressAfter)
emitProgressNow = progressPoller.C
defer progressPoller.Stop()
}
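
The override convention here is that node-level poll settings use a negative value as "unset": only values >= 0 replace the suite-wide configuration, and a resolved 0 disables polling. A tiny self-contained sketch of that resolution rule, with an illustrative helper name:

```go
package main

import (
	"fmt"
	"time"
)

// resolvePollSetting mirrors the convention above: node-level values use a
// negative duration as "unset", so only values >= 0 override the suite-wide
// configuration, and a resolved 0 disables polling entirely.
func resolvePollSetting(suiteValue, nodeValue time.Duration) time.Duration {
	if nodeValue >= 0 {
		return nodeValue
	}
	return suiteValue
}

func main() {
	fmt.Println(resolvePollSetting(10*time.Second, -1)) // inherit: 10s
	fmt.Println(resolvePollSetting(10*time.Second, 0))  // node disables polling
}
```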
// now we wait for an outcome, an interrupt, a timeout, or a progress poll
for {
select {
case outcomeFromRun := <-outcomeC:
failureFromRun := <-failureC
if outcome == types.SpecStateInterrupted {
// we've already been interrupted. we just managed to actually exit
// before the grace period elapsed
return outcome, failure
} else if outcome == types.SpecStateTimedout {
// we've already timed out. we just managed to actually exit
// before the grace period elapsed. if we have a failure message we should include it
if outcomeFromRun != types.SpecStatePassed {
failure.Location, failure.ForwardedPanic = failureFromRun.Location, failureFromRun.ForwardedPanic
failure.Message = "This spec timed out and reported the following failure after the timeout:\n\n" + failureFromRun.Message
}
return outcome, failure
}
if outcomeFromRun.Is(types.SpecStatePassed) {
return outcomeFromRun, types.Failure{}
} else {
failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic
return outcomeFromRun, failure
}
case <-gracePeriodChannel:
if node.HasContext && outcome.Is(types.SpecStateTimedout) {
report := suite.generateProgressReport(false)
report.Message = "{{bold}}{{orange}}A running node failed to exit in time{{/}}\nGinkgo is moving on but a node has timed out and failed to exit before its grace period elapsed. The node has now leaked and is running in the background.\nHere's a current progress report:"
suite.emitProgressReport(report)
}
return outcome, failure
case <-deadlineChannel:
// we're out of time - the outcome is a timeout and we capture the failure and progress report
outcome = types.SpecStateTimedout
failure.Message, failure.Location = "Timedout", node.CodeLocation
failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput()
failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the timeout occurred:{{/}}"
deadlineChannel = nil
// tell the spec to stop. it's important we generate the progress report first to make sure we capture where
// the spec is actually stuck
sc.cancel()
// and now we wait for the grace period
gracePeriodChannel = time.After(gracePeriod)
case <-interruptStatus.Channel:
interruptStatus = suite.interruptHandler.Status()
deadlineChannel = nil // don't worry about deadlines, time's up now
if outcome == types.SpecStateInvalid {
outcome = types.SpecStateInterrupted
failure.Message, failure.Location = interruptStatus.Message(), node.CodeLocation
if interruptStatus.ShouldIncludeProgressReport() {
failure.ProgressReport = suite.generateProgressReport(true).WithoutCapturedGinkgoWriterOutput()
failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}"
}
}
var report types.ProgressReport
if interruptStatus.ShouldIncludeProgressReport() {
report = suite.generateProgressReport(false)
}
sc.cancel()
if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
if interruptStatus.ShouldIncludeProgressReport() {
report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
suite.emitProgressReport(report)
}
return outcome, failure
}
if interruptStatus.ShouldIncludeProgressReport() {
if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport {
report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
} else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly {
report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
}
suite.emitProgressReport(report)
}
if gracePeriodChannel == nil {
// we haven't given grace yet... so let's
gracePeriodChannel = time.After(gracePeriod)
} else {
// we've already given grace. time's up. now.
return outcome, failure
}
case <-emitProgressNow:
report := suite.generateProgressReport(false)
report.Message = "{{bold}}Automatically polling progress:{{/}}"
suite.emitProgressReport(report)
if pollProgressInterval > 0 {
progressPoller.Reset(pollProgressInterval)
}
}
failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic
return outcome, failure
case <-interruptChannel:
failure.Message, failure.Location = suite.interruptHandler.InterruptMessageWithStackTraces(), node.CodeLocation
return types.SpecStateInterrupted, failure
}
}
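
Stepping back, the select loop above follows a reusable shape: run the body with a cancellable context, convert a deadline into a cancellation, then give the body a grace period to actually return before declaring it leaked. A minimal, self-contained sketch of that pattern - illustrative names, not Ginkgo's:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// runWithGrace cancels the context at the deadline, then waits up to
// gracePeriod for the body to return before declaring it leaked.
func runWithGrace(timeout, gracePeriod time.Duration, body func(ctx context.Context)) string {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	done := make(chan struct{})
	go func() {
		body(ctx)
		close(done)
	}()

	select {
	case <-done:
		return "passed"
	case <-time.After(timeout):
		cancel() // tell the body to stop...
		select { // ...and give it a grace period to comply
		case <-done:
			return "timedout"
		case <-time.After(gracePeriod):
			return "timedout (body leaked)"
		}
	}
}

func main() {
	outcome := runWithGrace(50*time.Millisecond, 100*time.Millisecond, func(ctx context.Context) {
		<-ctx.Done() // a well-behaved body exits once cancelled
	})
	fmt.Println(outcome)
}
```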