rebase: update kubernetes to v1.25.0

update kubernetes to the latest v1.25.0 release.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Madhu Rajanna
2022-08-24 07:54:25 +05:30
committed by mergify[bot]
parent f47839d73d
commit e3bf375035
645 changed files with 42507 additions and 9219 deletions

vendor/github.com/onsi/ginkgo/v2/internal/counter.go generated vendored Normal file (9 additions)

@@ -0,0 +1,9 @@
package internal
func MakeIncrementingIndexCounter() func() (int, error) {
idx := -1
return func() (int, error) {
idx += 1
return idx, nil
}
}
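
A minimal usage sketch of the counter above, assuming it sits alongside the vendored code in the same internal package: the constructor hands back a closure that yields 0, 1, 2, ... on successive calls, and the error it returns is always nil.

package internal

import "fmt"

// ExampleMakeIncrementingIndexCounter demonstrates the closure yielding
// successive indices starting at 0.
func ExampleMakeIncrementingIndexCounter() {
    next := MakeIncrementingIndexCounter()
    for i := 0; i < 3; i++ {
        idx, _ := next() // the error returned by this implementation is always nil
        fmt.Println(idx)
    }
    // Output:
    // 0
    // 1
    // 2
}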

vendor/github.com/onsi/ginkgo/v2/internal/failer.go generated vendored Normal file (99 additions)

@@ -0,0 +1,99 @@
package internal
import (
"fmt"
"sync"
"github.com/onsi/ginkgo/v2/types"
)
type Failer struct {
lock *sync.Mutex
failure types.Failure
state types.SpecState
}
func NewFailer() *Failer {
return &Failer{
lock: &sync.Mutex{},
state: types.SpecStatePassed,
}
}
func (f *Failer) GetState() types.SpecState {
f.lock.Lock()
defer f.lock.Unlock()
return f.state
}
func (f *Failer) GetFailure() types.Failure {
f.lock.Lock()
defer f.lock.Unlock()
return f.failure
}
func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
f.state = types.SpecStatePanicked
f.failure = types.Failure{
Message: "Test Panicked",
Location: location,
ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
}
}
}
func (f *Failer) Fail(message string, location types.CodeLocation) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
f.state = types.SpecStateFailed
f.failure = types.Failure{
Message: message,
Location: location,
}
}
}
func (f *Failer) Skip(message string, location types.CodeLocation) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
f.state = types.SpecStateSkipped
f.failure = types.Failure{
Message: message,
Location: location,
}
}
}
func (f *Failer) AbortSuite(message string, location types.CodeLocation) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
f.state = types.SpecStateAborted
f.failure = types.Failure{
Message: message,
Location: location,
}
}
}
func (f *Failer) Drain() (types.SpecState, types.Failure) {
f.lock.Lock()
defer f.lock.Unlock()
failure := f.failure
outcome := f.state
f.state = types.SpecStatePassed
f.failure = types.Failure{}
return outcome, failure
}
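
A short sketch of the Failer lifecycle, assuming it runs inside the same internal package: the first recorded failure wins because every setter checks that the state is still SpecStatePassed, and Drain returns the outcome while resetting the Failer for the next spec.

package internal

import (
    "fmt"

    "github.com/onsi/ginkgo/v2/types"
)

func demoFailer() {
    f := NewFailer()

    // The first recorded failure wins; the state guard makes later calls no-ops.
    f.Fail("something broke", types.NewCodeLocation(0))
    f.Skip("ignored, the spec already failed", types.NewCodeLocation(0))

    state, failure := f.Drain() // SpecStateFailed plus the recorded failure...
    fmt.Println(state, failure.Message)

    fmt.Println(f.GetState() == types.SpecStatePassed) // ...and the Failer is reset: prints true
}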

vendor/github.com/onsi/ginkgo/v2/internal/focus.go generated vendored Normal file (125 additions)

@@ -0,0 +1,125 @@
package internal
import (
"regexp"
"strings"
"github.com/onsi/ginkgo/v2/types"
)
/*
If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to
unmark the container's focus. This gives developers a more intuitive experience when debugging specs.
It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus -
this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container:
As a common example, consider:
FDescribe("something to debug", function() {
It("works", function() {...})
It("works", function() {...})
FIt("doesn't work", function() {...})
It("works", function() {...})
})
here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container.
The nested policy applied by this function enables this behavior.
*/
func ApplyNestedFocusPolicyToTree(tree *TreeNode) {
var walkTree func(tree *TreeNode) bool
walkTree = func(tree *TreeNode) bool {
if tree.Node.MarkedPending {
return false
}
hasFocusedDescendant := false
for _, child := range tree.Children {
childHasFocus := walkTree(child)
hasFocusedDescendant = hasFocusedDescendant || childHasFocus
}
tree.Node.MarkedFocus = tree.Node.MarkedFocus && !hasFocusedDescendant
return tree.Node.MarkedFocus || hasFocusedDescendant
}
walkTree(tree)
}
/*
Ginkgo supports focusing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus"
It also supports focusing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text
and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs.
If any of the CLI flags are provided they take precedence. The file filters run first followed by the regex filters.
This function sets the `Skip` property on specs by applying Ginkgo's focus policy:
- If there are no CLI arguments and no programmatic focus, do nothing.
- If there are no CLI arguments but a spec somewhere has programmatic focus, skip any specs that have no programmatic focus.
- If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters.
*Note:* specs with pending nodes are Skipped when created by NewSpec.
*/
func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) {
focusString := strings.Join(suiteConfig.FocusStrings, "|")
skipString := strings.Join(suiteConfig.SkipStrings, "|")
hasFocusCLIFlags := focusString != "" || skipString != "" || len(suiteConfig.SkipFiles) > 0 || len(suiteConfig.FocusFiles) > 0 || suiteConfig.LabelFilter != ""
type SkipCheck func(spec Spec) bool
// by default, skip any specs marked pending
skipChecks := []SkipCheck{func(spec Spec) bool { return spec.Nodes.HasNodeMarkedPending() }}
hasProgrammaticFocus := false
if !hasFocusCLIFlags {
// check for programmatic focus
for _, spec := range specs {
if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() {
skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() })
hasProgrammaticFocus = true
break
}
}
}
if suiteConfig.LabelFilter != "" {
labelFilter, _ := types.ParseLabelFilter(suiteConfig.LabelFilter)
skipChecks = append(skipChecks, func(spec Spec) bool {
return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels()))
})
}
if len(suiteConfig.FocusFiles) > 0 {
focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles)
skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) })
}
if len(suiteConfig.SkipFiles) > 0 {
skipFilters, _ := types.ParseFileFilters(suiteConfig.SkipFiles)
skipChecks = append(skipChecks, func(spec Spec) bool { return skipFilters.Matches(spec.Nodes.CodeLocations()) })
}
if focusString != "" {
// skip specs that don't match the focus string
re := regexp.MustCompile(focusString)
skipChecks = append(skipChecks, func(spec Spec) bool { return !re.MatchString(description + " " + spec.Text()) })
}
if skipString != "" {
// skip specs that match the skip string
re := regexp.MustCompile(skipString)
skipChecks = append(skipChecks, func(spec Spec) bool { return re.MatchString(description + " " + spec.Text()) })
}
// skip a spec if any skipCheck returns true. note that we do nothing when every check returns false, to avoid overwriting the skip status already established by the node's pending status
processedSpecs := Specs{}
for _, spec := range specs {
for _, skipCheck := range skipChecks {
if skipCheck(spec) {
spec.Skip = true
break
}
}
processedSpecs = append(processedSpecs, spec)
}
return processedSpecs, hasProgrammaticFocus
}
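
As the comment above describes, the -focus/-skip strings are joined with "|" into a single regular expression and matched against the suite description plus the spec text. A self-contained sketch of that composition, using made-up focus strings and spec texts:

package main

import (
    "fmt"
    "regexp"
    "strings"
)

func main() {
    // hypothetical stand-ins for suiteConfig.FocusStrings and the suite description
    focusStrings := []string{"login", "checkout"}
    description := "integration suite"

    re := regexp.MustCompile(strings.Join(focusStrings, "|")) // "login|checkout"

    for _, specText := range []string{"login works", "cart is empty", "checkout totals"} {
        focused := re.MatchString(description + " " + specText)
        fmt.Printf("%-16s focused=%v\n", specText, focused)
    }
}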


@@ -0,0 +1,17 @@
package global
import (
"github.com/onsi/ginkgo/v2/internal"
)
var Suite *internal.Suite
var Failer *internal.Failer
func init() {
InitializeGlobals()
}
func InitializeGlobals() {
Failer = internal.NewFailer()
Suite = internal.NewSuite()
}

vendor/github.com/onsi/ginkgo/v2/internal/group.go generated vendored Normal file (544 additions)

@@ -0,0 +1,544 @@
package internal
import (
"fmt"
"time"
"github.com/onsi/ginkgo/v2/types"
)
type runOncePair struct {
//nodeID should only run once...
nodeID uint
nodeType types.NodeType
//...for specs in a hierarchy that includes this context
containerID uint
}
func (pair runOncePair) isZero() bool {
return pair.nodeID == 0
}
func runOncePairForNode(node Node, containerID uint) runOncePair {
return runOncePair{
nodeID: node.ID,
nodeType: node.NodeType,
containerID: containerID,
}
}
type runOncePairs []runOncePair
func runOncePairsForSpec(spec Spec) runOncePairs {
pairs := runOncePairs{}
containers := spec.Nodes.WithType(types.NodeTypeContainer)
for _, node := range spec.Nodes {
if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
pairs = append(pairs, runOncePairForNode(node, containers.FirstWithNestingLevel(node.NestingLevel-1).ID))
} else if node.NodeType.Is(types.NodeTypeBeforeEach|types.NodeTypeJustBeforeEach|types.NodeTypeAfterEach|types.NodeTypeJustAfterEach) && node.MarkedOncePerOrdered {
passedIntoAnOrderedContainer := false
firstOrderedContainerDeeperThanNode := containers.FirstSatisfying(func(container Node) bool {
passedIntoAnOrderedContainer = passedIntoAnOrderedContainer || container.MarkedOrdered
return container.NestingLevel >= node.NestingLevel && passedIntoAnOrderedContainer
})
if firstOrderedContainerDeeperThanNode.IsZero() {
continue
}
pairs = append(pairs, runOncePairForNode(node, firstOrderedContainerDeeperThanNode.ID))
}
}
return pairs
}
func (pairs runOncePairs) runOncePairFor(nodeID uint) runOncePair {
for i := range pairs {
if pairs[i].nodeID == nodeID {
return pairs[i]
}
}
return runOncePair{}
}
func (pairs runOncePairs) hasRunOncePair(pair runOncePair) bool {
for i := range pairs {
if pairs[i] == pair {
return true
}
}
return false
}
func (pairs runOncePairs) withType(nodeTypes types.NodeType) runOncePairs {
count := 0
for i := range pairs {
if pairs[i].nodeType.Is(nodeTypes) {
count++
}
}
out, j := make(runOncePairs, count), 0
for i := range pairs {
if pairs[i].nodeType.Is(nodeTypes) {
out[j] = pairs[i]
j++
}
}
return out
}
type group struct {
suite *Suite
specs Specs
runOncePairs map[uint]runOncePairs
runOnceTracker map[runOncePair]types.SpecState
succeeded bool
}
func newGroup(suite *Suite) *group {
return &group{
suite: suite,
runOncePairs: map[uint]runOncePairs{},
runOnceTracker: map[runOncePair]types.SpecState{},
succeeded: true,
}
}
func (g *group) initialReportForSpec(spec Spec) types.SpecReport {
return types.SpecReport{
ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
LeafNodeType: types.NodeTypeIt,
LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
ParallelProcess: g.suite.config.ParallelProcess,
IsSerial: spec.Nodes.HasNodeMarkedSerial(),
IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
}
}
func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) {
if spec.Nodes.HasNodeMarkedPending() {
return types.SpecStatePending, types.Failure{}
}
if spec.Skip {
return types.SpecStateSkipped, types.Failure{}
}
if g.suite.interruptHandler.Status().Interrupted || g.suite.skipAll {
return types.SpecStateSkipped, types.Failure{}
}
if !g.succeeded {
return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
"Spec skipped because an earlier spec in an ordered container failed")
}
beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach)
for _, pair := range beforeOncePairs {
if g.runOnceTracker[pair].Is(types.SpecStateSkipped) {
return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
fmt.Sprintf("Spec skipped because Skip() was called in %s", pair.nodeType))
}
}
if g.suite.config.DryRun {
return types.SpecStatePassed, types.Failure{}
}
return g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure
}
func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool {
lastSpecID := uint(0)
for idx := range g.specs {
if g.specs[idx].Skip {
continue
}
sID := g.specs[idx].SubjectID()
if g.runOncePairs[sID].hasRunOncePair(pair) {
lastSpecID = sID
}
}
return lastSpecID == specID
}
func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
interruptStatus := g.suite.interruptHandler.Status()
pairs := g.runOncePairs[spec.SubjectID()]
nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll)
nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel()
nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...)
nodes = append(nodes, spec.Nodes.FirstNodeWithType(types.NodeTypeIt))
terminatingNode, terminatingPair := Node{}, runOncePair{}
for _, node := range nodes {
oncePair := pairs.runOncePairFor(node.ID)
if !oncePair.isZero() && g.runOnceTracker[oncePair].Is(types.SpecStatePassed) {
continue
}
g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, interruptStatus.Channel, spec.Nodes.BestTextFor(node))
g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
if !oncePair.isZero() {
g.runOnceTracker[oncePair] = g.suite.currentSpecReport.State
}
if g.suite.currentSpecReport.State != types.SpecStatePassed {
terminatingNode, terminatingPair = node, oncePair
break
}
}
afterNodeWasRun := map[uint]bool{}
includeDeferCleanups := false
for {
nodes := spec.Nodes.WithType(types.NodeTypeAfterEach)
nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()
nodes = append(spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel(), nodes...)
if !terminatingNode.IsZero() {
nodes = nodes.WithinNestingLevel(terminatingNode.NestingLevel)
}
if includeDeferCleanups {
nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()...)
nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse()...)
}
nodes = nodes.Filter(func(node Node) bool {
if afterNodeWasRun[node.ID] {
//this node has already been run on this attempt, don't rerun it
return false
}
pair := runOncePair{}
switch node.NodeType {
case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll:
// check if we were generated in an AfterNode that has already run
if afterNodeWasRun[node.NodeIDWhereCleanupWasGenerated] {
return true // we were, so we should definitely run this cleanup now
}
// looks like this cleanup node was generated by a before node or an It.
// the run-once status of a cleanup node is governed by the run-once status of its generator
pair = pairs.runOncePairFor(node.NodeIDWhereCleanupWasGenerated)
default:
pair = pairs.runOncePairFor(node.ID)
}
if pair.isZero() {
// this node is not governed by any run-once policy, we should run it
return true
}
// it's our last chance to run if we're the last spec for our oncePair
isLastSpecWithPair := g.isLastSpecWithPair(spec.SubjectID(), pair)
switch g.suite.currentSpecReport.State {
case types.SpecStatePassed: //this attempt is passing...
return isLastSpecWithPair //...we should run the run-once node only if this is our last chance
case types.SpecStateSkipped: //the spec was skipped by the user...
if isLastSpecWithPair {
return true //...we're the last spec, so we should run the AfterNode
}
if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel {
return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run
}
case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed...
if isFinalAttempt {
return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run
}
if !terminatingPair.isZero() { // ...and it failed in a run-once node, which will be run again
if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) {
return terminatingNode.ID == node.NodeIDWhereCleanupWasGenerated // we should run this node if we're a clean-up generated by it
} else {
return terminatingNode.NestingLevel == node.NestingLevel // ...or if we're at the same nesting level
}
}
case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted
return true //...that means the test run is over and we should clean up the stack. Run the AfterNode
}
return false
})
if len(nodes) == 0 && includeDeferCleanups {
break
}
for _, node := range nodes {
afterNodeWasRun[node.ID] = true
state, failure := g.suite.runNode(node, g.suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(node))
g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
if g.suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted {
g.suite.currentSpecReport.State = state
g.suite.currentSpecReport.Failure = failure
}
}
includeDeferCleanups = true
}
}
func (g *group) run(specs Specs) {
g.specs = specs
for _, spec := range g.specs {
g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec)
}
for _, spec := range g.specs {
g.suite.currentSpecReport = g.initialReportForSpec(spec)
g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec)
g.suite.reporter.WillRun(g.suite.currentSpecReport)
g.suite.reportEach(spec, types.NodeTypeReportBeforeEach)
skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending)
g.suite.currentSpecReport.StartTime = time.Now()
if !skip {
maxAttempts := max(1, spec.FlakeAttempts())
if g.suite.config.FlakeAttempts > 0 {
maxAttempts = g.suite.config.FlakeAttempts
}
for attempt := 0; attempt < maxAttempts; attempt++ {
g.suite.currentSpecReport.NumAttempts = attempt + 1
g.suite.writer.Truncate()
g.suite.outputInterceptor.StartInterceptingOutput()
if attempt > 0 {
fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt)
}
g.attemptSpec(attempt == maxAttempts-1, spec)
g.suite.currentSpecReport.EndTime = time.Now()
g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime)
g.suite.currentSpecReport.CapturedGinkgoWriterOutput += string(g.suite.writer.Bytes())
g.suite.currentSpecReport.CapturedStdOutErr += g.suite.outputInterceptor.StopInterceptingAndReturnOutput()
if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
break
}
}
}
g.suite.reportEach(spec, types.NodeTypeReportAfterEach)
g.suite.processCurrentSpecReport()
if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
g.succeeded = false
}
g.suite.currentSpecReport = types.SpecReport{}
}
}
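
The retry loop in run honors a per-spec FlakeAttempts value (with the suite-level flake-attempts setting taking precedence when set). A sketch of how a spec could opt in through the public decorator, assuming a hypothetical test file for some suite:

package mysuite_test

import (
    "testing"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

func TestMySuite(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "Flaky Suite")
}

var attempts = 0

var _ = Describe("a flaky integration", func() {
    // FlakeAttempts(3) allows up to 3 total attempts before the spec is
    // reported as failed; the retry loop above counts each attempt.
    It("eventually settles", FlakeAttempts(3), func() {
        attempts++
        Expect(attempts).To(BeNumerically(">=", 2)) // fails on attempt 1, passes on the retry
    })
})
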
func (g *group) oldRun(specs Specs) {
var suite = g.suite
nodeState := map[uint]types.SpecState{}
groupSucceeded := true
indexOfLastSpecContainingNodeID := func(id uint) int {
lastIdx := -1
for idx := range specs {
if specs[idx].Nodes.ContainsNodeID(id) && !specs[idx].Skip {
lastIdx = idx
}
}
return lastIdx
}
for i, spec := range specs {
suite.currentSpecReport = types.SpecReport{
ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
LeafNodeType: types.NodeTypeIt,
LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
ParallelProcess: suite.config.ParallelProcess,
IsSerial: spec.Nodes.HasNodeMarkedSerial(),
IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
}
skip := spec.Skip
if spec.Nodes.HasNodeMarkedPending() {
skip = true
suite.currentSpecReport.State = types.SpecStatePending
} else {
if suite.interruptHandler.Status().Interrupted || suite.skipAll {
skip = true
}
if !groupSucceeded {
skip = true
suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
"Spec skipped because an earlier spec in an ordered container failed")
}
for _, node := range spec.Nodes.WithType(types.NodeTypeBeforeAll) {
if nodeState[node.ID] == types.SpecStateSkipped {
skip = true
suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
"Spec skipped because Skip() was called in BeforeAll")
break
}
}
if skip {
suite.currentSpecReport.State = types.SpecStateSkipped
}
}
if suite.config.DryRun && !skip {
skip = true
suite.currentSpecReport.State = types.SpecStatePassed
}
suite.reporter.WillRun(suite.currentSpecReport)
//send the spec report to any attached ReportBeforeEach blocks - this will update suite.currentSpecReport if failures occur in these blocks
suite.reportEach(spec, types.NodeTypeReportBeforeEach)
if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
//the reportEach failed, skip this spec
skip = true
}
suite.currentSpecReport.StartTime = time.Now()
maxAttempts := max(1, spec.FlakeAttempts())
if suite.config.FlakeAttempts > 0 {
maxAttempts = suite.config.FlakeAttempts
}
for attempt := 0; !skip && (attempt < maxAttempts); attempt++ {
suite.currentSpecReport.NumAttempts = attempt + 1
suite.writer.Truncate()
suite.outputInterceptor.StartInterceptingOutput()
if attempt > 0 {
fmt.Fprintf(suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt)
}
isFinalAttempt := (attempt == maxAttempts-1)
interruptStatus := suite.interruptHandler.Status()
deepestNestingLevelAttained := -1
var nodes = spec.Nodes.WithType(types.NodeTypeBeforeAll).Filter(func(n Node) bool {
return nodeState[n.ID] != types.SpecStatePassed
})
nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel()
nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...)
nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeIt)...)
var terminatingNode Node
for j := range nodes {
deepestNestingLevelAttained = max(deepestNestingLevelAttained, nodes[j].NestingLevel)
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(nodes[j], interruptStatus.Channel, spec.Nodes.BestTextFor(nodes[j]))
suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime)
nodeState[nodes[j].ID] = suite.currentSpecReport.State
if suite.currentSpecReport.State != types.SpecStatePassed {
terminatingNode = nodes[j]
break
}
}
afterAllNodesThatRan := map[uint]bool{}
// pull out some shared code so we aren't repeating ourselves down below. this just runs after and cleanup nodes
runAfterAndCleanupNodes := func(nodes Nodes) {
for j := range nodes {
state, failure := suite.runNode(nodes[j], suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(nodes[j]))
suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime)
nodeState[nodes[j].ID] = state
if suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted {
suite.currentSpecReport.State = state
suite.currentSpecReport.Failure = failure
if state != types.SpecStatePassed {
terminatingNode = nodes[j]
}
}
if nodes[j].NodeType.Is(types.NodeTypeAfterAll) {
afterAllNodesThatRan[nodes[j].ID] = true
}
}
}
// pull out a helper that captures the logic of whether or not we should run a given After node.
// there is complexity here stemming from the fact that we allow nested ordered contexts and flaky retries
shouldRunAfterNode := func(n Node) bool {
if n.NodeType.Is(types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
return true
}
var id uint
if n.NodeType.Is(types.NodeTypeAfterAll) {
id = n.ID
if afterAllNodesThatRan[id] { //we've already run on this attempt. don't run again.
return false
}
}
if n.NodeType.Is(types.NodeTypeCleanupAfterAll) {
id = n.NodeIDWhereCleanupWasGenerated
}
isLastSpecWithNode := indexOfLastSpecContainingNodeID(id) == i
switch suite.currentSpecReport.State {
case types.SpecStatePassed: //we've passed so far...
return isLastSpecWithNode //... and we're the last spec with this AfterNode, so we should run it
case types.SpecStateSkipped: //the spec was skipped by the user...
if isLastSpecWithNode {
return true //...we're the last spec, so we should run the AfterNode
}
if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) && terminatingNode.NestingLevel == n.NestingLevel {
return true //...or, a BeforeAll was skipped and it's at our nesting level, so our subgroup is going to skip
}
case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed...
if isFinalAttempt {
return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run
}
if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) {
//...we'll be rerunning a BeforeAll so we should cleanup after it if...
if n.NodeType.Is(types.NodeTypeAfterAll) && terminatingNode.NestingLevel == n.NestingLevel {
return true //we're at the same nesting level
}
if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated {
return true //we're a DeferCleanup generated by it
}
}
if terminatingNode.NodeType.Is(types.NodeTypeAfterAll) {
//...we'll be rerunning an AfterAll so we should cleanup after it if...
if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated {
return true //we're a DeferCleanup generated by it
}
}
case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted
return true //...that means the test run is over and we should clean up the stack. Run the AfterNode
}
return false
}
// first pass - run all the JustAfterEach, AfterEach, and AfterAll nodes. Our shouldRunAfterNode filter function will clean up the AfterAlls for us.
afterNodes := spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel()
afterNodes = afterNodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterEach).CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()...)
afterNodes = afterNodes.WithinNestingLevel(deepestNestingLevelAttained)
afterNodes = afterNodes.Filter(shouldRunAfterNode)
runAfterAndCleanupNodes(afterNodes)
// second pass - perhaps we didn't run the AfterAlls, but a state change due to an AfterEach now requires us to run the AfterAlls:
afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode)
runAfterAndCleanupNodes(afterNodes)
// now we run any DeferCleanups
afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()
afterNodes = append(afterNodes, suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Filter(shouldRunAfterNode).Reverse()...)
runAfterAndCleanupNodes(afterNodes)
// third pass - perhaps a DeferCleanup failed and now we need to run the AfterAlls.
afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode)
runAfterAndCleanupNodes(afterNodes)
// and finally - running AfterAlls may have generated some new DeferCleanup nodes, let's run them to finish up
afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse().Filter(shouldRunAfterNode)
runAfterAndCleanupNodes(afterNodes)
suite.currentSpecReport.EndTime = time.Now()
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
if suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
break
}
}
//send the spec report to any attached ReportAfterEach blocks - this will update suite.currentSpecReport if failures occur in these blocks
suite.reportEach(spec, types.NodeTypeReportAfterEach)
suite.processCurrentSpecReport()
if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
groupSucceeded = false
}
suite.currentSpecReport = types.SpecReport{}
}
}


@@ -0,0 +1,212 @@
package interrupt_handler
import (
"fmt"
"os"
"os/signal"
"runtime"
"sync"
"syscall"
"time"
"github.com/onsi/ginkgo/v2/formatter"
"github.com/onsi/ginkgo/v2/internal/parallel_support"
)
const TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION = 30 * time.Second
const TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT = 10
const ABORT_POLLING_INTERVAL = 500 * time.Millisecond
const ABORT_REPEAT_INTERRUPT_DURATION = 30 * time.Second
type InterruptCause uint
const (
InterruptCauseInvalid InterruptCause = iota
InterruptCauseSignal
InterruptCauseTimeout
InterruptCauseAbortByOtherProcess
)
func (ic InterruptCause) String() string {
switch ic {
case InterruptCauseSignal:
return "Interrupted by User"
case InterruptCauseTimeout:
return "Interrupted by Timeout"
case InterruptCauseAbortByOtherProcess:
return "Interrupted by Other Ginkgo Process"
}
return "INVALID_INTERRUPT_CAUSE"
}
type InterruptStatus struct {
Interrupted bool
Channel chan interface{}
Cause InterruptCause
}
type InterruptHandlerInterface interface {
Status() InterruptStatus
SetInterruptPlaceholderMessage(string)
ClearInterruptPlaceholderMessage()
InterruptMessageWithStackTraces() string
}
type InterruptHandler struct {
c chan interface{}
lock *sync.Mutex
interrupted bool
interruptPlaceholderMessage string
interruptCause InterruptCause
client parallel_support.Client
stop chan interface{}
}
func NewInterruptHandler(timeout time.Duration, client parallel_support.Client) *InterruptHandler {
handler := &InterruptHandler{
c: make(chan interface{}),
lock: &sync.Mutex{},
interrupted: false,
stop: make(chan interface{}),
client: client,
}
handler.registerForInterrupts(timeout)
return handler
}
func (handler *InterruptHandler) Stop() {
close(handler.stop)
}
func (handler *InterruptHandler) registerForInterrupts(timeout time.Duration) {
// os signal handling
signalChannel := make(chan os.Signal, 1)
signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM)
// timeout handling
var timeoutChannel <-chan time.Time
var timeoutTimer *time.Timer
if timeout > 0 {
timeoutTimer = time.NewTimer(timeout)
timeoutChannel = timeoutTimer.C
}
// cross-process abort handling
var abortChannel chan bool
if handler.client != nil {
abortChannel = make(chan bool)
go func() {
pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL)
for {
select {
case <-pollTicker.C:
if handler.client.ShouldAbort() {
abortChannel <- true
pollTicker.Stop()
return
}
case <-handler.stop:
pollTicker.Stop()
return
}
}
}()
}
// listen for any interrupt signals
// note that some (timeouts, cross-process aborts) will only trigger once
// for these we set up a ticker to keep interrupting the suite until it ends
// this ensures any `AfterEach` or `AfterSuite`s that get stuck cleaning up
// get interrupted eventually
go func() {
var interruptCause InterruptCause
var repeatChannel <-chan time.Time
var repeatTicker *time.Ticker
for {
select {
case <-signalChannel:
interruptCause = InterruptCauseSignal
case <-timeoutChannel:
interruptCause = InterruptCauseTimeout
repeatInterruptTimeout := timeout / time.Duration(TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT)
if repeatInterruptTimeout > TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION {
repeatInterruptTimeout = TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION
}
timeoutTimer.Stop()
repeatTicker = time.NewTicker(repeatInterruptTimeout)
repeatChannel = repeatTicker.C
case <-abortChannel:
interruptCause = InterruptCauseAbortByOtherProcess
repeatTicker = time.NewTicker(ABORT_REPEAT_INTERRUPT_DURATION)
repeatChannel = repeatTicker.C
case <-repeatChannel:
//do nothing, just interrupt again using the same interruptCause
case <-handler.stop:
if timeoutTimer != nil {
timeoutTimer.Stop()
}
if repeatTicker != nil {
repeatTicker.Stop()
}
signal.Stop(signalChannel)
return
}
handler.lock.Lock()
handler.interruptCause = interruptCause
if handler.interruptPlaceholderMessage != "" {
fmt.Println(handler.interruptPlaceholderMessage)
}
handler.interrupted = true
close(handler.c)
handler.c = make(chan interface{})
handler.lock.Unlock()
}
}()
}
func (handler *InterruptHandler) Status() InterruptStatus {
handler.lock.Lock()
defer handler.lock.Unlock()
return InterruptStatus{
Interrupted: handler.interrupted,
Channel: handler.c,
Cause: handler.interruptCause,
}
}
func (handler *InterruptHandler) SetInterruptPlaceholderMessage(message string) {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.interruptPlaceholderMessage = message
}
func (handler *InterruptHandler) ClearInterruptPlaceholderMessage() {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.interruptPlaceholderMessage = ""
}
func (handler *InterruptHandler) InterruptMessageWithStackTraces() string {
handler.lock.Lock()
out := fmt.Sprintf("%s\n\n", handler.interruptCause.String())
defer handler.lock.Unlock()
if handler.interruptCause == InterruptCauseAbortByOtherProcess {
return out
}
out += "Here's a stack trace of all running goroutines:\n"
buf := make([]byte, 8192)
for {
n := runtime.Stack(buf, true)
if n < len(buf) {
buf = buf[:n]
break
}
buf = make([]byte, 2*len(buf))
}
out += formatter.Fi(1, "%s", string(buf))
return out
}
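
A sketch of how a caller reacts to the handler, written as if inside the interrupt_handler package and assuming a nil parallel_support.Client is acceptable, as the nil check in registerForInterrupts suggests: Status() returns the current interrupt channel, which is closed and replaced each time an interrupt fires, so racing it against the work unblocks anything that gets stuck.

package interrupt_handler

import (
    "fmt"
    "time"
)

func demoInterruptAwareWait() {
    // timeout 0 disables the suite timeout; a nil client disables cross-process aborts
    handler := NewInterruptHandler(0, nil)
    defer handler.Stop()

    work := make(chan struct{})
    go func() {
        time.Sleep(50 * time.Millisecond) // stand-in for a spec body
        close(work)
    }()

    select {
    case <-work:
        fmt.Println("work finished")
    case <-handler.Status().Channel:
        fmt.Println("interrupted:", handler.Status().Cause)
    }
}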


@@ -0,0 +1,15 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
// +build freebsd openbsd netbsd dragonfly darwin linux solaris
package interrupt_handler
import (
"os"
"os/signal"
"syscall"
)
func SwallowSigQuit() {
c := make(chan os.Signal, 1024)
signal.Notify(c, syscall.SIGQUIT)
}


@@ -0,0 +1,8 @@
//go:build windows
// +build windows
package interrupt_handler
func SwallowSigQuit() {
//noop
}

vendor/github.com/onsi/ginkgo/v2/internal/node.go generated vendored Normal file (660 additions)

@@ -0,0 +1,660 @@
package internal
import (
"fmt"
"reflect"
"sort"
"sync"
"github.com/onsi/ginkgo/v2/types"
)
var _global_node_id_counter = uint(0)
var _global_id_mutex = &sync.Mutex{}
func UniqueNodeID() uint {
//There's a race in the internal integration tests if we don't make
//accessing _global_node_id_counter safe across goroutines.
_global_id_mutex.Lock()
defer _global_id_mutex.Unlock()
_global_node_id_counter += 1
return _global_node_id_counter
}
type Node struct {
ID uint
NodeType types.NodeType
Text string
Body func()
CodeLocation types.CodeLocation
NestingLevel int
SynchronizedBeforeSuiteProc1Body func() []byte
SynchronizedBeforeSuiteAllProcsBody func([]byte)
SynchronizedAfterSuiteAllProcsBody func()
SynchronizedAfterSuiteProc1Body func()
ReportEachBody func(types.SpecReport)
ReportAfterSuiteBody func(types.Report)
MarkedFocus bool
MarkedPending bool
MarkedSerial bool
MarkedOrdered bool
MarkedOncePerOrdered bool
FlakeAttempts int
Labels Labels
NodeIDWhereCleanupWasGenerated uint
}
// Decoration Types
type focusType bool
type pendingType bool
type serialType bool
type orderedType bool
type honorsOrderedType bool
const Focus = focusType(true)
const Pending = pendingType(true)
const Serial = serialType(true)
const Ordered = orderedType(true)
const OncePerOrdered = honorsOrderedType(true)
type FlakeAttempts uint
type Offset uint
type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing
type Labels []string
func UnionOfLabels(labels ...Labels) Labels {
out := Labels{}
seen := map[string]bool{}
for _, labelSet := range labels {
for _, label := range labelSet {
if !seen[label] {
seen[label] = true
out = append(out, label)
}
}
}
return out
}
func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) {
decorations := []interface{}{}
remainingArgs := []interface{}{}
for _, arg := range args {
if isDecoration(arg) {
decorations = append(decorations, arg)
} else {
remainingArgs = append(remainingArgs, arg)
}
}
return decorations, remainingArgs
}
func isDecoration(arg interface{}) bool {
switch t := reflect.TypeOf(arg); {
case t == nil:
return false
case t == reflect.TypeOf(Offset(0)):
return true
case t == reflect.TypeOf(types.CodeLocation{}):
return true
case t == reflect.TypeOf(Focus):
return true
case t == reflect.TypeOf(Pending):
return true
case t == reflect.TypeOf(Serial):
return true
case t == reflect.TypeOf(Ordered):
return true
case t == reflect.TypeOf(OncePerOrdered):
return true
case t == reflect.TypeOf(FlakeAttempts(0)):
return true
case t == reflect.TypeOf(Labels{}):
return true
case t.Kind() == reflect.Slice && isSliceOfDecorations(arg):
return true
default:
return false
}
}
func isSliceOfDecorations(slice interface{}) bool {
vSlice := reflect.ValueOf(slice)
if vSlice.Len() == 0 {
return false
}
for i := 0; i < vSlice.Len(); i++ {
if !isDecoration(vSlice.Index(i).Interface()) {
return false
}
}
return true
}
func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) {
baseOffset := 2
node := Node{
ID: UniqueNodeID(),
NodeType: nodeType,
Text: text,
Labels: Labels{},
CodeLocation: types.NewCodeLocation(baseOffset),
NestingLevel: -1,
}
errors := []error{}
appendError := func(err error) {
if err != nil {
errors = append(errors, err)
}
}
args = unrollInterfaceSlice(args)
remainingArgs := []interface{}{}
//First get the CodeLocation up-to-date
for _, arg := range args {
switch v := arg.(type) {
case Offset:
node.CodeLocation = types.NewCodeLocation(baseOffset + int(v))
case types.CodeLocation:
node.CodeLocation = v
default:
remainingArgs = append(remainingArgs, arg)
}
}
labelsSeen := map[string]bool{}
trackedFunctionError := false
args = remainingArgs
remainingArgs = []interface{}{}
//now process the rest of the args
for _, arg := range args {
switch t := reflect.TypeOf(arg); {
case t == reflect.TypeOf(float64(0)):
break //ignore deprecated timeouts
case t == reflect.TypeOf(Focus):
node.MarkedFocus = bool(arg.(focusType))
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Focus"))
}
case t == reflect.TypeOf(Pending):
node.MarkedPending = bool(arg.(pendingType))
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Pending"))
}
case t == reflect.TypeOf(Serial):
node.MarkedSerial = bool(arg.(serialType))
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Serial"))
}
case t == reflect.TypeOf(Ordered):
node.MarkedOrdered = bool(arg.(orderedType))
if !nodeType.Is(types.NodeTypeContainer) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered"))
}
case t == reflect.TypeOf(OncePerOrdered):
node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType))
if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered"))
}
case t == reflect.TypeOf(FlakeAttempts(0)):
node.FlakeAttempts = int(arg.(FlakeAttempts))
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "FlakeAttempts"))
}
case t == reflect.TypeOf(Labels{}):
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label"))
}
for _, label := range arg.(Labels) {
if !labelsSeen[label] {
labelsSeen[label] = true
label, err := types.ValidateAndCleanupLabel(label, node.CodeLocation)
node.Labels = append(node.Labels, label)
appendError(err)
}
}
case t.Kind() == reflect.Func:
if node.Body != nil {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
isValid := (t.NumOut() == 0) && (t.NumIn() <= 1) && (t.NumIn() == 0 || t.In(0) == reflect.TypeOf(make(Done)))
if !isValid {
appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
if t.NumIn() == 0 {
node.Body = arg.(func())
} else {
deprecationTracker.TrackDeprecation(types.Deprecations.Async(), node.CodeLocation)
deprecatedAsyncBody := arg.(func(Done))
node.Body = func() { deprecatedAsyncBody(make(Done)) }
}
default:
remainingArgs = append(remainingArgs, arg)
}
}
//validations
if node.MarkedPending && node.MarkedFocus {
appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
}
if node.Body == nil && !node.MarkedPending && !trackedFunctionError {
appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
}
for _, arg := range remainingArgs {
appendError(types.GinkgoErrors.UnknownDecorator(node.CodeLocation, nodeType, arg))
}
if len(errors) > 0 {
return Node{}, errors
}
return node, errors
}
func NewSynchronizedBeforeSuiteNode(proc1Body func() []byte, allProcsBody func([]byte), codeLocation types.CodeLocation) (Node, []error) {
return Node{
ID: UniqueNodeID(),
NodeType: types.NodeTypeSynchronizedBeforeSuite,
SynchronizedBeforeSuiteProc1Body: proc1Body,
SynchronizedBeforeSuiteAllProcsBody: allProcsBody,
CodeLocation: codeLocation,
}, nil
}
func NewSynchronizedAfterSuiteNode(allProcsBody func(), proc1Body func(), codeLocation types.CodeLocation) (Node, []error) {
return Node{
ID: UniqueNodeID(),
NodeType: types.NodeTypeSynchronizedAfterSuite,
SynchronizedAfterSuiteAllProcsBody: allProcsBody,
SynchronizedAfterSuiteProc1Body: proc1Body,
CodeLocation: codeLocation,
}, nil
}
func NewReportBeforeEachNode(body func(types.SpecReport), codeLocation types.CodeLocation) (Node, []error) {
return Node{
ID: UniqueNodeID(),
NodeType: types.NodeTypeReportBeforeEach,
ReportEachBody: body,
CodeLocation: codeLocation,
NestingLevel: -1,
}, nil
}
func NewReportAfterEachNode(body func(types.SpecReport), codeLocation types.CodeLocation) (Node, []error) {
return Node{
ID: UniqueNodeID(),
NodeType: types.NodeTypeReportAfterEach,
ReportEachBody: body,
CodeLocation: codeLocation,
NestingLevel: -1,
}, nil
}
func NewReportAfterSuiteNode(text string, body func(types.Report), codeLocation types.CodeLocation) (Node, []error) {
return Node{
ID: UniqueNodeID(),
Text: text,
NodeType: types.NodeTypeReportAfterSuite,
ReportAfterSuiteBody: body,
CodeLocation: codeLocation,
}, nil
}
func NewCleanupNode(fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) {
baseOffset := 2
node := Node{
ID: UniqueNodeID(),
NodeType: types.NodeTypeCleanupInvalid,
CodeLocation: types.NewCodeLocation(baseOffset),
NestingLevel: -1,
}
remainingArgs := []interface{}{}
for _, arg := range args {
switch t := reflect.TypeOf(arg); {
case t == reflect.TypeOf(Offset(0)):
node.CodeLocation = types.NewCodeLocation(baseOffset + int(arg.(Offset)))
case t == reflect.TypeOf(types.CodeLocation{}):
node.CodeLocation = arg.(types.CodeLocation)
default:
remainingArgs = append(remainingArgs, arg)
}
}
if len(remainingArgs) == 0 {
return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(node.CodeLocation)}
}
callback := reflect.ValueOf(remainingArgs[0])
if !(callback.Kind() == reflect.Func && callback.Type().NumOut() <= 1) {
return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(node.CodeLocation)}
}
callArgs := []reflect.Value{}
for _, arg := range remainingArgs[1:] {
callArgs = append(callArgs, reflect.ValueOf(arg))
}
cl := node.CodeLocation
node.Body = func() {
out := callback.Call(callArgs)
if len(out) == 1 && !out[0].IsNil() {
fail(fmt.Sprintf("DeferCleanup callback returned error: %v", out[0]), cl)
}
}
return node, nil
}
func (n Node) IsZero() bool {
return n.ID == 0
}
/* Nodes */
type Nodes []Node
func (n Nodes) CopyAppend(nodes ...Node) Nodes {
numN := len(n)
out := make(Nodes, numN+len(nodes))
for i, node := range n {
out[i] = node
}
for j, node := range nodes {
out[numN+j] = node
}
return out
}
func (n Nodes) SplitAround(pivot Node) (Nodes, Nodes) {
pivotIdx := len(n)
for i := range n {
if n[i].ID == pivot.ID {
pivotIdx = i
break
}
}
left := n[:pivotIdx]
right := Nodes{}
if pivotIdx+1 < len(n) {
right = n[pivotIdx+1:]
}
return left, right
}
func (n Nodes) FirstNodeWithType(nodeTypes types.NodeType) Node {
for i := range n {
if n[i].NodeType.Is(nodeTypes) {
return n[i]
}
}
return Node{}
}
func (n Nodes) WithType(nodeTypes types.NodeType) Nodes {
count := 0
for i := range n {
if n[i].NodeType.Is(nodeTypes) {
count++
}
}
out, j := make(Nodes, count), 0
for i := range n {
if n[i].NodeType.Is(nodeTypes) {
out[j] = n[i]
j++
}
}
return out
}
func (n Nodes) WithoutType(nodeTypes types.NodeType) Nodes {
count := 0
for i := range n {
if !n[i].NodeType.Is(nodeTypes) {
count++
}
}
out, j := make(Nodes, count), 0
for i := range n {
if !n[i].NodeType.Is(nodeTypes) {
out[j] = n[i]
j++
}
}
return out
}
func (n Nodes) WithoutNode(nodeToExclude Node) Nodes {
idxToExclude := len(n)
for i := range n {
if n[i].ID == nodeToExclude.ID {
idxToExclude = i
break
}
}
if idxToExclude == len(n) {
return n
}
out, j := make(Nodes, len(n)-1), 0
for i := range n {
if i == idxToExclude {
continue
}
out[j] = n[i]
j++
}
return out
}
func (n Nodes) Filter(filter func(Node) bool) Nodes {
trufa, count := make([]bool, len(n)), 0
for i := range n {
if filter(n[i]) {
trufa[i] = true
count += 1
}
}
out, j := make(Nodes, count), 0
for i := range n {
if trufa[i] {
out[j] = n[i]
j++
}
}
return out
}
func (n Nodes) FirstSatisfying(filter func(Node) bool) Node {
for i := range n {
if filter(n[i]) {
return n[i]
}
}
return Node{}
}
func (n Nodes) WithinNestingLevel(deepestNestingLevel int) Nodes {
count := 0
for i := range n {
if n[i].NestingLevel <= deepestNestingLevel {
count++
}
}
out, j := make(Nodes, count), 0
for i := range n {
if n[i].NestingLevel <= deepestNestingLevel {
out[j] = n[i]
j++
}
}
return out
}
func (n Nodes) SortedByDescendingNestingLevel() Nodes {
out := make(Nodes, len(n))
copy(out, n)
sort.SliceStable(out, func(i int, j int) bool {
return out[i].NestingLevel > out[j].NestingLevel
})
return out
}
func (n Nodes) SortedByAscendingNestingLevel() Nodes {
out := make(Nodes, len(n))
copy(out, n)
sort.SliceStable(out, func(i int, j int) bool {
return out[i].NestingLevel < out[j].NestingLevel
})
return out
}
func (n Nodes) FirstWithNestingLevel(level int) Node {
for i := range n {
if n[i].NestingLevel == level {
return n[i]
}
}
return Node{}
}
func (n Nodes) Reverse() Nodes {
out := make(Nodes, len(n))
for i := range n {
out[len(n)-1-i] = n[i]
}
return out
}
func (n Nodes) Texts() []string {
out := make([]string, len(n))
for i := range n {
out[i] = n[i].Text
}
return out
}
func (n Nodes) Labels() [][]string {
out := make([][]string, len(n))
for i := range n {
if n[i].Labels == nil {
out[i] = []string{}
} else {
out[i] = []string(n[i].Labels)
}
}
return out
}
func (n Nodes) UnionOfLabels() []string {
out := []string{}
seen := map[string]bool{}
for i := range n {
for _, label := range n[i].Labels {
if !seen[label] {
seen[label] = true
out = append(out, label)
}
}
}
return out
}
func (n Nodes) CodeLocations() []types.CodeLocation {
out := make([]types.CodeLocation, len(n))
for i := range n {
out[i] = n[i].CodeLocation
}
return out
}
func (n Nodes) BestTextFor(node Node) string {
if node.Text != "" {
return node.Text
}
parentNestingLevel := node.NestingLevel - 1
for i := range n {
if n[i].Text != "" && n[i].NestingLevel == parentNestingLevel {
return n[i].Text
}
}
return ""
}
func (n Nodes) ContainsNodeID(id uint) bool {
for i := range n {
if n[i].ID == id {
return true
}
}
return false
}
func (n Nodes) HasNodeMarkedPending() bool {
for i := range n {
if n[i].MarkedPending {
return true
}
}
return false
}
func (n Nodes) HasNodeMarkedFocus() bool {
for i := range n {
if n[i].MarkedFocus {
return true
}
}
return false
}
func (n Nodes) HasNodeMarkedSerial() bool {
for i := range n {
if n[i].MarkedSerial {
return true
}
}
return false
}
func (n Nodes) FirstNodeMarkedOrdered() Node {
for i := range n {
if n[i].MarkedOrdered {
return n[i]
}
}
return Node{}
}
func unrollInterfaceSlice(args interface{}) []interface{} {
v := reflect.ValueOf(args)
if v.Kind() != reflect.Slice {
return []interface{}{args}
}
out := []interface{}{}
for i := 0; i < v.Len(); i++ {
el := reflect.ValueOf(v.Index(i).Interface())
if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) {
out = append(out, unrollInterfaceSlice(el.Interface())...)
} else {
out = append(out, v.Index(i).Interface())
}
}
return out
}
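
A small sketch of the decoration handling above, assuming it compiles inside the same internal package: PartitionDecorations splits a mixed argument list into recognized decorations and everything else, which is what the DSL-level constructors rely on before calling NewNode.

package internal

import "fmt"

func demoPartitionDecorations() {
    body := func() {}

    decorations, remaining := PartitionDecorations(
        Focus,                  // recognized decoration
        Labels{"fast", "unit"}, // recognized decoration
        "some text",            // not a decoration: passed through
        body,                   // not a decoration: would become the node body in NewNode
    )

    fmt.Println(len(decorations), len(remaining)) // prints: 2 2
}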

vendor/github.com/onsi/ginkgo/v2/internal/ordering.go generated vendored Normal file (121 additions)

@@ -0,0 +1,121 @@
package internal
import (
"math/rand"
"sort"
"github.com/onsi/ginkgo/v2/types"
)
type GroupedSpecIndices []SpecIndices
type SpecIndices []int
func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, GroupedSpecIndices) {
/*
Ginkgo has sophisticated support for randomizing specs. Specs are guaranteed to have the same
order for a given seed across test runs.
By default only top-level containers and specs are shuffled - this makes for a more intuitive debugging
experience - specs within a given container run in the order they appear in the file.
Developers can set -randomizeAllSpecs to shuffle _all_ specs.
In addition, spec containers can be marked as Ordered. Specs within an Ordered container are never shuffled.
Finally, specs and spec containers can be marked as Serial. When running in parallel, serial specs run on Process #1 _after_ all other processes have finished.
*/
// Seed a new random source based on the configured random seed.
r := rand.New(rand.NewSource(suiteConfig.RandomSeed))
// first break things into execution groups
// a group represents a single unit of execution and is a collection of SpecIndices
// usually a group is just a single spec, however ordered containers must be preserved as a single group
executionGroupIDs := []uint{}
executionGroups := map[uint]SpecIndices{}
for idx, spec := range specs {
groupNode := spec.Nodes.FirstNodeMarkedOrdered()
if groupNode.IsZero() {
groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt)
}
executionGroups[groupNode.ID] = append(executionGroups[groupNode.ID], idx)
if len(executionGroups[groupNode.ID]) == 1 {
executionGroupIDs = append(executionGroupIDs, groupNode.ID)
}
}
// now, we only shuffle all the execution groups if we're randomizing all specs, otherwise
// we shuffle outermost containers. so we need to form shufflable groupings of GroupIDs
shufflableGroupingIDs := []uint{}
shufflableGroupingIDToGroupIDs := map[uint][]uint{}
shufflableGroupingsIDToSortKeys := map[uint]string{}
// for each execution group we're going to have to pick a node to represent how the
// execution group is grouped for shuffling:
nodeTypesToShuffle := types.NodeTypesForContainerAndIt
if suiteConfig.RandomizeAllSpecs {
nodeTypesToShuffle = types.NodeTypeIt
}
//so, for each execution group:
for _, groupID := range executionGroupIDs {
// pick out a representative spec
representativeSpec := specs[executionGroups[groupID][0]]
// and grab the node on the spec that will represent which shufflable group this execution group belongs to
shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle)
//add the execution group to its shufflable group
shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID] = append(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID], groupID)
//and if it's the first execution group in this shufflable grouping
if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 {
// record the shufflable group ID
shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID)
// and record the sort key to use
shufflableGroupingsIDToSortKeys[shufflableGroupingNode.ID] = shufflableGroupingNode.CodeLocation.String()
}
}
// now we sort the shufflable groups by the sort key. We use the shufflable group node's code location and break ties using its node ID
sort.SliceStable(shufflableGroupingIDs, func(i, j int) bool {
keyA := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[i]]
keyB := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[j]]
if keyA == keyB {
return shufflableGroupingIDs[i] < shufflableGroupingIDs[j]
} else {
return keyA < keyB
}
})
// now we permute the sorted shufflable grouping IDs and build the ordered Groups
orderedGroups := GroupedSpecIndices{}
permutation := r.Perm(len(shufflableGroupingIDs))
for _, j := range permutation {
//let's get the execution group IDs for this shufflable group:
executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]]
// and we'll add their associated specindices to the orderedGroups slice:
for _, executionGroupID := range executionGroupIDsForJ {
orderedGroups = append(orderedGroups, executionGroups[executionGroupID])
}
}
// If we're running in series, we're done.
if suiteConfig.ParallelTotal == 1 {
return orderedGroups, GroupedSpecIndices{}
}
// We're running in parallel so we need to partition the ordered groups into a parallelizable set and a serialized set.
// The parallelizable groups will run across all Ginkgo processes...
// ...the serial groups will only run on Process #1 after all other processes have exited.
parallelizableGroups, serialGroups := GroupedSpecIndices{}, GroupedSpecIndices{}
for _, specIndices := range orderedGroups {
if specs[specIndices[0]].Nodes.HasNodeMarkedSerial() {
serialGroups = append(serialGroups, specIndices)
} else {
parallelizableGroups = append(parallelizableGroups, specIndices)
}
}
return parallelizableGroups, serialGroups
}
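
OrderSpecs seeds its random source from suiteConfig.RandomSeed, so a given seed always produces the same permutation across runs. A self-contained sketch of that guarantee, using an arbitrary seed value:

package main

import (
    "fmt"
    "math/rand"
)

func main() {
    const seed = 17 // arbitrary stand-in for suiteConfig.RandomSeed

    // Two sources seeded identically yield identical permutations, which is
    // what makes a fixed seed reproduce the same spec order across runs.
    first := rand.New(rand.NewSource(seed)).Perm(5)
    second := rand.New(rand.NewSource(seed)).Perm(5)

    fmt.Println(first)
    fmt.Println(second) // prints the same permutation as above
}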


@@ -0,0 +1,250 @@
package internal
import (
"bytes"
"io"
"os"
"time"
)
const BAILOUT_TIME = 1 * time.Second
const BAILOUT_MESSAGE = `Ginkgo detected an issue while intercepting output.
When running in parallel, Ginkgo captures stdout and stderr output
and attaches it to the running spec. It looks like that process is getting
stuck for this suite.
This usually happens if you, or a library you are using, spin up an external
process and set cmd.Stdout = os.Stdout and/or cmd.Stderr = os.Stderr. This
causes the external process to keep Ginkgo's output interceptor pipe open and
causes output interception to hang.
Ginkgo has detected this and shortcircuited the capture process. The specs
will continue running after this message however output from the external
process that caused this issue will not be captured.
You have several options to fix this. In preferred order they are:
1. Pass GinkgoWriter instead of os.Stdout or os.Stderr to your process.
2. Ensure your process exits before the current spec completes. If your
process is long-lived and must cross spec boundaries, this option won't
work for you.
3. Pause Ginkgo's output interceptor before starting your process and then
resume it after. Use PauseOutputInterception() and ResumeOutputInterception()
to do this.
4. Set --output-interceptor-mode=none when running your Ginkgo suite. This will
turn off all output interception but allow specs to run in parallel without this
issue. You may miss important output if you do this including output from Go's
race detector.
More details on issue #851 - https://github.com/onsi/ginkgo/issues/851
`
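
Option 3 in the message above refers to the public DSL helpers PauseOutputInterception() and ResumeOutputInterception(). A sketch of bracketing a hypothetical external process that is wired directly to os.Stdout/os.Stderr (spec text and command are illustrative assumptions):

package mysuite_test

import (
    "os"
    "os/exec"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

var _ = It("runs an external tool without wedging the interceptor", func() {
    cmd := exec.Command("echo", "hello") // stand-in for a long-lived external process
    cmd.Stdout = os.Stdout               // the pattern that would otherwise hold the pipe open
    cmd.Stderr = os.Stderr

    // Bracket the external process so Ginkgo's interceptor pipe is released.
    PauseOutputInterception()
    err := cmd.Run()
    ResumeOutputInterception()

    Expect(err).NotTo(HaveOccurred())
})
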
/*
The OutputInterceptor is used to
intercept and capture all stdout and stderr output during a test run.
*/
type OutputInterceptor interface {
StartInterceptingOutput()
StartInterceptingOutputAndForwardTo(io.Writer)
StopInterceptingAndReturnOutput() string
PauseIntercepting()
ResumeIntercepting()
Shutdown()
}
type NoopOutputInterceptor struct{}
func (interceptor NoopOutputInterceptor) StartInterceptingOutput() {}
func (interceptor NoopOutputInterceptor) StartInterceptingOutputAndForwardTo(io.Writer) {}
func (interceptor NoopOutputInterceptor) StopInterceptingAndReturnOutput() string { return "" }
func (interceptor NoopOutputInterceptor) PauseIntercepting() {}
func (interceptor NoopOutputInterceptor) ResumeIntercepting() {}
func (interceptor NoopOutputInterceptor) Shutdown() {}
type pipePair struct {
reader *os.File
writer *os.File
}
func startPipeFactory(pipeChannel chan pipePair, shutdown chan interface{}) {
for {
//make the next pipe...
pair := pipePair{}
pair.reader, pair.writer, _ = os.Pipe()
select {
//...and provide it to the next consumer (they are responsible for closing the files)
case pipeChannel <- pair:
continue
//...or close the files if we were told to shutdown
case <-shutdown:
pair.reader.Close()
pair.writer.Close()
return
}
}
}
type interceptorImplementation interface {
CreateStdoutStderrClones() (*os.File, *os.File)
ConnectPipeToStdoutStderr(*os.File)
RestoreStdoutStderrFromClones(*os.File, *os.File)
ShutdownClones(*os.File, *os.File)
}
type genericOutputInterceptor struct {
intercepting bool
stdoutClone *os.File
stderrClone *os.File
pipe pipePair
shutdown chan interface{}
emergencyBailout chan interface{}
pipeChannel chan pipePair
interceptedContent chan string
forwardTo io.Writer
accumulatedOutput string
implementation interceptorImplementation
}
func (interceptor *genericOutputInterceptor) StartInterceptingOutput() {
interceptor.StartInterceptingOutputAndForwardTo(io.Discard)
}
func (interceptor *genericOutputInterceptor) StartInterceptingOutputAndForwardTo(w io.Writer) {
if interceptor.intercepting {
return
}
interceptor.accumulatedOutput = ""
interceptor.forwardTo = w
interceptor.ResumeIntercepting()
}
func (interceptor *genericOutputInterceptor) StopInterceptingAndReturnOutput() string {
if interceptor.intercepting {
interceptor.PauseIntercepting()
}
return interceptor.accumulatedOutput
}
func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
if interceptor.intercepting {
return
}
interceptor.intercepting = true
if interceptor.stdoutClone == nil {
interceptor.stdoutClone, interceptor.stderrClone = interceptor.implementation.CreateStdoutStderrClones()
interceptor.shutdown = make(chan interface{})
go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown)
}
// Now we make a pipe. We'll use this to redirect anything written to the 1 and 2 file descriptors into the pipe (which is how everything else in the world is trying to log to stdout and stderr)
// We get the pipe from our pipe factory. It runs in the background so we can request the next pipe while the spec being intercepted is running
interceptor.pipe = <-interceptor.pipeChannel
interceptor.emergencyBailout = make(chan interface{})
//Spin up a goroutine to copy data from the pipe into a buffer; this is how we capture any output the user is emitting
go func() {
buffer := &bytes.Buffer{}
destination := io.MultiWriter(buffer, interceptor.forwardTo)
copyFinished := make(chan interface{})
reader := interceptor.pipe.reader
go func() {
io.Copy(destination, reader)
reader.Close() // close the read end of the pipe so we don't leak a file descriptor
close(copyFinished)
}()
select {
case <-copyFinished:
interceptor.interceptedContent <- buffer.String()
case <-interceptor.emergencyBailout:
interceptor.interceptedContent <- ""
}
}()
interceptor.implementation.ConnectPipeToStdoutStderr(interceptor.pipe.writer)
}
func (interceptor *genericOutputInterceptor) PauseIntercepting() {
if !interceptor.intercepting {
return
}
// first we have to close the write end of the pipe. To do this we have to close all file descriptors pointing
// to the write end. So that would be the pipewriter itself, and FD #1 and FD #2 if we've Dup2'd them
interceptor.pipe.writer.Close() // the pipewriter itself
// we also need to stop intercepting. we do that by reconnecting the stdout and stderr file descriptions back to their respective #1 and #2 file descriptors;
// this also closes #1 and #2 before pointing them back at their original stdout and stderr file descriptions
interceptor.implementation.RestoreStdoutStderrFromClones(interceptor.stdoutClone, interceptor.stderrClone)
var content string
select {
case content = <-interceptor.interceptedContent:
case <-time.After(BAILOUT_TIME):
/*
By closing all the file descriptors associated with the pipe writer's file description, the io.Copy reading from the reader
should eventually receive an EOF and exit.
**However**, if the user has spun up an external process and passed os.Stdout/os.Stderr to cmd.Stdout/cmd.Stderr then the external process
will have a file descriptor pointing to the pipe writer's file description and it will not close until the external process exits.
That would leave us hanging here waiting for the io.Copy to finish forever. Instead we invoke this emergency escape valve. This returns whatever
content we've got but leaves the io.Copy running. This ensures the external process can continue writing without hanging, at the cost of leaking a goroutine
and a file descriptor (though these will be cleaned up when the process exits).
We tack on a message to notify the user that they've hit this edge case and encourage them to address it.
*/
close(interceptor.emergencyBailout)
content = <-interceptor.interceptedContent + BAILOUT_MESSAGE
}
interceptor.accumulatedOutput += content
interceptor.intercepting = false
}
func (interceptor *genericOutputInterceptor) Shutdown() {
interceptor.PauseIntercepting()
if interceptor.stdoutClone != nil {
close(interceptor.shutdown)
interceptor.implementation.ShutdownClones(interceptor.stdoutClone, interceptor.stderrClone)
interceptor.stdoutClone = nil
interceptor.stderrClone = nil
}
}
/* This is used on windows builds but included here so it can be explicitly tested on unix systems too */
func NewOSGlobalReassigningOutputInterceptor() OutputInterceptor {
return &genericOutputInterceptor{
interceptedContent: make(chan string),
pipeChannel: make(chan pipePair),
shutdown: make(chan interface{}),
implementation: &osGlobalReassigningOutputInterceptorImpl{},
}
}
type osGlobalReassigningOutputInterceptorImpl struct{}
func (impl *osGlobalReassigningOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
return os.Stdout, os.Stderr
}
func (impl *osGlobalReassigningOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
os.Stdout = pipeWriter
os.Stderr = pipeWriter
}
func (impl *osGlobalReassigningOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
os.Stdout = stdoutClone
os.Stderr = stderrClone
}
func (impl *osGlobalReassigningOutputInterceptorImpl) ShutdownClones(_ *os.File, _ *os.File) {
//noop
}
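A minimal usage sketch (an editor's illustration, not part of the vendored file) of the first and third remedies listed in BAILOUT_MESSAGE above, written as a spec inside an existing Ginkgo v2 suite. The external commands are placeholders; GinkgoWriter, PauseOutputInterception, ResumeOutputInterception and DeferCleanup are public ginkgo/v2 APIs.

package example_test

import (
	"os"
	"os/exec"

	. "github.com/onsi/ginkgo/v2"
)

var _ = It("shells out without stalling output interception", func() {
	// Remedy 1: point the child process at GinkgoWriter instead of os.Stdout/os.Stderr,
	// so the interceptor's pipe is never inherited by the child.
	cmd := exec.Command("echo", "hello")
	cmd.Stdout = GinkgoWriter
	cmd.Stderr = GinkgoWriter
	if err := cmd.Run(); err != nil {
		Fail(err.Error())
	}

	// Remedy 3: for a long-lived child that must inherit os.Stdout, pause interception
	// while it starts and resume afterwards.
	PauseOutputInterception()
	daemon := exec.Command("sleep", "60") // placeholder for a long-lived process
	daemon.Stdout = os.Stdout
	daemon.Stderr = os.Stderr
	if err := daemon.Start(); err != nil {
		Fail(err.Error())
	}
	ResumeOutputInterception()
	DeferCleanup(daemon.Process.Kill)
})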

View File

@ -0,0 +1,62 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
// +build freebsd openbsd netbsd dragonfly darwin linux solaris
package internal
import (
"os"
"golang.org/x/sys/unix"
)
func NewOutputInterceptor() OutputInterceptor {
return &genericOutputInterceptor{
interceptedContent: make(chan string),
pipeChannel: make(chan pipePair),
shutdown: make(chan interface{}),
implementation: &dupSyscallOutputInterceptorImpl{},
}
}
type dupSyscallOutputInterceptorImpl struct{}
func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
// To clone stdout and stderr we:
// First, create two clone file descriptors that point to the stdout and stderr file descriptions
stdoutCloneFD, _ := unix.Dup(1)
stderrCloneFD, _ := unix.Dup(2)
// And then wrap the clone file descriptors in files.
// One benefit of this (that we don't use yet) is that we can actually write
// to these files to emit output to the console even though we're intercepting output
stdoutClone := os.NewFile(uintptr(stdoutCloneFD), "stdout-clone")
stderrClone := os.NewFile(uintptr(stderrCloneFD), "stderr-clone")
//these clones remain alive throughout the lifecycle of the suite and don't need to be recreated
//this speeds things up a bit, actually.
return stdoutClone, stderrClone
}
func (impl *dupSyscallOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
// To redirect output to our pipe we need to point the 1 and 2 file descriptors (which is how the world tries to log things)
// to the write end of the pipe.
// We do this with Dup2 (possibly Dup3 on some architectures) to have file descriptors 1 and 2 point to the same file description as the pipeWriter
// This effectively shunts data written to stdout and stderr to the write end of our pipe
unix.Dup2(int(pipeWriter.Fd()), 1)
unix.Dup2(int(pipeWriter.Fd()), 2)
}
func (impl *dupSyscallOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
// To restore stdout/stderr from the clones we have the 1 and 2 file descriptors
// point to the original file descriptions that we saved off in the clones.
// This has the added benefit of closing the connection between these descriptors and the write end of the pipe,
// which is what allows the io.Copy on the pipe.Reader to end.
unix.Dup2(int(stdoutClone.Fd()), 1)
unix.Dup2(int(stderrClone.Fd()), 2)
}
func (impl *dupSyscallOutputInterceptorImpl) ShutdownClones(stdoutClone *os.File, stderrClone *os.File) {
// We're done with the clones so we can close them to clean up after ourselves
stdoutClone.Close()
stderrClone.Close()
}
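A standalone sketch (an editor's illustration, not part of the vendored file) of the dup/dup2 mechanism the comments above describe, assuming a platform where unix.Dup2 is available (on linux/arm64, for example, Dup3 must be used instead):

package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Clone file descriptor 1 so the real stdout can be restored later.
	stdoutCloneFD, _ := unix.Dup(1)

	// Point file descriptor 1 at the write end of a pipe; fmt.Println now feeds the pipe.
	reader, writer, _ := os.Pipe()
	unix.Dup2(int(writer.Fd()), 1)

	fmt.Println("captured, not printed")

	// Restore file descriptor 1 from the clone and close the remaining write end so
	// the reader below sees EOF.
	unix.Dup2(stdoutCloneFD, 1)
	writer.Close()

	captured, _ := io.ReadAll(reader)
	fmt.Printf("intercepted output: %q\n", string(captured))
}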

View File

@ -0,0 +1,7 @@
// +build windows
package internal
func NewOutputInterceptor() OutputInterceptor {
return NewOSGlobalReassigningOutputInterceptor()
}

View File

@ -0,0 +1,69 @@
package parallel_support
import (
"fmt"
"io"
"os"
"time"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
)
type BeforeSuiteState struct {
Data []byte
State types.SpecState
}
type ParallelIndexCounter struct {
Index int
}
var ErrorGone = fmt.Errorf("gone")
var ErrorFailed = fmt.Errorf("failed")
var ErrorEarly = fmt.Errorf("early")
var POLLING_INTERVAL = 50 * time.Millisecond
type Server interface {
Start()
Close()
Address() string
RegisterAlive(node int, alive func() bool)
GetSuiteDone() chan interface{}
GetOutputDestination() io.Writer
SetOutputDestination(io.Writer)
}
type Client interface {
Connect() bool
Close() error
PostSuiteWillBegin(report types.Report) error
PostDidRun(report types.SpecReport) error
PostSuiteDidEnd(report types.Report) error
PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error
BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error)
BlockUntilNonprimaryProcsHaveFinished() error
BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error)
FetchNextCounter() (int, error)
PostAbort() error
ShouldAbort() bool
Write(p []byte) (int, error)
}
func NewServer(parallelTotal int, reporter reporters.Reporter) (Server, error) {
if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" {
return newHttpServer(parallelTotal, reporter)
} else {
return newRPCServer(parallelTotal, reporter)
}
}
func NewClient(serverHost string) Client {
if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" {
return newHttpClient(serverHost)
} else {
return newRPCClient(serverHost)
}
}
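A rough sketch (an editor's illustration, not part of the vendored file) of how a server/client pair is wired together. Because parallel_support lives under internal/, this only compiles from within the ginkgo module itself, and it assumes reporters.NoopReporter is available as a stand-in reporter:

package main

import (
	"fmt"
	"os"

	"github.com/onsi/ginkgo/v2/internal/parallel_support"
	"github.com/onsi/ginkgo/v2/reporters"
)

func main() {
	// RPC is the default wire protocol; HTTP is selected via this environment variable.
	os.Setenv("GINKGO_PARALLEL_PROTOCOL", "HTTP")

	server, err := parallel_support.NewServer(2, reporters.NoopReporter{})
	if err != nil {
		panic(err)
	}
	server.Start()
	defer server.Close()

	client := parallel_support.NewClient(server.Address())
	if !client.Connect() {
		panic("could not connect to the parallel-support server")
	}
	defer client.Close()

	fmt.Println("connected to", server.Address())
}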

View File

@ -0,0 +1,152 @@
package parallel_support
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
"github.com/onsi/ginkgo/v2/types"
)
type httpClient struct {
serverHost string
}
func newHttpClient(serverHost string) *httpClient {
return &httpClient{
serverHost: serverHost,
}
}
func (client *httpClient) Connect() bool {
resp, err := http.Get(client.serverHost + "/up")
if err != nil {
return false
}
resp.Body.Close()
return resp.StatusCode == http.StatusOK
}
func (client *httpClient) Close() error {
return nil
}
func (client *httpClient) post(path string, data interface{}) error {
var body io.Reader
if data != nil {
encoded, err := json.Marshal(data)
if err != nil {
return err
}
body = bytes.NewBuffer(encoded)
}
resp, err := http.Post(client.serverHost+path, "application/json", body)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("received unexpected status code %d", resp.StatusCode)
}
return nil
}
func (client *httpClient) poll(path string, data interface{}) error {
for {
resp, err := http.Get(client.serverHost + path)
if err != nil {
return err
}
if resp.StatusCode == http.StatusTooEarly {
resp.Body.Close()
time.Sleep(POLLING_INTERVAL)
continue
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusGone {
return ErrorGone
}
if resp.StatusCode == http.StatusFailedDependency {
return ErrorFailed
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("received unexpected status code %d", resp.StatusCode)
}
if data != nil {
return json.NewDecoder(resp.Body).Decode(data)
}
return nil
}
}
func (client *httpClient) PostSuiteWillBegin(report types.Report) error {
return client.post("/suite-will-begin", report)
}
func (client *httpClient) PostDidRun(report types.SpecReport) error {
return client.post("/did-run", report)
}
func (client *httpClient) PostSuiteDidEnd(report types.Report) error {
return client.post("/suite-did-end", report)
}
func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
beforeSuiteState := BeforeSuiteState{
State: state,
Data: data,
}
return client.post("/before-suite-completed", beforeSuiteState)
}
func (client *httpClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
var beforeSuiteState BeforeSuiteState
err := client.poll("/before-suite-state", &beforeSuiteState)
if err == ErrorGone {
return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
}
return beforeSuiteState.State, beforeSuiteState.Data, err
}
func (client *httpClient) BlockUntilNonprimaryProcsHaveFinished() error {
return client.poll("/have-nonprimary-procs-finished", nil)
}
func (client *httpClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
var report types.Report
err := client.poll("/aggregated-nonprimary-procs-report", &report)
if err == ErrorGone {
return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
}
return report, err
}
func (client *httpClient) FetchNextCounter() (int, error) {
var counter ParallelIndexCounter
err := client.poll("/counter", &counter)
return counter.Index, err
}
func (client *httpClient) PostAbort() error {
return client.post("/abort", nil)
}
func (client *httpClient) ShouldAbort() bool {
err := client.poll("/abort", nil)
if err == ErrorGone {
return true
}
return false
}
func (client *httpClient) Write(p []byte) (int, error) {
resp, err := http.Post(client.serverHost+"/emit-output", "text/plain;charset=UTF-8", bytes.NewReader(p))
if err != nil {
return 0, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return 0, fmt.Errorf("failed to emit output")
}
return len(p), nil
}

View File

@ -0,0 +1,214 @@
/*
The parallel_support package provides the pieces to allow Ginkgo test suites to report to remote listeners.
This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
*/
package parallel_support
import (
"encoding/json"
"io"
"net"
"net/http"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
)
/*
httpServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
It then forwards that communication to attached reporters.
*/
type httpServer struct {
listener net.Listener
handler *ServerHandler
}
//Create a new server, automatically selecting a port
func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) {
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return nil, err
}
return &httpServer{
listener: listener,
handler: newServerHandler(parallelTotal, reporter),
}, nil
}
//Start the server. You don't need to `go s.Start()`, just `s.Start()`
func (server *httpServer) Start() {
httpServer := &http.Server{}
mux := http.NewServeMux()
httpServer.Handler = mux
//streaming endpoints
mux.HandleFunc("/suite-will-begin", server.specSuiteWillBegin)
mux.HandleFunc("/did-run", server.didRun)
mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd)
mux.HandleFunc("/emit-output", server.emitOutput)
//synchronization endpoints
mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted)
mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState)
mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished)
mux.HandleFunc("/aggregated-nonprimary-procs-report", server.handleAggregatedNonprimaryProcsReport)
mux.HandleFunc("/counter", server.handleCounter)
mux.HandleFunc("/up", server.handleUp)
mux.HandleFunc("/abort", server.handleAbort)
go httpServer.Serve(server.listener)
}
//Stop the server
func (server *httpServer) Close() {
server.listener.Close()
}
//The address the server can be reached at. Pass this into the `ForwardingReporter`.
func (server *httpServer) Address() string {
return "http://" + server.listener.Addr().String()
}
func (server *httpServer) GetSuiteDone() chan interface{} {
return server.handler.done
}
func (server *httpServer) GetOutputDestination() io.Writer {
return server.handler.outputDestination
}
func (server *httpServer) SetOutputDestination(w io.Writer) {
server.handler.outputDestination = w
}
func (server *httpServer) RegisterAlive(node int, alive func() bool) {
server.handler.registerAlive(node, alive)
}
//
// Streaming Endpoints
//
//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool {
defer request.Body.Close()
if json.NewDecoder(request.Body).Decode(object) != nil {
writer.WriteHeader(http.StatusBadRequest)
return false
}
return true
}
func (server *httpServer) handleError(err error, writer http.ResponseWriter) bool {
if err == nil {
return false
}
switch err {
case ErrorEarly:
writer.WriteHeader(http.StatusTooEarly)
case ErrorGone:
writer.WriteHeader(http.StatusGone)
case ErrorFailed:
writer.WriteHeader(http.StatusFailedDependency)
default:
writer.WriteHeader(http.StatusInternalServerError)
}
return true
}
func (server *httpServer) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
var report types.Report
if !server.decode(writer, request, &report) {
return
}
server.handleError(server.handler.SpecSuiteWillBegin(report, voidReceiver), writer)
}
func (server *httpServer) didRun(writer http.ResponseWriter, request *http.Request) {
var report types.SpecReport
if !server.decode(writer, request, &report) {
return
}
server.handleError(server.handler.DidRun(report, voidReceiver), writer)
}
func (server *httpServer) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
var report types.Report
if !server.decode(writer, request, &report) {
return
}
server.handleError(server.handler.SpecSuiteDidEnd(report, voidReceiver), writer)
}
func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.Request) {
output, err := io.ReadAll(request.Body)
if err != nil {
writer.WriteHeader(http.StatusInternalServerError)
return
}
var n int
server.handleError(server.handler.EmitOutput(output, &n), writer)
}
func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
var beforeSuiteState BeforeSuiteState
if !server.decode(writer, request, &beforeSuiteState) {
return
}
server.handleError(server.handler.BeforeSuiteCompleted(beforeSuiteState, voidReceiver), writer)
}
func (server *httpServer) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
var beforeSuiteState BeforeSuiteState
if server.handleError(server.handler.BeforeSuiteState(voidSender, &beforeSuiteState), writer) {
return
}
json.NewEncoder(writer).Encode(beforeSuiteState)
}
func (server *httpServer) handleHaveNonprimaryProcsFinished(writer http.ResponseWriter, request *http.Request) {
if server.handleError(server.handler.HaveNonprimaryProcsFinished(voidSender, voidReceiver), writer) {
return
}
writer.WriteHeader(http.StatusOK)
}
func (server *httpServer) handleAggregatedNonprimaryProcsReport(writer http.ResponseWriter, request *http.Request) {
var aggregatedReport types.Report
if server.handleError(server.handler.AggregatedNonprimaryProcsReport(voidSender, &aggregatedReport), writer) {
return
}
json.NewEncoder(writer).Encode(aggregatedReport)
}
func (server *httpServer) handleCounter(writer http.ResponseWriter, request *http.Request) {
var n int
if server.handleError(server.handler.Counter(voidSender, &n), writer) {
return
}
json.NewEncoder(writer).Encode(ParallelIndexCounter{Index: n})
}
func (server *httpServer) handleUp(writer http.ResponseWriter, request *http.Request) {
writer.WriteHeader(http.StatusOK)
}
func (server *httpServer) handleAbort(writer http.ResponseWriter, request *http.Request) {
if request.Method == "GET" {
var shouldAbort bool
server.handler.ShouldAbort(voidSender, &shouldAbort)
if shouldAbort {
writer.WriteHeader(http.StatusGone)
} else {
writer.WriteHeader(http.StatusOK)
}
} else {
server.handler.Abort(voidSender, voidReceiver)
}
}
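A standalone sketch (an editor's illustration, not part of the vendored file) of the long-polling contract that handleError and httpClient.poll implement between them: the server answers 425 Too Early until a result is available, and the client sleeps and retries on 425. The handler and timings below are made up for illustration.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"sync/atomic"
	"time"
)

func main() {
	var ready int32
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if atomic.LoadInt32(&ready) == 0 {
			w.WriteHeader(http.StatusTooEarly) // ErrorEarly: ask the client to poll again
			return
		}
		fmt.Fprint(w, "done") // 200 OK once the state is available
	}))
	defer server.Close()

	// Simulate the state becoming available a little later.
	go func() {
		time.Sleep(150 * time.Millisecond)
		atomic.StoreInt32(&ready, 1)
	}()

	for {
		resp, err := http.Get(server.URL)
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
		if resp.StatusCode == http.StatusTooEarly {
			time.Sleep(50 * time.Millisecond) // mirrors POLLING_INTERVAL
			continue
		}
		fmt.Println("server responded with", resp.Status)
		return
	}
}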

View File

@ -0,0 +1,119 @@
package parallel_support
import (
"net/rpc"
"time"
"github.com/onsi/ginkgo/v2/types"
)
type rpcClient struct {
serverHost string
client *rpc.Client
}
func newRPCClient(serverHost string) *rpcClient {
return &rpcClient{
serverHost: serverHost,
}
}
func (client *rpcClient) Connect() bool {
var err error
if client.client != nil {
return true
}
client.client, err = rpc.DialHTTPPath("tcp", client.serverHost, "/")
if err != nil {
client.client = nil
return false
}
return true
}
func (client *rpcClient) Close() error {
return client.client.Close()
}
func (client *rpcClient) poll(method string, data interface{}) error {
for {
err := client.client.Call(method, voidSender, data)
if err == nil {
return nil
}
switch err.Error() {
case ErrorEarly.Error():
time.Sleep(POLLING_INTERVAL)
case ErrorGone.Error():
return ErrorGone
case ErrorFailed.Error():
return ErrorFailed
default:
return err
}
}
}
func (client *rpcClient) PostSuiteWillBegin(report types.Report) error {
return client.client.Call("Server.SpecSuiteWillBegin", report, voidReceiver)
}
func (client *rpcClient) PostDidRun(report types.SpecReport) error {
return client.client.Call("Server.DidRun", report, voidReceiver)
}
func (client *rpcClient) PostSuiteDidEnd(report types.Report) error {
return client.client.Call("Server.SpecSuiteDidEnd", report, voidReceiver)
}
func (client *rpcClient) Write(p []byte) (int, error) {
var n int
err := client.client.Call("Server.EmitOutput", p, &n)
return n, err
}
func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
beforeSuiteState := BeforeSuiteState{
State: state,
Data: data,
}
return client.client.Call("Server.BeforeSuiteCompleted", beforeSuiteState, voidReceiver)
}
func (client *rpcClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
var beforeSuiteState BeforeSuiteState
err := client.poll("Server.BeforeSuiteState", &beforeSuiteState)
if err == ErrorGone {
return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
}
return beforeSuiteState.State, beforeSuiteState.Data, err
}
func (client *rpcClient) BlockUntilNonprimaryProcsHaveFinished() error {
return client.poll("Server.HaveNonprimaryProcsFinished", voidReceiver)
}
func (client *rpcClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
var report types.Report
err := client.poll("Server.AggregatedNonprimaryProcsReport", &report)
if err == ErrorGone {
return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
}
return report, err
}
func (client *rpcClient) FetchNextCounter() (int, error) {
var counter int
err := client.client.Call("Server.Counter", voidSender, &counter)
return counter, err
}
func (client *rpcClient) PostAbort() error {
return client.client.Call("Server.Abort", voidSender, voidReceiver)
}
func (client *rpcClient) ShouldAbort() bool {
var shouldAbort bool
client.client.Call("Server.ShouldAbort", voidSender, &shouldAbort)
return shouldAbort
}

View File

@ -0,0 +1,75 @@
/*
The parallel_support package provides the pieces to allow Ginkgo test suites to report to remote listeners.
This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
*/
package parallel_support
import (
"io"
"net"
"net/http"
"net/rpc"
"github.com/onsi/ginkgo/v2/reporters"
)
/*
RPCServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
It then forwards that communication to attached reporters.
*/
type RPCServer struct {
listener net.Listener
handler *ServerHandler
}
//Create a new server, automatically selecting a port
func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) {
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return nil, err
}
return &RPCServer{
listener: listener,
handler: newServerHandler(parallelTotal, reporter),
}, nil
}
//Start the server. You don't need to `go s.Start()`, just `s.Start()`
func (server *RPCServer) Start() {
rpcServer := rpc.NewServer()
rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server
httpServer := &http.Server{}
httpServer.Handler = rpcServer
go httpServer.Serve(server.listener)
}
//Stop the server
func (server *RPCServer) Close() {
server.listener.Close()
}
//The address the server can be reached at. Pass this into the `ForwardingReporter`.
func (server *RPCServer) Address() string {
return server.listener.Addr().String()
}
func (server *RPCServer) GetSuiteDone() chan interface{} {
return server.handler.done
}
func (server *RPCServer) GetOutputDestination() io.Writer {
return server.handler.outputDestination
}
func (server *RPCServer) SetOutputDestination(w io.Writer) {
server.handler.outputDestination = w
}
func (server *RPCServer) RegisterAlive(node int, alive func() bool) {
server.handler.registerAlive(node, alive)
}

View File

@ -0,0 +1,202 @@
package parallel_support
import (
"io"
"os"
"sync"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
)
type Void struct{}
var voidReceiver *Void = &Void{}
var voidSender Void
// ServerHandler is an RPC-compatible handler that is shared between the http server and the rpc server.
// It handles all the business logic to avoid duplication between the two servers
type ServerHandler struct {
done chan interface{}
outputDestination io.Writer
reporter reporters.Reporter
alives []func() bool
lock *sync.Mutex
beforeSuiteState BeforeSuiteState
parallelTotal int
counter int
counterLock *sync.Mutex
shouldAbort bool
numSuiteDidBegins int
numSuiteDidEnds int
aggregatedReport types.Report
reportHoldingArea []types.SpecReport
}
func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler {
return &ServerHandler{
reporter: reporter,
lock: &sync.Mutex{},
counterLock: &sync.Mutex{},
alives: make([]func() bool, parallelTotal),
beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid},
parallelTotal: parallelTotal,
outputDestination: os.Stdout,
done: make(chan interface{}),
}
}
func (handler *ServerHandler) SpecSuiteWillBegin(report types.Report, _ *Void) error {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.numSuiteDidBegins += 1
// all summaries are identical, so it's fine to simply emit the last one of these
if handler.numSuiteDidBegins == handler.parallelTotal {
handler.reporter.SuiteWillBegin(report)
for _, summary := range handler.reportHoldingArea {
handler.reporter.WillRun(summary)
handler.reporter.DidRun(summary)
}
handler.reportHoldingArea = nil
}
return nil
}
func (handler *ServerHandler) DidRun(report types.SpecReport, _ *Void) error {
handler.lock.Lock()
defer handler.lock.Unlock()
if handler.numSuiteDidBegins == handler.parallelTotal {
handler.reporter.WillRun(report)
handler.reporter.DidRun(report)
} else {
handler.reportHoldingArea = append(handler.reportHoldingArea, report)
}
return nil
}
func (handler *ServerHandler) SpecSuiteDidEnd(report types.Report, _ *Void) error {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.numSuiteDidEnds += 1
if handler.numSuiteDidEnds == 1 {
handler.aggregatedReport = report
} else {
handler.aggregatedReport = handler.aggregatedReport.Add(report)
}
if handler.numSuiteDidEnds == handler.parallelTotal {
handler.reporter.SuiteDidEnd(handler.aggregatedReport)
close(handler.done)
}
return nil
}
func (handler *ServerHandler) EmitOutput(output []byte, n *int) error {
var err error
*n, err = handler.outputDestination.Write(output)
return err
}
func (handler *ServerHandler) registerAlive(proc int, alive func() bool) {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.alives[proc-1] = alive
}
func (handler *ServerHandler) procIsAlive(proc int) bool {
handler.lock.Lock()
defer handler.lock.Unlock()
alive := handler.alives[proc-1]
if alive == nil {
return true
}
return alive()
}
func (handler *ServerHandler) haveNonprimaryProcsFinished() bool {
for i := 2; i <= handler.parallelTotal; i++ {
if handler.procIsAlive(i) {
return false
}
}
return true
}
func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.beforeSuiteState = beforeSuiteState
return nil
}
func (handler *ServerHandler) BeforeSuiteState(_ Void, beforeSuiteState *BeforeSuiteState) error {
proc1IsAlive := handler.procIsAlive(1)
handler.lock.Lock()
defer handler.lock.Unlock()
if handler.beforeSuiteState.State == types.SpecStateInvalid {
if proc1IsAlive {
return ErrorEarly
} else {
return ErrorGone
}
}
*beforeSuiteState = handler.beforeSuiteState
return nil
}
func (handler *ServerHandler) HaveNonprimaryProcsFinished(_ Void, _ *Void) error {
if handler.haveNonprimaryProcsFinished() {
return nil
} else {
return ErrorEarly
}
}
func (handler *ServerHandler) AggregatedNonprimaryProcsReport(_ Void, report *types.Report) error {
if handler.haveNonprimaryProcsFinished() {
handler.lock.Lock()
defer handler.lock.Unlock()
if handler.numSuiteDidEnds == handler.parallelTotal-1 {
*report = handler.aggregatedReport
return nil
} else {
return ErrorGone
}
} else {
return ErrorEarly
}
}
func (handler *ServerHandler) Counter(_ Void, counter *int) error {
handler.counterLock.Lock()
defer handler.counterLock.Unlock()
*counter = handler.counter
handler.counter++
return nil
}
func (handler *ServerHandler) Abort(_ Void, _ *Void) error {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.shouldAbort = true
return nil
}
func (handler *ServerHandler) ShouldAbort(_ Void, shouldAbort *bool) error {
handler.lock.Lock()
defer handler.lock.Unlock()
*shouldAbort = handler.shouldAbort
return nil
}

View File

@ -0,0 +1,40 @@
package internal
import (
"reflect"
"time"
"github.com/onsi/ginkgo/v2/types"
)
type ReportEntry = types.ReportEntry
func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (ReportEntry, error) {
out := ReportEntry{
Visibility: types.ReportEntryVisibilityAlways,
Name: name,
Time: time.Now(),
Location: cl,
}
var didSetValue = false
for _, arg := range args {
switch reflect.TypeOf(arg) {
case reflect.TypeOf(types.ReportEntryVisibilityAlways):
out.Visibility = arg.(types.ReportEntryVisibility)
case reflect.TypeOf(types.CodeLocation{}):
out.Location = arg.(types.CodeLocation)
case reflect.TypeOf(Offset(0)):
out.Location = types.NewCodeLocation(2 + int(arg.(Offset)))
case reflect.TypeOf(out.Time):
out.Time = arg.(time.Time)
default:
if didSetValue {
return ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg)
}
out.Value = types.WrapEntryValue(arg)
didSetValue = true
}
}
return out, nil
}
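A short sketch (an editor's illustration, not part of the vendored file) of how the argument sniffing above surfaces through the public AddReportEntry API inside an existing Ginkgo suite. Arguments are matched by type rather than position, and only one "value" argument is accepted; the entry names and struct are placeholders.

package example_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
)

var _ = It("records report entries", func() {
	// A plain entry: the struct becomes the entry's Value and visibility defaults to Always.
	AddReportEntry("deployment", struct{ Replicas int }{Replicas: 3})

	// A visibility constant and a time.Time are recognised by type, as in the switch above.
	AddReportEntry("slow step", ReportEntryVisibilityFailureOrVerbose, time.Now())

	// Offset shifts the reported code location up the call stack (useful inside helpers).
	AddReportEntry("from a helper", Offset(1))
})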

71
vendor/github.com/onsi/ginkgo/v2/internal/spec.go generated vendored Normal file
View File

@ -0,0 +1,71 @@
package internal
import (
"strings"
"github.com/onsi/ginkgo/v2/types"
)
type Spec struct {
Nodes Nodes
Skip bool
}
func (s Spec) SubjectID() uint {
return s.Nodes.FirstNodeWithType(types.NodeTypeIt).ID
}
func (s Spec) Text() string {
texts := []string{}
for i := range s.Nodes {
if s.Nodes[i].Text != "" {
texts = append(texts, s.Nodes[i].Text)
}
}
return strings.Join(texts, " ")
}
func (s Spec) FirstNodeWithType(nodeTypes types.NodeType) Node {
return s.Nodes.FirstNodeWithType(nodeTypes)
}
func (s Spec) FlakeAttempts() int {
flakeAttempts := 0
for i := range s.Nodes {
if s.Nodes[i].FlakeAttempts > 0 {
flakeAttempts = s.Nodes[i].FlakeAttempts
}
}
return flakeAttempts
}
type Specs []Spec
func (s Specs) HasAnySpecsMarkedPending() bool {
for i := range s {
if s[i].Nodes.HasNodeMarkedPending() {
return true
}
}
return false
}
func (s Specs) CountWithoutSkip() int {
n := 0
for i := range s {
if !s[i].Skip {
n += 1
}
}
return n
}
func (s Specs) AtIndices(indices SpecIndices) Specs {
out := make(Specs, len(indices))
for i, idx := range indices {
out[i] = s[idx]
}
return out
}

629
vendor/github.com/onsi/ginkgo/v2/internal/suite.go generated vendored Normal file
View File

@ -0,0 +1,629 @@
package internal
import (
"fmt"
"time"
"github.com/onsi/ginkgo/v2/formatter"
"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
"github.com/onsi/ginkgo/v2/internal/parallel_support"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
)
type Phase uint
const (
PhaseBuildTopLevel Phase = iota
PhaseBuildTree
PhaseRun
)
type Suite struct {
tree *TreeNode
topLevelContainers Nodes
phase Phase
suiteNodes Nodes
cleanupNodes Nodes
failer *Failer
reporter reporters.Reporter
writer WriterInterface
outputInterceptor OutputInterceptor
interruptHandler interrupt_handler.InterruptHandlerInterface
config types.SuiteConfig
skipAll bool
report types.Report
currentSpecReport types.SpecReport
currentNode Node
client parallel_support.Client
}
func NewSuite() *Suite {
return &Suite{
tree: &TreeNode{},
phase: PhaseBuildTopLevel,
}
}
func (suite *Suite) BuildTree() error {
// During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelContainers but not yet entered.
// We now enter PhaseBuildTree, where these top level containers are entered and added to the spec tree
suite.phase = PhaseBuildTree
for _, topLevelContainer := range suite.topLevelContainers {
err := suite.PushNode(topLevelContainer)
if err != nil {
return err
}
}
return nil
}
func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, suiteConfig types.SuiteConfig) (bool, bool) {
if suite.phase != PhaseBuildTree {
panic("cannot run before building the tree = call suite.BuildTree() first")
}
ApplyNestedFocusPolicyToTree(suite.tree)
specs := GenerateSpecsFromTreeRoot(suite.tree)
specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig)
suite.phase = PhaseRun
suite.client = client
suite.failer = failer
suite.reporter = reporter
suite.writer = writer
suite.outputInterceptor = outputInterceptor
suite.interruptHandler = interruptHandler
suite.config = suiteConfig
success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)
return success, hasProgrammaticFocus
}
func (suite *Suite) InRunPhase() bool {
return suite.phase == PhaseRun
}
/*
Tree Construction methods
PushNode is used during PhaseBuildTopLevel and PhaseBuildTree
*/
func (suite *Suite) PushNode(node Node) error {
if node.NodeType.Is(types.NodeTypeCleanupInvalid | types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
return suite.pushCleanupNode(node)
}
if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) {
return suite.pushSuiteNode(node)
}
if suite.phase == PhaseRun {
return types.GinkgoErrors.PushingNodeInRunPhase(node.NodeType, node.CodeLocation)
}
if node.MarkedSerial {
firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
if !firstOrderedNode.IsZero() && !firstOrderedNode.MarkedSerial {
return types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(node.CodeLocation, node.NodeType)
}
}
if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
if firstOrderedNode.IsZero() {
return types.GinkgoErrors.SetupNodeNotInOrderedContainer(node.CodeLocation, node.NodeType)
}
}
if node.NodeType == types.NodeTypeContainer {
// During PhaseBuildTopLevel we only track the top level containers without entering them
// We only enter the top level container nodes during PhaseBuildTree
//
// This ensures the tree is only constructed after `go test` has called `flag.Parse()` and gives
// the user an opportunity to load suite configuration information in the `TestX` go test hook just before `RunSpecs`
// is invoked. This makes the lifecycle easier to reason about and solves issues like #693.
if suite.phase == PhaseBuildTopLevel {
suite.topLevelContainers = append(suite.topLevelContainers, node)
return nil
}
if suite.phase == PhaseBuildTree {
parentTree := suite.tree
suite.tree = &TreeNode{Node: node}
parentTree.AppendChild(suite.tree)
err := func() (err error) {
defer func() {
if e := recover(); e != nil {
err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation)
}
}()
node.Body()
return err
}()
suite.tree = parentTree
return err
}
} else {
suite.tree.AppendChild(&TreeNode{Node: node})
return nil
}
return nil
}
func (suite *Suite) pushSuiteNode(node Node) error {
if suite.phase == PhaseBuildTree {
return types.GinkgoErrors.SuiteNodeInNestedContext(node.NodeType, node.CodeLocation)
}
if suite.phase == PhaseRun {
return types.GinkgoErrors.SuiteNodeDuringRunPhase(node.NodeType, node.CodeLocation)
}
switch node.NodeType {
case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
existingBefores := suite.suiteNodes.WithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
if len(existingBefores) > 0 {
return types.GinkgoErrors.MultipleBeforeSuiteNodes(node.NodeType, node.CodeLocation, existingBefores[0].NodeType, existingBefores[0].CodeLocation)
}
case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
existingAfters := suite.suiteNodes.WithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
if len(existingAfters) > 0 {
return types.GinkgoErrors.MultipleAfterSuiteNodes(node.NodeType, node.CodeLocation, existingAfters[0].NodeType, existingAfters[0].CodeLocation)
}
}
suite.suiteNodes = append(suite.suiteNodes, node)
return nil
}
func (suite *Suite) pushCleanupNode(node Node) error {
if suite.phase != PhaseRun || suite.currentNode.IsZero() {
return types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(node.CodeLocation)
}
switch suite.currentNode.NodeType {
case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite, types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
node.NodeType = types.NodeTypeCleanupAfterSuite
case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
node.NodeType = types.NodeTypeCleanupAfterAll
case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite:
return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
default:
node.NodeType = types.NodeTypeCleanupAfterEach
}
node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID
node.NestingLevel = suite.currentNode.NestingLevel
suite.cleanupNodes = append(suite.cleanupNodes, node)
return nil
}
/*
Spec Running methods - used during PhaseRun
*/
func (suite *Suite) CurrentSpecReport() types.SpecReport {
report := suite.currentSpecReport
if suite.writer != nil {
report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
}
return report
}
func (suite *Suite) AddReportEntry(entry ReportEntry) error {
if suite.phase != PhaseRun {
return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
}
suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
return nil
}
func (suite *Suite) isRunningInParallel() bool {
return suite.config.ParallelTotal > 1
}
func (suite *Suite) processCurrentSpecReport() {
suite.reporter.DidRun(suite.currentSpecReport)
if suite.isRunningInParallel() {
suite.client.PostDidRun(suite.currentSpecReport)
}
suite.report.SpecReports = append(suite.report.SpecReports, suite.currentSpecReport)
if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
suite.report.SuiteSucceeded = false
if suite.config.FailFast || suite.currentSpecReport.State.Is(types.SpecStateAborted) {
suite.skipAll = true
if suite.isRunningInParallel() {
suite.client.PostAbort()
}
}
}
}
func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
numSpecsThatWillBeRun := specs.CountWithoutSkip()
suite.report = types.Report{
SuitePath: suitePath,
SuiteDescription: description,
SuiteLabels: suiteLabels,
SuiteConfig: suite.config,
SuiteHasProgrammaticFocus: hasProgrammaticFocus,
PreRunStats: types.PreRunStats{
TotalSpecs: len(specs),
SpecsThatWillRun: numSpecsThatWillBeRun,
},
StartTime: time.Now(),
}
suite.reporter.SuiteWillBegin(suite.report)
if suite.isRunningInParallel() {
suite.client.PostSuiteWillBegin(suite.report)
}
suite.report.SuiteSucceeded = true
suite.runBeforeSuite(numSpecsThatWillBeRun)
if suite.report.SuiteSucceeded {
groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
nextIndex := MakeIncrementingIndexCounter()
if suite.isRunningInParallel() {
nextIndex = suite.client.FetchNextCounter
}
for {
groupedSpecIdx, err := nextIndex()
if err != nil {
suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, fmt.Sprintf("Failed to iterate over specs:\n%s", err.Error()))
suite.report.SuiteSucceeded = false
break
}
if groupedSpecIdx >= len(groupedSpecIndices) {
if suite.config.ParallelProcess == 1 && len(serialGroupedSpecIndices) > 0 {
groupedSpecIndices, serialGroupedSpecIndices, nextIndex = serialGroupedSpecIndices, GroupedSpecIndices{}, MakeIncrementingIndexCounter()
suite.client.BlockUntilNonprimaryProcsHaveFinished()
continue
}
break
}
// the complexity for running groups of specs is very high because of Ordered containers and FlakeAttempts
// we encapsulate that complexity in the notion of a Group that can run
// Group is really just an extension of suite so it gets passed a suite and has access to all its internals
// Note that group is stateful and intended for single use!
newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx]))
}
if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending {
suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set")
suite.report.SuiteSucceeded = false
}
}
suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
interruptStatus := suite.interruptHandler.Status()
if interruptStatus.Interrupted {
suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String())
suite.report.SuiteSucceeded = false
}
suite.report.EndTime = time.Now()
suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime)
if suite.config.ParallelProcess == 1 {
suite.runReportAfterSuite()
}
suite.reporter.SuiteDidEnd(suite.report)
if suite.isRunningInParallel() {
suite.client.PostSuiteDidEnd(suite.report)
}
return suite.report.SuiteSucceeded
}
func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
interruptStatus := suite.interruptHandler.Status()
beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
if !beforeSuiteNode.IsZero() && !interruptStatus.Interrupted && numSpecsThatWillBeRun > 0 {
suite.currentSpecReport = types.SpecReport{
LeafNodeType: beforeSuiteNode.NodeType,
LeafNodeLocation: beforeSuiteNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
}
suite.reporter.WillRun(suite.currentSpecReport)
suite.runSuiteNode(beforeSuiteNode, interruptStatus.Channel)
if suite.currentSpecReport.State.Is(types.SpecStateSkipped) {
suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite")
suite.skipAll = true
}
suite.processCurrentSpecReport()
}
}
func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
suite.currentSpecReport = types.SpecReport{
LeafNodeType: afterSuiteNode.NodeType,
LeafNodeLocation: afterSuiteNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
}
suite.reporter.WillRun(suite.currentSpecReport)
suite.runSuiteNode(afterSuiteNode, suite.interruptHandler.Status().Channel)
suite.processCurrentSpecReport()
}
afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse()
if len(afterSuiteCleanup) > 0 {
for _, cleanupNode := range afterSuiteCleanup {
suite.currentSpecReport = types.SpecReport{
LeafNodeType: cleanupNode.NodeType,
LeafNodeLocation: cleanupNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
}
suite.reporter.WillRun(suite.currentSpecReport)
suite.runSuiteNode(cleanupNode, suite.interruptHandler.Status().Channel)
suite.processCurrentSpecReport()
}
}
}
func (suite *Suite) runReportAfterSuite() {
for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) {
suite.currentSpecReport = types.SpecReport{
LeafNodeType: node.NodeType,
LeafNodeLocation: node.CodeLocation,
LeafNodeText: node.Text,
ParallelProcess: suite.config.ParallelProcess,
}
suite.reporter.WillRun(suite.currentSpecReport)
suite.runReportAfterSuiteNode(node, suite.report)
suite.processCurrentSpecReport()
}
}
func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
nodes := spec.Nodes.WithType(nodeType)
if nodeType == types.NodeTypeReportAfterEach {
nodes = nodes.SortedByDescendingNestingLevel()
}
if nodeType == types.NodeTypeReportBeforeEach {
nodes = nodes.SortedByAscendingNestingLevel()
}
if len(nodes) == 0 {
return
}
for i := range nodes {
suite.writer.Truncate()
suite.outputInterceptor.StartInterceptingOutput()
report := suite.currentSpecReport
nodes[i].Body = func() {
nodes[i].ReportEachBody(report)
}
suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS,
"{{yellow}}Ginkgo received an interrupt signal but is currently running a %s node. To avoid an invalid report the %s node will not be interrupted however subsequent tests will be skipped.{{/}}\n\n{{bold}}The running %s node is at:\n%s.{{/}}",
nodeType, nodeType, nodeType,
nodes[i].CodeLocation,
))
state, failure := suite.runNode(nodes[i], nil, spec.Nodes.BestTextFor(nodes[i]))
suite.interruptHandler.ClearInterruptPlaceholderMessage()
// If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state.
// Also, if the reporter node is ever aborted - always override the state to propagate the abort
if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) {
suite.currentSpecReport.State = state
suite.currentSpecReport.Failure = failure
}
suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
}
}
func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) {
if suite.config.DryRun {
suite.currentSpecReport.State = types.SpecStatePassed
return
}
suite.writer.Truncate()
suite.outputInterceptor.StartInterceptingOutput()
suite.currentSpecReport.StartTime = time.Now()
var err error
switch node.NodeType {
case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite:
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
case types.NodeTypeCleanupAfterSuite:
if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 {
err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
}
if err == nil {
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
}
case types.NodeTypeSynchronizedBeforeSuite:
var data []byte
var runAllProcs bool
if suite.config.ParallelProcess == 1 {
if suite.config.ParallelTotal > 1 {
suite.outputInterceptor.StopInterceptingAndReturnOutput()
suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
}
node.Body = func() { data = node.SynchronizedBeforeSuiteProc1Body() }
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
if suite.config.ParallelTotal > 1 {
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
suite.outputInterceptor.StartInterceptingOutput()
if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
err = suite.client.PostSynchronizedBeforeSuiteCompleted(types.SpecStatePassed, data)
} else {
err = suite.client.PostSynchronizedBeforeSuiteCompleted(suite.currentSpecReport.State, nil)
}
}
runAllProcs = suite.currentSpecReport.State.Is(types.SpecStatePassed) && err == nil
} else {
var proc1State types.SpecState
proc1State, data, err = suite.client.BlockUntilSynchronizedBeforeSuiteData()
switch proc1State {
case types.SpecStatePassed:
runAllProcs = true
case types.SpecStateFailed, types.SpecStatePanicked:
err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1()
case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped:
suite.currentSpecReport.State = proc1State
}
}
if runAllProcs {
node.Body = func() { node.SynchronizedBeforeSuiteAllProcsBody(data) }
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
}
case types.NodeTypeSynchronizedAfterSuite:
node.Body = node.SynchronizedAfterSuiteAllProcsBody
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
if suite.config.ParallelProcess == 1 {
if suite.config.ParallelTotal > 1 {
err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
}
if err == nil {
if suite.config.ParallelTotal > 1 {
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
}
node.Body = node.SynchronizedAfterSuiteProc1Body
state, failure := suite.runNode(node, interruptChannel, "")
if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure
}
}
}
}
if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
}
suite.currentSpecReport.EndTime = time.Now()
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
return
}
func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) {
suite.writer.Truncate()
suite.outputInterceptor.StartInterceptingOutput()
suite.currentSpecReport.StartTime = time.Now()
if suite.config.ParallelTotal > 1 {
aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
if err != nil {
suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
return
}
report = report.Add(aggregatedReport)
}
node.Body = func() { node.ReportAfterSuiteBody(report) }
suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS,
"{{yellow}}Ginkgo received an interrupt signal but is currently running a ReportAfterSuite node. To avoid an invalid report the ReportAfterSuite node will not be interrupted.{{/}}\n\n{{bold}}The running ReportAfterSuite node is at:\n%s.{{/}}",
node.CodeLocation,
))
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, nil, "")
suite.interruptHandler.ClearInterruptPlaceholderMessage()
suite.currentSpecReport.EndTime = time.Now()
suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()
return
}
func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text string) (types.SpecState, types.Failure) {
if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node)
}
suite.currentNode = node
defer func() {
suite.currentNode = Node{}
}()
if suite.config.EmitSpecProgress {
if text == "" {
text = "TOP-LEVEL"
}
s := fmt.Sprintf("[%s] %s\n %s\n", node.NodeType.String(), text, node.CodeLocation.String())
suite.writer.Write([]byte(s))
}
var failure types.Failure
failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
if node.NodeType.Is(types.NodeTypeIt) || node.NodeType.Is(types.NodeTypesForSuiteLevelNodes) {
failure.FailureNodeContext = types.FailureNodeIsLeafNode
} else if node.NestingLevel <= 0 {
failure.FailureNodeContext = types.FailureNodeAtTopLevel
} else {
failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1
}
outcomeC := make(chan types.SpecState)
failureC := make(chan types.Failure)
go func() {
finished := false
defer func() {
if e := recover(); e != nil || !finished {
suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e)
}
outcome, failureFromRun := suite.failer.Drain()
outcomeC <- outcome
failureC <- failureFromRun
}()
node.Body()
finished = true
}()
select {
case outcome := <-outcomeC:
failureFromRun := <-failureC
if outcome == types.SpecStatePassed {
return outcome, types.Failure{}
}
failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic
return outcome, failure
case <-interruptChannel:
failure.Message, failure.Location = suite.interruptHandler.InterruptMessageWithStackTraces(), node.CodeLocation
return types.SpecStateInterrupted, failure
}
}
func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
return types.Failure{
Message: message,
Location: node.CodeLocation,
FailureNodeContext: types.FailureNodeIsLeafNode,
FailureNodeType: node.NodeType,
FailureNodeLocation: node.CodeLocation,
}
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
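A standalone sketch (an editor's illustration, not part of the vendored file) of the scheduling idea in runSpecs: every parallel process pulls the next group index from one shared counter (FetchNextCounter against the parallel-support server in real runs, simulated here with a local atomic), so groups are handed out dynamically rather than pre-assigned. Serial groups are then run only by process #1 once the other processes have finished.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	parallelizableGroups := []string{"group-A", "group-B", "group-C", "group-D", "group-E"}
	var next int64 = -1 // the shared counter: each increment hands out an index exactly once

	var wg sync.WaitGroup
	for proc := 1; proc <= 3; proc++ {
		wg.Add(1)
		go func(proc int) {
			defer wg.Done()
			for {
				idx := int(atomic.AddInt64(&next, 1))
				if idx >= len(parallelizableGroups) {
					return // nothing left; in Ginkgo, process #1 would now run the serial groups
				}
				fmt.Printf("process %d runs %s\n", proc, parallelizableGroups[idx])
			}
		}(proc)
	}
	wg.Wait()
}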

View File

@ -0,0 +1,128 @@
package testingtproxy

import (
	"fmt"
	"io"
	"os"

	"github.com/onsi/ginkgo/v2/internal"
	"github.com/onsi/ginkgo/v2/types"
)

type failFunc func(message string, callerSkip ...int)
type skipFunc func(message string, callerSkip ...int)
type cleanupFunc func(args ...interface{})
type reportFunc func() types.SpecReport

func New(writer io.Writer, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, offset int) *ginkgoTestingTProxy {
	return &ginkgoTestingTProxy{
		fail:    fail,
		offset:  offset,
		writer:  writer,
		skip:    skip,
		cleanup: cleanup,
		report:  report,
	}
}

type ginkgoTestingTProxy struct {
	fail    failFunc
	skip    skipFunc
	cleanup cleanupFunc
	report  reportFunc
	offset  int
	writer  io.Writer
}

func (t *ginkgoTestingTProxy) Cleanup(f func()) {
	t.cleanup(f, internal.Offset(1))
}

func (t *ginkgoTestingTProxy) Setenv(key, value string) {
	originalValue, exists := os.LookupEnv(key)
	if exists {
		t.cleanup(os.Setenv, key, originalValue, internal.Offset(1))
	} else {
		t.cleanup(os.Unsetenv, key, internal.Offset(1))
	}

	err := os.Setenv(key, value)
	if err != nil {
		t.fail(fmt.Sprintf("Failed to set environment variable: %v", err), 1)
	}
}

func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
	t.fail(fmt.Sprintln(args...), t.offset)
}

func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
	t.fail(fmt.Sprintf(format, args...), t.offset)
}

func (t *ginkgoTestingTProxy) Fail() {
	t.fail("failed", t.offset)
}

func (t *ginkgoTestingTProxy) FailNow() {
	t.fail("failed", t.offset)
}

func (t *ginkgoTestingTProxy) Failed() bool {
	return t.report().Failed()
}

func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
	t.fail(fmt.Sprintln(args...), t.offset)
}

func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
	t.fail(fmt.Sprintf(format, args...), t.offset)
}

func (t *ginkgoTestingTProxy) Helper() {
	// No-op
}

func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
	fmt.Fprintln(t.writer, args...)
}

func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
	t.Log(fmt.Sprintf(format, args...))
}

func (t *ginkgoTestingTProxy) Name() string {
	return t.report().FullText()
}

func (t *ginkgoTestingTProxy) Parallel() {
	// No-op
}

func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
	t.skip(fmt.Sprintln(args...), t.offset)
}

func (t *ginkgoTestingTProxy) SkipNow() {
	t.skip("skip", t.offset)
}

func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
	t.skip(fmt.Sprintf(format, args...), t.offset)
}

func (t *ginkgoTestingTProxy) Skipped() bool {
	return t.report().State.Is(types.SpecStateSkipped)
}

func (t *ginkgoTestingTProxy) TempDir() string {
	tmpDir, err := os.MkdirTemp("", "ginkgo")
	if err != nil {
		t.fail(fmt.Sprintf("Failed to create temporary directory: %v", err), 1)
		return ""
	}
	t.cleanup(os.RemoveAll, tmpDir)

	return tmpDir
}
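Note on the proxy above: it adapts Ginkgo's fail/skip/cleanup/report hooks to the subset of testing.T that test helpers usually need. A hedged usage sketch follows, assuming the public GinkgoT() helper is backed by this proxy; the package, suite, spec, and env var names are made up for illustration.

package proxy_example_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
)

func TestProxyExample(t *testing.T) {
	RunSpecs(t, "Proxy Example Suite")
}

var _ = It("behaves like a testing.T", func() {
	t := GinkgoT()
	dir := t.TempDir()               // removed again via the registered cleanup func
	t.Setenv("EXAMPLE_VAR", "value") // original value restored after the spec
	t.Logf("using temp dir %s", dir) // written to the suite's GinkgoWriter
})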

77
vendor/github.com/onsi/ginkgo/v2/internal/tree.go generated vendored Normal file
View File

@ -0,0 +1,77 @@
package internal

import "github.com/onsi/ginkgo/v2/types"

type TreeNode struct {
	Node     Node
	Parent   *TreeNode
	Children TreeNodes
}

func (tn *TreeNode) AppendChild(child *TreeNode) {
	tn.Children = append(tn.Children, child)
	child.Parent = tn
}

func (tn *TreeNode) AncestorNodeChain() Nodes {
	if tn.Parent == nil || tn.Parent.Node.IsZero() {
		return Nodes{tn.Node}
	}
	return append(tn.Parent.AncestorNodeChain(), tn.Node)
}

type TreeNodes []*TreeNode

func (tn TreeNodes) Nodes() Nodes {
	out := make(Nodes, len(tn))
	for i := range tn {
		out[i] = tn[i].Node
	}
	return out
}

func (tn TreeNodes) WithID(id uint) *TreeNode {
	for i := range tn {
		if tn[i].Node.ID == id {
			return tn[i]
		}
	}

	return nil
}

func GenerateSpecsFromTreeRoot(tree *TreeNode) Specs {
	var walkTree func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs
	walkTree = func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs {
		tests := Specs{}

		nodes := make(Nodes, len(trees))
		for i := range trees {
			nodes[i] = trees[i].Node
			nodes[i].NestingLevel = nestingLevel
		}

		for i := range nodes {
			if !nodes[i].NodeType.Is(types.NodeTypesForContainerAndIt) {
				continue
			}
			leftNodes, rightNodes := nodes.SplitAround(nodes[i])
			leftNodes = leftNodes.WithoutType(types.NodeTypesForContainerAndIt)
			rightNodes = rightNodes.WithoutType(types.NodeTypesForContainerAndIt)

			leftNodes = lNodes.CopyAppend(leftNodes...)
			rightNodes = rightNodes.CopyAppend(rNodes...)

			if nodes[i].NodeType.Is(types.NodeTypeIt) {
				tests = append(tests, Spec{Nodes: leftNodes.CopyAppend(nodes[i]).CopyAppend(rightNodes...)})
			} else {
				treeNode := trees.WithID(nodes[i].ID)
				tests = append(tests, walkTree(nestingLevel+1, leftNodes.CopyAppend(nodes[i]), rightNodes, treeNode.Children)...)
			}
		}

		return tests
	}

	return walkTree(0, Nodes{}, Nodes{}, tree.Children)
}
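Note on GenerateSpecsFromTreeRoot: at each level the walk keeps the non-container/non-It nodes to an It's left and right (setup and teardown), recurses into child containers, and emits one Spec per It carrying its full ancestor chain. A hedged illustration using a small user-facing tree follows; it assumes the usual RunSpecs bootstrap shown in the previous sketch, and the node lists in the comments are my reading of the walk, not output copied from the library.

package tree_example_test

import (
	. "github.com/onsi/ginkgo/v2"
)

var _ = Describe("outer", func() {
	BeforeEach(func() {}) // setup node: copied into every spec generated beneath "outer"

	It("first", func() {}) // spec nodes: outer, BeforeEach, first

	Describe("inner", func() {
		It("second", func() {}) // spec nodes: outer, BeforeEach, inner, second
	})
})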

103
vendor/github.com/onsi/ginkgo/v2/internal/writer.go generated vendored Normal file
View File

@ -0,0 +1,103 @@
package internal

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

type WriterMode uint

const (
	WriterModeStreamAndBuffer WriterMode = iota
	WriterModeBufferOnly
)

type WriterInterface interface {
	io.Writer

	Truncate()
	Bytes() []byte
}

//Writer implements WriterInterface and GinkgoWriterInterface
type Writer struct {
	buffer     *bytes.Buffer
	outWriter  io.Writer
	lock       *sync.Mutex
	mode       WriterMode
	teeWriters []io.Writer
}

func NewWriter(outWriter io.Writer) *Writer {
	return &Writer{
		buffer:    &bytes.Buffer{},
		lock:      &sync.Mutex{},
		outWriter: outWriter,
		mode:      WriterModeStreamAndBuffer,
	}
}

func (w *Writer) SetMode(mode WriterMode) {
	w.lock.Lock()
	defer w.lock.Unlock()
	w.mode = mode
}

func (w *Writer) Write(b []byte) (n int, err error) {
	w.lock.Lock()
	defer w.lock.Unlock()

	for _, teeWriter := range w.teeWriters {
		teeWriter.Write(b)
	}

	if w.mode == WriterModeStreamAndBuffer {
		w.outWriter.Write(b)
	}
	return w.buffer.Write(b)
}

func (w *Writer) Truncate() {
	w.lock.Lock()
	defer w.lock.Unlock()
	w.buffer.Reset()
}

func (w *Writer) Bytes() []byte {
	w.lock.Lock()
	defer w.lock.Unlock()
	b := w.buffer.Bytes()
	copied := make([]byte, len(b))
	copy(copied, b)
	return copied
}

//GinkgoWriterInterface
func (w *Writer) TeeTo(writer io.Writer) {
	w.lock.Lock()
	defer w.lock.Unlock()
	w.teeWriters = append(w.teeWriters, writer)
}

func (w *Writer) ClearTeeWriters() {
	w.lock.Lock()
	defer w.lock.Unlock()
	w.teeWriters = []io.Writer{}
}

func (w *Writer) Print(a ...interface{}) {
	fmt.Fprint(w, a...)
}

func (w *Writer) Printf(format string, a ...interface{}) {
	fmt.Fprintf(w, format, a...)
}

func (w *Writer) Println(a ...interface{}) {
	fmt.Fprintln(w, a...)
}
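Note on the Writer above: it buffers everything written during a spec, optionally streaming to the underlying writer (WriterModeStreamAndBuffer) and mirroring to any tee writers, while Truncate resets the per-spec buffer. A hedged usage sketch via the public GinkgoWriter, which I understand to be backed by this type, follows; it assumes the same RunSpecs bootstrap as in the earlier sketch, and the log file name is illustrative.

package writer_example_test

import (
	"os"

	. "github.com/onsi/ginkgo/v2"
)

var _ = It("logs through GinkgoWriter", func() {
	logFile, err := os.Create("spec.log") // illustrative file name
	if err == nil {
		GinkgoWriter.TeeTo(logFile) // mirror subsequent writes to the file as well
		DeferCleanup(func() {
			GinkgoWriter.ClearTeeWriters()
			logFile.Close()
		})
	}

	GinkgoWriter.Println("buffered; replayed only if the spec fails, unless -v is used")
	GinkgoWriter.Printf("spec %s is running\n", CurrentSpecReport().FullText())
})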