build: move e2e dependencies into e2e/go.mod

Several packages are only used while running the e2e suite. These
packages are less important to update, as they cannot influence the
final executable that is part of the Ceph-CSI container image.

By moving these dependencies out of the main Ceph-CSI go.mod, it becomes
easier to identify whether a reported CVE affects Ceph-CSI itself, or only
the testing (like most of the Kubernetes CVEs).

Signed-off-by: Niels de Vos <ndevos@ibm.com>
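
For illustration only, a test-only module of this kind gets its own go.mod under e2e/. The sketch below is hypothetical; the module path, Go version and requirements are placeholders rather than the contents of the actual file:

// e2e/go.mod (hypothetical sketch; paths and versions are placeholders)
module github.com/ceph/ceph-csi/e2e

go 1.23

require (
	github.com/onsi/ginkgo/v2 v2.22.0 // test framework, only needed by the e2e suite
	github.com/onsi/gomega v1.36.0    // matchers, only needed by the e2e suite
)
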
Niels de Vos authored on 2025-03-04 08:57:28 +01:00, committed by mergify[bot]
parent 15da101b1b
commit bec6090996
8047 changed files with 1407827 additions and 3453 deletions


@@ -0,0 +1,9 @@
package internal
func MakeIncrementingIndexCounter() func() (int, error) {
idx := -1
return func() (int, error) {
idx += 1
return idx, nil
}
}
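
MakeIncrementingIndexCounter returns a closure that hands out 0, 1, 2, ... on successive calls. A standalone usage sketch; the lower-case copy of the helper is only there to keep the example self-contained:

package main

import "fmt"

// makeIncrementingIndexCounter mirrors the helper above: each call to the
// returned function yields the next index, starting at 0.
func makeIncrementingIndexCounter() func() (int, error) {
	idx := -1
	return func() (int, error) {
		idx += 1
		return idx, nil
	}
}

func main() {
	next := makeIncrementingIndexCounter()
	for i := 0; i < 3; i++ {
		idx, _ := next() // the error is always nil in this sketch
		fmt.Println(idx) // prints 0, 1, 2
	}
}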


@@ -0,0 +1,99 @@
package internal
import (
"fmt"
"sync"
"github.com/onsi/ginkgo/v2/types"
)
type Failer struct {
lock *sync.Mutex
failure types.Failure
state types.SpecState
}
func NewFailer() *Failer {
return &Failer{
lock: &sync.Mutex{},
state: types.SpecStatePassed,
}
}
func (f *Failer) GetState() types.SpecState {
f.lock.Lock()
defer f.lock.Unlock()
return f.state
}
func (f *Failer) GetFailure() types.Failure {
f.lock.Lock()
defer f.lock.Unlock()
return f.failure
}
func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
f.state = types.SpecStatePanicked
f.failure = types.Failure{
Message: "Test Panicked",
Location: location,
ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
}
}
}
func (f *Failer) Fail(message string, location types.CodeLocation) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
f.state = types.SpecStateFailed
f.failure = types.Failure{
Message: message,
Location: location,
}
}
}
func (f *Failer) Skip(message string, location types.CodeLocation) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
f.state = types.SpecStateSkipped
f.failure = types.Failure{
Message: message,
Location: location,
}
}
}
func (f *Failer) AbortSuite(message string, location types.CodeLocation) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
f.state = types.SpecStateAborted
f.failure = types.Failure{
Message: message,
Location: location,
}
}
}
func (f *Failer) Drain() (types.SpecState, types.Failure) {
f.lock.Lock()
defer f.lock.Unlock()
failure := f.failure
outcome := f.state
f.state = types.SpecStatePassed
f.failure = types.Failure{}
return outcome, failure
}
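
Failer records only the first transition away from SpecStatePassed, and Drain hands the outcome back while resetting the state for the next spec. A self-contained sketch of that first-write-wins pattern, using simplified types rather than the real ginkgo ones:

package main

import (
	"fmt"
	"sync"
)

// recorder keeps the first failure message it sees; later calls are ignored
// until drain() resets it, mirroring Failer.Fail/Drain above.
type recorder struct {
	mu      sync.Mutex
	failed  bool
	message string
}

func (r *recorder) fail(msg string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if !r.failed { // first write wins
		r.failed = true
		r.message = msg
	}
}

func (r *recorder) drain() (bool, string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	failed, msg := r.failed, r.message
	r.failed, r.message = false, "" // reset for the next spec
	return failed, msg
}

func main() {
	r := &recorder{}
	r.fail("assertion failed")
	r.fail("second failure is ignored")
	fmt.Println(r.drain()) // true assertion failed
	fmt.Println(r.drain()) // false (state was reset)
}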

e2e/vendor/github.com/onsi/ginkgo/v2/internal/focus.go (generated, vendored, new file, 122 lines)

@@ -0,0 +1,122 @@
package internal
import (
"regexp"
"strings"
"github.com/onsi/ginkgo/v2/types"
)
/*
If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to
unmark the container's focus. This gives developers a more intuitive experience when debugging specs.
It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus -
this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container:
As a common example, consider:
FDescribe("something to debug", func() {
It("works", func() {...})
It("works", func() {...})
FIt("doesn't work", func() {...})
It("works", func() {...})
})
here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container.
The nested policy applied by this function enables this behavior.
*/
func ApplyNestedFocusPolicyToTree(tree *TreeNode) {
var walkTree func(tree *TreeNode) bool
walkTree = func(tree *TreeNode) bool {
if tree.Node.MarkedPending {
return false
}
hasFocusedDescendant := false
for _, child := range tree.Children {
childHasFocus := walkTree(child)
hasFocusedDescendant = hasFocusedDescendant || childHasFocus
}
tree.Node.MarkedFocus = tree.Node.MarkedFocus && !hasFocusedDescendant
return tree.Node.MarkedFocus || hasFocusedDescendant
}
walkTree(tree)
}
/*
Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus"
It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs.
When both programmatic and file filters are provided their results are ANDed together. If multiple kinds of filters are provided, the file filters run first followed by the regex filters.
This function sets the `Skip` property on specs by applying Ginkgo's focus policy:
- If there are no CLI arguments and no programmatic focus, do nothing.
- If a spec somewhere has programmatic focus skip any specs that have no programmatic focus.
- If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters.
*Note:* specs with pending nodes are Skipped when created by NewSpec.
*/
func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) {
focusString := strings.Join(suiteConfig.FocusStrings, "|")
skipString := strings.Join(suiteConfig.SkipStrings, "|")
type SkipCheck func(spec Spec) bool
// by default, skip any specs marked pending
skipChecks := []SkipCheck{func(spec Spec) bool { return spec.Nodes.HasNodeMarkedPending() }}
hasProgrammaticFocus := false
for _, spec := range specs {
if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() {
hasProgrammaticFocus = true
break
}
}
if hasProgrammaticFocus {
skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() })
}
if suiteConfig.LabelFilter != "" {
labelFilter, _ := types.ParseLabelFilter(suiteConfig.LabelFilter)
skipChecks = append(skipChecks, func(spec Spec) bool {
return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels()))
})
}
if len(suiteConfig.FocusFiles) > 0 {
focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles)
skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) })
}
if len(suiteConfig.SkipFiles) > 0 {
skipFilters, _ := types.ParseFileFilters(suiteConfig.SkipFiles)
skipChecks = append(skipChecks, func(spec Spec) bool { return skipFilters.Matches(spec.Nodes.CodeLocations()) })
}
if focusString != "" {
// skip specs that don't match the focus string
re := regexp.MustCompile(focusString)
skipChecks = append(skipChecks, func(spec Spec) bool { return !re.MatchString(description + " " + spec.Text()) })
}
if skipString != "" {
// skip specs that match the skip string
re := regexp.MustCompile(skipString)
skipChecks = append(skipChecks, func(spec Spec) bool { return re.MatchString(description + " " + spec.Text()) })
}
// skip specs if shouldSkip() is true. note that we do nothing if shouldSkip() is false to avoid overwriting skip status established by the node's pending status
processedSpecs := Specs{}
for _, spec := range specs {
for _, skipCheck := range skipChecks {
if skipCheck(spec) {
spec.Skip = true
break
}
}
processedSpecs = append(processedSpecs, spec)
}
return processedSpecs, hasProgrammaticFocus
}
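
ApplyFocusToSpecs composes a list of independent skip checks and ORs them: the first check that matches marks the spec as skipped. A standalone sketch of that composition, with a simplified spec type and made-up check functions:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

type spec struct {
	text    string
	focused bool
}

func main() {
	specs := []spec{
		{text: "creates a volume", focused: false},
		{text: "deletes a volume", focused: true},
	}

	// Each check answers "should this spec be skipped?"; checks are ORed.
	focusRE := regexp.MustCompile(strings.Join([]string{"volume"}, "|"))
	skipChecks := []func(spec) bool{
		func(s spec) bool { return !focusRE.MatchString(s.text) }, // regex focus filter
		func(s spec) bool { return !s.focused },                   // programmatic focus
	}

	for _, s := range specs {
		skip := false
		for _, check := range skipChecks {
			if check(s) {
				skip = true
				break
			}
		}
		fmt.Printf("%-17s skip=%v\n", s.text, skip)
	}
}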


@@ -0,0 +1,28 @@
package global
import (
"github.com/onsi/ginkgo/v2/internal"
)
var Suite *internal.Suite
var Failer *internal.Failer
var backupSuite *internal.Suite
func init() {
InitializeGlobals()
}
func InitializeGlobals() {
Failer = internal.NewFailer()
Suite = internal.NewSuite()
}
func PushClone() error {
var err error
backupSuite, err = Suite.Clone()
return err
}
func PopClone() {
Suite = backupSuite
}

e2e/vendor/github.com/onsi/ginkgo/v2/internal/group.go (generated, vendored, new file, 383 lines)

@@ -0,0 +1,383 @@
package internal
import (
"fmt"
"time"
"github.com/onsi/ginkgo/v2/types"
)
type runOncePair struct {
//nodeId should only run once...
nodeID uint
nodeType types.NodeType
//...for specs in a hierarchy that includes this context
containerID uint
}
func (pair runOncePair) isZero() bool {
return pair.nodeID == 0
}
func runOncePairForNode(node Node, containerID uint) runOncePair {
return runOncePair{
nodeID: node.ID,
nodeType: node.NodeType,
containerID: containerID,
}
}
type runOncePairs []runOncePair
func runOncePairsForSpec(spec Spec) runOncePairs {
pairs := runOncePairs{}
containers := spec.Nodes.WithType(types.NodeTypeContainer)
for _, node := range spec.Nodes {
if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
pairs = append(pairs, runOncePairForNode(node, containers.FirstWithNestingLevel(node.NestingLevel-1).ID))
} else if node.NodeType.Is(types.NodeTypeBeforeEach|types.NodeTypeJustBeforeEach|types.NodeTypeAfterEach|types.NodeTypeJustAfterEach) && node.MarkedOncePerOrdered {
passedIntoAnOrderedContainer := false
firstOrderedContainerDeeperThanNode := containers.FirstSatisfying(func(container Node) bool {
passedIntoAnOrderedContainer = passedIntoAnOrderedContainer || container.MarkedOrdered
return container.NestingLevel >= node.NestingLevel && passedIntoAnOrderedContainer
})
if firstOrderedContainerDeeperThanNode.IsZero() {
continue
}
pairs = append(pairs, runOncePairForNode(node, firstOrderedContainerDeeperThanNode.ID))
}
}
return pairs
}
func (pairs runOncePairs) runOncePairFor(nodeID uint) runOncePair {
for i := range pairs {
if pairs[i].nodeID == nodeID {
return pairs[i]
}
}
return runOncePair{}
}
func (pairs runOncePairs) hasRunOncePair(pair runOncePair) bool {
for i := range pairs {
if pairs[i] == pair {
return true
}
}
return false
}
func (pairs runOncePairs) withType(nodeTypes types.NodeType) runOncePairs {
count := 0
for i := range pairs {
if pairs[i].nodeType.Is(nodeTypes) {
count++
}
}
out, j := make(runOncePairs, count), 0
for i := range pairs {
if pairs[i].nodeType.Is(nodeTypes) {
out[j] = pairs[i]
j++
}
}
return out
}
type group struct {
suite *Suite
specs Specs
runOncePairs map[uint]runOncePairs
runOnceTracker map[runOncePair]types.SpecState
succeeded bool
failedInARunOnceBefore bool
continueOnFailure bool
}
func newGroup(suite *Suite) *group {
return &group{
suite: suite,
runOncePairs: map[uint]runOncePairs{},
runOnceTracker: map[runOncePair]types.SpecState{},
succeeded: true,
failedInARunOnceBefore: false,
continueOnFailure: false,
}
}
func (g *group) initialReportForSpec(spec Spec) types.SpecReport {
return types.SpecReport{
ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
LeafNodeType: types.NodeTypeIt,
LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
ParallelProcess: g.suite.config.ParallelProcess,
RunningInParallel: g.suite.isRunningInParallel(),
IsSerial: spec.Nodes.HasNodeMarkedSerial(),
IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(),
MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(),
}
}
func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) {
if spec.Nodes.HasNodeMarkedPending() {
return types.SpecStatePending, types.Failure{}
}
if spec.Skip {
return types.SpecStateSkipped, types.Failure{}
}
if g.suite.interruptHandler.Status().Interrupted() || g.suite.skipAll {
return types.SpecStateSkipped, types.Failure{}
}
if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) {
return types.SpecStateSkipped, types.Failure{}
}
if !g.succeeded && !g.continueOnFailure {
return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
"Spec skipped because an earlier spec in an ordered container failed")
}
if g.failedInARunOnceBefore && g.continueOnFailure {
return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
"Spec skipped because a BeforeAll node failed")
}
beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach)
for _, pair := range beforeOncePairs {
if g.runOnceTracker[pair].Is(types.SpecStateSkipped) {
return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
fmt.Sprintf("Spec skipped because Skip() was called in %s", pair.nodeType))
}
}
if g.suite.config.DryRun {
return types.SpecStatePassed, types.Failure{}
}
return g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure
}
func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool {
lastSpecID := uint(0)
for idx := range g.specs {
if g.specs[idx].Skip {
continue
}
sID := g.specs[idx].SubjectID()
if g.runOncePairs[sID].hasRunOncePair(pair) {
lastSpecID = sID
}
}
return lastSpecID == specID
}
func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) bool {
failedInARunOnceBefore := false
pairs := g.runOncePairs[spec.SubjectID()]
nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll)
nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel()
nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...)
nodes = append(nodes, spec.Nodes.FirstNodeWithType(types.NodeTypeIt))
terminatingNode, terminatingPair := Node{}, runOncePair{}
deadline := time.Time{}
if spec.SpecTimeout() > 0 {
deadline = time.Now().Add(spec.SpecTimeout())
}
for _, node := range nodes {
oncePair := pairs.runOncePairFor(node.ID)
if !oncePair.isZero() && g.runOnceTracker[oncePair].Is(types.SpecStatePassed) {
continue
}
g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node))
g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
if !oncePair.isZero() {
g.runOnceTracker[oncePair] = g.suite.currentSpecReport.State
}
if g.suite.currentSpecReport.State != types.SpecStatePassed {
terminatingNode, terminatingPair = node, oncePair
failedInARunOnceBefore = !terminatingPair.isZero()
break
}
}
afterNodeWasRun := map[uint]bool{}
includeDeferCleanups := false
for {
nodes := spec.Nodes.WithType(types.NodeTypeAfterEach)
nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()
nodes = append(spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel(), nodes...)
if !terminatingNode.IsZero() {
nodes = nodes.WithinNestingLevel(terminatingNode.NestingLevel)
}
if includeDeferCleanups {
nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()...)
nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse()...)
}
nodes = nodes.Filter(func(node Node) bool {
if afterNodeWasRun[node.ID] {
//this node has already been run on this attempt, don't rerun it
return false
}
var pair runOncePair
switch node.NodeType {
case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll:
// check if we were generated in an AfterNode that has already run
if afterNodeWasRun[node.NodeIDWhereCleanupWasGenerated] {
return true // we were, so we should definitely run this cleanup now
}
// looks like this cleanup node was generated by a before node or an It.
// the run-once status of a cleanup node is governed by the run-once status of its generator
pair = pairs.runOncePairFor(node.NodeIDWhereCleanupWasGenerated)
default:
pair = pairs.runOncePairFor(node.ID)
}
if pair.isZero() {
// this node is not governed by any run-once policy, we should run it
return true
}
// it's our last chance to run if we're the last spec for our oncePair
isLastSpecWithPair := g.isLastSpecWithPair(spec.SubjectID(), pair)
switch g.suite.currentSpecReport.State {
case types.SpecStatePassed: //this attempt is passing...
return isLastSpecWithPair //...we should run once if this is our last chance
case types.SpecStateSkipped: //the spec was skipped by the user...
if isLastSpecWithPair {
return true //...we're the last spec, so we should run the AfterNode
}
if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel {
return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run
}
case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: // the spec has failed...
if isFinalAttempt {
if g.continueOnFailure {
return isLastSpecWithPair || failedInARunOnceBefore //...we're configured to continue on failures - so we should only run if we're the last spec for this pair or if we failed in a runOnceBefore (which means we _are_ the last spec to run)
} else {
return true //...this was the last attempt and continueOnFailure is false therefore we are the last spec to run and so the AfterNode should run
}
}
if !terminatingPair.isZero() { // ...and it failed in a run-once node, which will be running again
if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) {
return terminatingNode.ID == node.NodeIDWhereCleanupWasGenerated // we should run this node if we're a clean-up generated by it
} else {
return terminatingNode.NestingLevel == node.NestingLevel // ...or if we're at the same nesting level
}
}
case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted
return true //...that means the test run is over and we should clean up the stack. Run the AfterNode
}
return false
})
if len(nodes) == 0 && includeDeferCleanups {
break
}
for _, node := range nodes {
afterNodeWasRun[node.ID] = true
state, failure := g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node))
g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
if g.suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted {
g.suite.currentSpecReport.State = state
g.suite.currentSpecReport.Failure = failure
} else if state.Is(types.SpecStateFailureStates) {
g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, types.AdditionalFailure{State: state, Failure: failure})
}
}
includeDeferCleanups = true
}
return failedInARunOnceBefore
}
func (g *group) run(specs Specs) {
g.specs = specs
g.continueOnFailure = specs[0].Nodes.FirstNodeMarkedOrdered().MarkedContinueOnFailure
for _, spec := range g.specs {
g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec)
}
for _, spec := range g.specs {
g.suite.selectiveLock.Lock()
g.suite.currentSpecReport = g.initialReportForSpec(spec)
g.suite.selectiveLock.Unlock()
g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec)
g.suite.reporter.WillRun(g.suite.currentSpecReport)
g.suite.reportEach(spec, types.NodeTypeReportBeforeEach)
skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending)
g.suite.currentSpecReport.StartTime = time.Now()
failedInARunOnceBefore := false
if !skip {
var maxAttempts = 1
if g.suite.config.MustPassRepeatedly > 0 {
maxAttempts = g.suite.config.MustPassRepeatedly
g.suite.currentSpecReport.MaxMustPassRepeatedly = maxAttempts
} else if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
maxAttempts = max(1, spec.MustPassRepeatedly())
} else if g.suite.config.FlakeAttempts > 0 {
maxAttempts = g.suite.config.FlakeAttempts
g.suite.currentSpecReport.MaxFlakeAttempts = maxAttempts
} else if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
maxAttempts = max(1, spec.FlakeAttempts())
}
for attempt := 0; attempt < maxAttempts; attempt++ {
g.suite.currentSpecReport.NumAttempts = attempt + 1
g.suite.writer.Truncate()
g.suite.outputInterceptor.StartInterceptingOutput()
if attempt > 0 {
if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRepeat, Attempt: attempt})
}
if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRetry, Attempt: attempt})
}
}
failedInARunOnceBefore = g.attemptSpec(attempt == maxAttempts-1, spec)
g.suite.currentSpecReport.EndTime = time.Now()
g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime)
g.suite.currentSpecReport.CapturedGinkgoWriterOutput += string(g.suite.writer.Bytes())
g.suite.currentSpecReport.CapturedStdOutErr += g.suite.outputInterceptor.StopInterceptingAndReturnOutput()
if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates | types.SpecStateSkipped) {
break
}
}
if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
break
} else if attempt < maxAttempts-1 {
af := types.AdditionalFailure{State: g.suite.currentSpecReport.State, Failure: g.suite.currentSpecReport.Failure}
af.Failure.Message = fmt.Sprintf("Failure recorded during attempt %d:\n%s", attempt+1, af.Failure.Message)
g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, af)
}
}
}
}
g.suite.reportEach(spec, types.NodeTypeReportAfterEach)
g.suite.processCurrentSpecReport()
if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
g.succeeded = false
g.failedInARunOnceBefore = g.failedInARunOnceBefore || failedInARunOnceBefore
}
g.suite.selectiveLock.Lock()
g.suite.currentSpecReport = types.SpecReport{}
g.suite.selectiveLock.Unlock()
}
}
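
The run loop above picks a single retry budget per spec: suite-level MustPassRepeatedly wins over everything, then the per-node decoration, then suite-level FlakeAttempts, then the per-node decoration, otherwise a single attempt. A small standalone sketch of that precedence; the config struct and field names are simplified stand-ins:

package main

import "fmt"

type attemptConfig struct {
	suiteMustPassRepeatedly int // suite-level repeat setting
	suiteFlakeAttempts      int // suite-level flake-attempts setting
	specMustPassRepeatedly  int // per-node MustPassRepeatedly decoration
	specFlakeAttempts       int // per-node FlakeAttempts decoration
}

// maxAttempts mirrors the precedence used in group.run above.
func maxAttempts(c attemptConfig) int {
	switch {
	case c.suiteMustPassRepeatedly > 0:
		return c.suiteMustPassRepeatedly
	case c.specMustPassRepeatedly > 0:
		return max(1, c.specMustPassRepeatedly)
	case c.suiteFlakeAttempts > 0:
		return c.suiteFlakeAttempts
	case c.specFlakeAttempts > 0:
		return max(1, c.specFlakeAttempts)
	default:
		return 1
	}
}

func main() {
	fmt.Println(maxAttempts(attemptConfig{specFlakeAttempts: 3})) // 3
	fmt.Println(maxAttempts(attemptConfig{
		suiteFlakeAttempts: 5,
		specFlakeAttempts:  3,
	})) // 5: the suite-level setting wins
	fmt.Println(maxAttempts(attemptConfig{})) // 1
}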


@@ -0,0 +1,177 @@
package interrupt_handler
import (
"os"
"os/signal"
"sync"
"syscall"
"time"
"github.com/onsi/ginkgo/v2/internal/parallel_support"
)
var ABORT_POLLING_INTERVAL = 500 * time.Millisecond
type InterruptCause uint
const (
InterruptCauseInvalid InterruptCause = iota
InterruptCauseSignal
InterruptCauseAbortByOtherProcess
)
type InterruptLevel uint
const (
InterruptLevelUninterrupted InterruptLevel = iota
InterruptLevelCleanupAndReport
InterruptLevelReportOnly
InterruptLevelBailOut
)
func (ic InterruptCause) String() string {
switch ic {
case InterruptCauseSignal:
return "Interrupted by User"
case InterruptCauseAbortByOtherProcess:
return "Interrupted by Other Ginkgo Process"
}
return "INVALID_INTERRUPT_CAUSE"
}
type InterruptStatus struct {
Channel chan interface{}
Level InterruptLevel
Cause InterruptCause
}
func (s InterruptStatus) Interrupted() bool {
return s.Level != InterruptLevelUninterrupted
}
func (s InterruptStatus) Message() string {
return s.Cause.String()
}
func (s InterruptStatus) ShouldIncludeProgressReport() bool {
return s.Cause != InterruptCauseAbortByOtherProcess
}
type InterruptHandlerInterface interface {
Status() InterruptStatus
}
type InterruptHandler struct {
c chan interface{}
lock *sync.Mutex
level InterruptLevel
cause InterruptCause
client parallel_support.Client
stop chan interface{}
signals []os.Signal
requestAbortCheck chan interface{}
}
func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler {
if len(signals) == 0 {
signals = []os.Signal{os.Interrupt, syscall.SIGTERM}
}
handler := &InterruptHandler{
c: make(chan interface{}),
lock: &sync.Mutex{},
stop: make(chan interface{}),
requestAbortCheck: make(chan interface{}),
client: client,
signals: signals,
}
handler.registerForInterrupts()
return handler
}
func (handler *InterruptHandler) Stop() {
close(handler.stop)
}
func (handler *InterruptHandler) registerForInterrupts() {
// os signal handling
signalChannel := make(chan os.Signal, 1)
signal.Notify(signalChannel, handler.signals...)
// cross-process abort handling
var abortChannel chan interface{}
if handler.client != nil {
abortChannel = make(chan interface{})
go func() {
pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL)
for {
select {
case <-pollTicker.C:
if handler.client.ShouldAbort() {
close(abortChannel)
pollTicker.Stop()
return
}
case <-handler.requestAbortCheck:
if handler.client.ShouldAbort() {
close(abortChannel)
pollTicker.Stop()
return
}
case <-handler.stop:
pollTicker.Stop()
return
}
}
}()
}
go func(abortChannel chan interface{}) {
var interruptCause InterruptCause
for {
select {
case <-signalChannel:
interruptCause = InterruptCauseSignal
case <-abortChannel:
interruptCause = InterruptCauseAbortByOtherProcess
case <-handler.stop:
signal.Stop(signalChannel)
return
}
abortChannel = nil
handler.lock.Lock()
oldLevel := handler.level
handler.cause = interruptCause
if handler.level == InterruptLevelUninterrupted {
handler.level = InterruptLevelCleanupAndReport
} else if handler.level == InterruptLevelCleanupAndReport {
handler.level = InterruptLevelReportOnly
} else if handler.level == InterruptLevelReportOnly {
handler.level = InterruptLevelBailOut
}
if handler.level != oldLevel {
close(handler.c)
handler.c = make(chan interface{})
}
handler.lock.Unlock()
}
}(abortChannel)
}
func (handler *InterruptHandler) Status() InterruptStatus {
handler.lock.Lock()
status := InterruptStatus{
Level: handler.level,
Channel: handler.c,
Cause: handler.cause,
}
handler.lock.Unlock()
if handler.client != nil && handler.client.ShouldAbort() && !status.Interrupted() {
close(handler.requestAbortCheck)
<-status.Channel
return handler.Status()
}
return status
}
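
Each additional interrupt escalates the level (cleanup-and-report, then report-only, then bail out), and every escalation closes the old status channel so that anything blocked on it wakes up. A standalone sketch of that signal-driven escalation, with simplified level names:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"sync"
	"syscall"
	"time"
)

type level int

const (
	uninterrupted level = iota
	cleanupAndReport
	reportOnly
	bailOut
)

func main() {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)

	var mu sync.Mutex
	current := uninterrupted
	notify := make(chan struct{}) // closed and replaced on every escalation

	go func() {
		for range sigs {
			mu.Lock()
			if current < bailOut {
				current++
				close(notify)                // wake anyone waiting on the old channel
				notify = make(chan struct{}) // later waiters get a fresh channel
			}
			fmt.Println("interrupt level is now", current)
			mu.Unlock()
		}
	}()

	fmt.Println("press Ctrl-C repeatedly within 30s to see the level escalate")
	time.Sleep(30 * time.Second)
}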


@@ -0,0 +1,15 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
// +build freebsd openbsd netbsd dragonfly darwin linux solaris
package interrupt_handler
import (
"os"
"os/signal"
"syscall"
)
func SwallowSigQuit() {
c := make(chan os.Signal, 1024)
signal.Notify(c, syscall.SIGQUIT)
}


@@ -0,0 +1,8 @@
//go:build windows
// +build windows
package interrupt_handler
func SwallowSigQuit() {
//noop
}

e2e/vendor/github.com/onsi/ginkgo/v2/internal/node.go (generated, vendored, new file, 938 lines)

@@ -0,0 +1,938 @@
package internal
import (
"context"
"fmt"
"reflect"
"sort"
"sync"
"time"
"github.com/onsi/ginkgo/v2/types"
)
var _global_node_id_counter = uint(0)
var _global_id_mutex = &sync.Mutex{}
func UniqueNodeID() uint {
// There's a race in the internal integration tests if we don't make
// accessing _global_node_id_counter safe across goroutines.
_global_id_mutex.Lock()
defer _global_id_mutex.Unlock()
_global_node_id_counter += 1
return _global_node_id_counter
}
type Node struct {
ID uint
NodeType types.NodeType
Text string
Body func(SpecContext)
CodeLocation types.CodeLocation
NestingLevel int
HasContext bool
SynchronizedBeforeSuiteProc1Body func(SpecContext) []byte
SynchronizedBeforeSuiteProc1BodyHasContext bool
SynchronizedBeforeSuiteAllProcsBody func(SpecContext, []byte)
SynchronizedBeforeSuiteAllProcsBodyHasContext bool
SynchronizedAfterSuiteAllProcsBody func(SpecContext)
SynchronizedAfterSuiteAllProcsBodyHasContext bool
SynchronizedAfterSuiteProc1Body func(SpecContext)
SynchronizedAfterSuiteProc1BodyHasContext bool
ReportEachBody func(SpecContext, types.SpecReport)
ReportSuiteBody func(SpecContext, types.Report)
MarkedFocus bool
MarkedPending bool
MarkedSerial bool
MarkedOrdered bool
MarkedContinueOnFailure bool
MarkedOncePerOrdered bool
FlakeAttempts int
MustPassRepeatedly int
Labels Labels
PollProgressAfter time.Duration
PollProgressInterval time.Duration
NodeTimeout time.Duration
SpecTimeout time.Duration
GracePeriod time.Duration
NodeIDWhereCleanupWasGenerated uint
}
// Decoration Types
type focusType bool
type pendingType bool
type serialType bool
type orderedType bool
type continueOnFailureType bool
type honorsOrderedType bool
type suppressProgressReporting bool
const Focus = focusType(true)
const Pending = pendingType(true)
const Serial = serialType(true)
const Ordered = orderedType(true)
const ContinueOnFailure = continueOnFailureType(true)
const OncePerOrdered = honorsOrderedType(true)
const SuppressProgressReporting = suppressProgressReporting(true)
type FlakeAttempts uint
type MustPassRepeatedly uint
type Offset uint
type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing
type Labels []string
type PollProgressInterval time.Duration
type PollProgressAfter time.Duration
type NodeTimeout time.Duration
type SpecTimeout time.Duration
type GracePeriod time.Duration
func (l Labels) MatchesLabelFilter(query string) bool {
return types.MustParseLabelFilter(query)(l)
}
func UnionOfLabels(labels ...Labels) Labels {
out := Labels{}
seen := map[string]bool{}
for _, labelSet := range labels {
for _, label := range labelSet {
if !seen[label] {
seen[label] = true
out = append(out, label)
}
}
}
return out
}
func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) {
decorations := []interface{}{}
remainingArgs := []interface{}{}
for _, arg := range args {
if isDecoration(arg) {
decorations = append(decorations, arg)
} else {
remainingArgs = append(remainingArgs, arg)
}
}
return decorations, remainingArgs
}
func isDecoration(arg interface{}) bool {
switch t := reflect.TypeOf(arg); {
case t == nil:
return false
case t == reflect.TypeOf(Offset(0)):
return true
case t == reflect.TypeOf(types.CodeLocation{}):
return true
case t == reflect.TypeOf(Focus):
return true
case t == reflect.TypeOf(Pending):
return true
case t == reflect.TypeOf(Serial):
return true
case t == reflect.TypeOf(Ordered):
return true
case t == reflect.TypeOf(ContinueOnFailure):
return true
case t == reflect.TypeOf(OncePerOrdered):
return true
case t == reflect.TypeOf(SuppressProgressReporting):
return true
case t == reflect.TypeOf(FlakeAttempts(0)):
return true
case t == reflect.TypeOf(MustPassRepeatedly(0)):
return true
case t == reflect.TypeOf(Labels{}):
return true
case t == reflect.TypeOf(PollProgressInterval(0)):
return true
case t == reflect.TypeOf(PollProgressAfter(0)):
return true
case t == reflect.TypeOf(NodeTimeout(0)):
return true
case t == reflect.TypeOf(SpecTimeout(0)):
return true
case t == reflect.TypeOf(GracePeriod(0)):
return true
case t.Kind() == reflect.Slice && isSliceOfDecorations(arg):
return true
default:
return false
}
}
func isSliceOfDecorations(slice interface{}) bool {
vSlice := reflect.ValueOf(slice)
if vSlice.Len() == 0 {
return false
}
for i := 0; i < vSlice.Len(); i++ {
if !isDecoration(vSlice.Index(i).Interface()) {
return false
}
}
return true
}
var contextType = reflect.TypeOf(new(context.Context)).Elem()
var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) {
baseOffset := 2
node := Node{
ID: UniqueNodeID(),
NodeType: nodeType,
Text: text,
Labels: Labels{},
CodeLocation: types.NewCodeLocation(baseOffset),
NestingLevel: -1,
PollProgressAfter: -1,
PollProgressInterval: -1,
GracePeriod: -1,
}
errors := []error{}
appendError := func(err error) {
if err != nil {
errors = append(errors, err)
}
}
args = unrollInterfaceSlice(args)
remainingArgs := []interface{}{}
// First get the CodeLocation up-to-date
for _, arg := range args {
switch v := arg.(type) {
case Offset:
node.CodeLocation = types.NewCodeLocation(baseOffset + int(v))
case types.CodeLocation:
node.CodeLocation = v
default:
remainingArgs = append(remainingArgs, arg)
}
}
labelsSeen := map[string]bool{}
trackedFunctionError := false
args = remainingArgs
remainingArgs = []interface{}{}
// now process the rest of the args
for _, arg := range args {
switch t := reflect.TypeOf(arg); {
case t == reflect.TypeOf(float64(0)):
break // ignore deprecated timeouts
case t == reflect.TypeOf(Focus):
node.MarkedFocus = bool(arg.(focusType))
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Focus"))
}
case t == reflect.TypeOf(Pending):
node.MarkedPending = bool(arg.(pendingType))
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Pending"))
}
case t == reflect.TypeOf(Serial):
node.MarkedSerial = bool(arg.(serialType))
if !labelsSeen["Serial"] {
node.Labels = append(node.Labels, "Serial")
}
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Serial"))
}
case t == reflect.TypeOf(Ordered):
node.MarkedOrdered = bool(arg.(orderedType))
if !nodeType.Is(types.NodeTypeContainer) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered"))
}
case t == reflect.TypeOf(ContinueOnFailure):
node.MarkedContinueOnFailure = bool(arg.(continueOnFailureType))
if !nodeType.Is(types.NodeTypeContainer) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "ContinueOnFailure"))
}
case t == reflect.TypeOf(OncePerOrdered):
node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType))
if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered"))
}
case t == reflect.TypeOf(SuppressProgressReporting):
deprecationTracker.TrackDeprecation(types.Deprecations.SuppressProgressReporting())
case t == reflect.TypeOf(FlakeAttempts(0)):
node.FlakeAttempts = int(arg.(FlakeAttempts))
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "FlakeAttempts"))
}
case t == reflect.TypeOf(MustPassRepeatedly(0)):
node.MustPassRepeatedly = int(arg.(MustPassRepeatedly))
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "MustPassRepeatedly"))
}
case t == reflect.TypeOf(PollProgressAfter(0)):
node.PollProgressAfter = time.Duration(arg.(PollProgressAfter))
if nodeType.Is(types.NodeTypeContainer) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressAfter"))
}
case t == reflect.TypeOf(PollProgressInterval(0)):
node.PollProgressInterval = time.Duration(arg.(PollProgressInterval))
if nodeType.Is(types.NodeTypeContainer) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressInterval"))
}
case t == reflect.TypeOf(NodeTimeout(0)):
node.NodeTimeout = time.Duration(arg.(NodeTimeout))
if nodeType.Is(types.NodeTypeContainer) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "NodeTimeout"))
}
case t == reflect.TypeOf(SpecTimeout(0)):
node.SpecTimeout = time.Duration(arg.(SpecTimeout))
if !nodeType.Is(types.NodeTypeIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SpecTimeout"))
}
case t == reflect.TypeOf(GracePeriod(0)):
node.GracePeriod = time.Duration(arg.(GracePeriod))
if nodeType.Is(types.NodeTypeContainer) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod"))
}
case t == reflect.TypeOf(Labels{}):
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label"))
}
for _, label := range arg.(Labels) {
if !labelsSeen[label] {
labelsSeen[label] = true
label, err := types.ValidateAndCleanupLabel(label, node.CodeLocation)
node.Labels = append(node.Labels, label)
appendError(err)
}
}
case t.Kind() == reflect.Func:
if nodeType.Is(types.NodeTypeContainer) {
if node.Body != nil {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
if t.NumOut() > 0 || t.NumIn() > 0 {
appendError(types.GinkgoErrors.InvalidBodyTypeForContainer(t, node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
body := arg.(func())
node.Body = func(SpecContext) { body() }
} else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) {
if node.ReportEachBody == nil {
if fn, ok := arg.(func(types.SpecReport)); ok {
node.ReportEachBody = func(_ SpecContext, r types.SpecReport) { fn(r) }
} else {
node.ReportEachBody = arg.(func(SpecContext, types.SpecReport))
node.HasContext = true
}
} else {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
} else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
if node.ReportSuiteBody == nil {
if fn, ok := arg.(func(types.Report)); ok {
node.ReportSuiteBody = func(_ SpecContext, r types.Report) { fn(r) }
} else {
node.ReportSuiteBody = arg.(func(SpecContext, types.Report))
node.HasContext = true
}
} else {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
} else if nodeType.Is(types.NodeTypeSynchronizedBeforeSuite) {
if node.SynchronizedBeforeSuiteProc1Body != nil && node.SynchronizedBeforeSuiteAllProcsBody != nil {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
if node.SynchronizedBeforeSuiteProc1Body == nil {
body, hasContext := extractSynchronizedBeforeSuiteProc1Body(arg)
if body == nil {
appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t, node.CodeLocation))
trackedFunctionError = true
}
node.SynchronizedBeforeSuiteProc1Body, node.SynchronizedBeforeSuiteProc1BodyHasContext = body, hasContext
} else if node.SynchronizedBeforeSuiteAllProcsBody == nil {
body, hasContext := extractSynchronizedBeforeSuiteAllProcsBody(arg)
if body == nil {
appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t, node.CodeLocation))
trackedFunctionError = true
}
node.SynchronizedBeforeSuiteAllProcsBody, node.SynchronizedBeforeSuiteAllProcsBodyHasContext = body, hasContext
}
} else if nodeType.Is(types.NodeTypeSynchronizedAfterSuite) {
if node.SynchronizedAfterSuiteAllProcsBody != nil && node.SynchronizedAfterSuiteProc1Body != nil {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
body, hasContext := extractBodyFunction(deprecationTracker, node.CodeLocation, arg)
if body == nil {
appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
if node.SynchronizedAfterSuiteAllProcsBody == nil {
node.SynchronizedAfterSuiteAllProcsBody, node.SynchronizedAfterSuiteAllProcsBodyHasContext = body, hasContext
} else if node.SynchronizedAfterSuiteProc1Body == nil {
node.SynchronizedAfterSuiteProc1Body, node.SynchronizedAfterSuiteProc1BodyHasContext = body, hasContext
}
} else {
if node.Body != nil {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
node.Body, node.HasContext = extractBodyFunction(deprecationTracker, node.CodeLocation, arg)
if node.Body == nil {
appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
}
default:
remainingArgs = append(remainingArgs, arg)
}
}
// validations
if node.MarkedPending && node.MarkedFocus {
appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
}
if node.MarkedContinueOnFailure && !node.MarkedOrdered {
appendError(types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation))
}
hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext
if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 {
appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType))
}
if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportBeforeSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError {
appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
}
if node.NodeType.Is(types.NodeTypeSynchronizedBeforeSuite) && !trackedFunctionError && (node.SynchronizedBeforeSuiteProc1Body == nil || node.SynchronizedBeforeSuiteAllProcsBody == nil) {
appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
}
if node.NodeType.Is(types.NodeTypeSynchronizedAfterSuite) && !trackedFunctionError && (node.SynchronizedAfterSuiteProc1Body == nil || node.SynchronizedAfterSuiteAllProcsBody == nil) {
appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
}
for _, arg := range remainingArgs {
appendError(types.GinkgoErrors.UnknownDecorator(node.CodeLocation, nodeType, arg))
}
if node.FlakeAttempts > 0 && node.MustPassRepeatedly > 0 {
appendError(types.GinkgoErrors.InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(node.CodeLocation, nodeType))
}
if len(errors) > 0 {
return Node{}, errors
}
return node, errors
}
var doneType = reflect.TypeOf(make(Done))
func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg interface{}) (func(SpecContext), bool) {
t := reflect.TypeOf(arg)
if t.NumOut() > 0 || t.NumIn() > 1 {
return nil, false
}
if t.NumIn() == 1 {
if t.In(0) == doneType {
deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl)
deprecatedAsyncBody := arg.(func(Done))
return func(SpecContext) { deprecatedAsyncBody(make(Done)) }, false
} else if t.In(0).Implements(specContextType) {
return arg.(func(SpecContext)), true
} else if t.In(0).Implements(contextType) {
body := arg.(func(context.Context))
return func(c SpecContext) { body(c) }, true
}
return nil, false
}
body := arg.(func())
return func(SpecContext) { body() }, false
}
var byteType = reflect.TypeOf([]byte{})
func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) []byte, bool) {
t := reflect.TypeOf(arg)
v := reflect.ValueOf(arg)
if t.NumOut() > 1 || t.NumIn() > 1 {
return nil, false
} else if t.NumOut() == 1 && t.Out(0) != byteType {
return nil, false
} else if t.NumIn() == 1 && !t.In(0).Implements(contextType) {
return nil, false
}
hasContext := t.NumIn() == 1
return func(c SpecContext) []byte {
var out []reflect.Value
if hasContext {
out = v.Call([]reflect.Value{reflect.ValueOf(c)})
} else {
out = v.Call([]reflect.Value{})
}
if len(out) == 1 {
return (out[0].Interface()).([]byte)
} else {
return []byte{}
}
}, hasContext
}
func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecContext, []byte), bool) {
t := reflect.TypeOf(arg)
v := reflect.ValueOf(arg)
hasContext, hasByte := false, false
if t.NumOut() > 0 || t.NumIn() > 2 {
return nil, false
} else if t.NumIn() == 2 && t.In(0).Implements(contextType) && t.In(1) == byteType {
hasContext, hasByte = true, true
} else if t.NumIn() == 1 && t.In(0).Implements(contextType) {
hasContext = true
} else if t.NumIn() == 1 && t.In(0) == byteType {
hasByte = true
} else if t.NumIn() != 0 {
return nil, false
}
return func(c SpecContext, b []byte) {
in := []reflect.Value{}
if hasContext {
in = append(in, reflect.ValueOf(c))
}
if hasByte {
in = append(in, reflect.ValueOf(b))
}
v.Call(in)
}, hasContext
}
var errInterface = reflect.TypeOf((*error)(nil)).Elem()
func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) {
decorations, remainingArgs := PartitionDecorations(args...)
baseOffset := 2
cl := types.NewCodeLocation(baseOffset)
finalArgs := []interface{}{}
for _, arg := range decorations {
switch t := reflect.TypeOf(arg); {
case t == reflect.TypeOf(Offset(0)):
cl = types.NewCodeLocation(baseOffset + int(arg.(Offset)))
case t == reflect.TypeOf(types.CodeLocation{}):
cl = arg.(types.CodeLocation)
default:
finalArgs = append(finalArgs, arg)
}
}
finalArgs = append(finalArgs, cl)
if len(remainingArgs) == 0 {
return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)}
}
callback := reflect.ValueOf(remainingArgs[0])
if !(callback.Kind() == reflect.Func) {
return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)}
}
callArgs := []reflect.Value{}
for _, arg := range remainingArgs[1:] {
callArgs = append(callArgs, reflect.ValueOf(arg))
}
hasContext := false
t := callback.Type()
if t.NumIn() > 0 {
if t.In(0).Implements(specContextType) {
hasContext = true
} else if t.In(0).Implements(contextType) && (len(callArgs) == 0 || !callArgs[0].Type().Implements(contextType)) {
hasContext = true
}
}
handleFailure := func(out []reflect.Value) {
if len(out) == 0 {
return
}
last := out[len(out)-1]
if last.Type().Implements(errInterface) && !last.IsNil() {
fail(fmt.Sprintf("DeferCleanup callback returned error: %v", last), cl)
}
}
if hasContext {
finalArgs = append(finalArgs, func(c SpecContext) {
out := callback.Call(append([]reflect.Value{reflect.ValueOf(c)}, callArgs...))
handleFailure(out)
})
} else {
finalArgs = append(finalArgs, func() {
out := callback.Call(callArgs)
handleFailure(out)
})
}
return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs...)
}
func (n Node) IsZero() bool {
return n.ID == 0
}
/* Nodes */
type Nodes []Node
func (n Nodes) Clone() Nodes {
nodes := make(Nodes, len(n))
copy(nodes, n)
return nodes
}
func (n Nodes) CopyAppend(nodes ...Node) Nodes {
numN := len(n)
out := make(Nodes, numN+len(nodes))
copy(out, n)
for j, node := range nodes {
out[numN+j] = node
}
return out
}
func (n Nodes) SplitAround(pivot Node) (Nodes, Nodes) {
pivotIdx := len(n)
for i := range n {
if n[i].ID == pivot.ID {
pivotIdx = i
break
}
}
left := n[:pivotIdx]
right := Nodes{}
if pivotIdx+1 < len(n) {
right = n[pivotIdx+1:]
}
return left, right
}
func (n Nodes) FirstNodeWithType(nodeTypes types.NodeType) Node {
for i := range n {
if n[i].NodeType.Is(nodeTypes) {
return n[i]
}
}
return Node{}
}
func (n Nodes) WithType(nodeTypes types.NodeType) Nodes {
count := 0
for i := range n {
if n[i].NodeType.Is(nodeTypes) {
count++
}
}
out, j := make(Nodes, count), 0
for i := range n {
if n[i].NodeType.Is(nodeTypes) {
out[j] = n[i]
j++
}
}
return out
}
func (n Nodes) WithoutType(nodeTypes types.NodeType) Nodes {
count := 0
for i := range n {
if !n[i].NodeType.Is(nodeTypes) {
count++
}
}
out, j := make(Nodes, count), 0
for i := range n {
if !n[i].NodeType.Is(nodeTypes) {
out[j] = n[i]
j++
}
}
return out
}
func (n Nodes) WithoutNode(nodeToExclude Node) Nodes {
idxToExclude := len(n)
for i := range n {
if n[i].ID == nodeToExclude.ID {
idxToExclude = i
break
}
}
if idxToExclude == len(n) {
return n
}
out, j := make(Nodes, len(n)-1), 0
for i := range n {
if i == idxToExclude {
continue
}
out[j] = n[i]
j++
}
return out
}
func (n Nodes) Filter(filter func(Node) bool) Nodes {
trufa, count := make([]bool, len(n)), 0
for i := range n {
if filter(n[i]) {
trufa[i] = true
count += 1
}
}
out, j := make(Nodes, count), 0
for i := range n {
if trufa[i] {
out[j] = n[i]
j++
}
}
return out
}
func (n Nodes) FirstSatisfying(filter func(Node) bool) Node {
for i := range n {
if filter(n[i]) {
return n[i]
}
}
return Node{}
}
func (n Nodes) WithinNestingLevel(deepestNestingLevel int) Nodes {
count := 0
for i := range n {
if n[i].NestingLevel <= deepestNestingLevel {
count++
}
}
out, j := make(Nodes, count), 0
for i := range n {
if n[i].NestingLevel <= deepestNestingLevel {
out[j] = n[i]
j++
}
}
return out
}
func (n Nodes) SortedByDescendingNestingLevel() Nodes {
out := make(Nodes, len(n))
copy(out, n)
sort.SliceStable(out, func(i int, j int) bool {
return out[i].NestingLevel > out[j].NestingLevel
})
return out
}
func (n Nodes) SortedByAscendingNestingLevel() Nodes {
out := make(Nodes, len(n))
copy(out, n)
sort.SliceStable(out, func(i int, j int) bool {
return out[i].NestingLevel < out[j].NestingLevel
})
return out
}
func (n Nodes) FirstWithNestingLevel(level int) Node {
for i := range n {
if n[i].NestingLevel == level {
return n[i]
}
}
return Node{}
}
func (n Nodes) Reverse() Nodes {
out := make(Nodes, len(n))
for i := range n {
out[len(n)-1-i] = n[i]
}
return out
}
func (n Nodes) Texts() []string {
out := make([]string, len(n))
for i := range n {
out[i] = n[i].Text
}
return out
}
func (n Nodes) Labels() [][]string {
out := make([][]string, len(n))
for i := range n {
if n[i].Labels == nil {
out[i] = []string{}
} else {
out[i] = []string(n[i].Labels)
}
}
return out
}
func (n Nodes) UnionOfLabels() []string {
out := []string{}
seen := map[string]bool{}
for i := range n {
for _, label := range n[i].Labels {
if !seen[label] {
seen[label] = true
out = append(out, label)
}
}
}
return out
}
func (n Nodes) CodeLocations() []types.CodeLocation {
out := make([]types.CodeLocation, len(n))
for i := range n {
out[i] = n[i].CodeLocation
}
return out
}
func (n Nodes) BestTextFor(node Node) string {
if node.Text != "" {
return node.Text
}
parentNestingLevel := node.NestingLevel - 1
for i := range n {
if n[i].Text != "" && n[i].NestingLevel == parentNestingLevel {
return n[i].Text
}
}
return ""
}
func (n Nodes) ContainsNodeID(id uint) bool {
for i := range n {
if n[i].ID == id {
return true
}
}
return false
}
func (n Nodes) HasNodeMarkedPending() bool {
for i := range n {
if n[i].MarkedPending {
return true
}
}
return false
}
func (n Nodes) HasNodeMarkedFocus() bool {
for i := range n {
if n[i].MarkedFocus {
return true
}
}
return false
}
func (n Nodes) HasNodeMarkedSerial() bool {
for i := range n {
if n[i].MarkedSerial {
return true
}
}
return false
}
func (n Nodes) FirstNodeMarkedOrdered() Node {
for i := range n {
if n[i].MarkedOrdered {
return n[i]
}
}
return Node{}
}
func (n Nodes) IndexOfFirstNodeMarkedOrdered() int {
for i := range n {
if n[i].MarkedOrdered {
return i
}
}
return -1
}
func (n Nodes) GetMaxFlakeAttempts() int {
maxFlakeAttempts := 0
for i := range n {
if n[i].FlakeAttempts > 0 {
maxFlakeAttempts = n[i].FlakeAttempts
}
}
return maxFlakeAttempts
}
func (n Nodes) GetMaxMustPassRepeatedly() int {
maxMustPassRepeatedly := 0
for i := range n {
if n[i].MustPassRepeatedly > 0 {
maxMustPassRepeatedly = n[i].MustPassRepeatedly
}
}
return maxMustPassRepeatedly
}
func unrollInterfaceSlice(args interface{}) []interface{} {
v := reflect.ValueOf(args)
if v.Kind() != reflect.Slice {
return []interface{}{args}
}
out := []interface{}{}
for i := 0; i < v.Len(); i++ {
el := reflect.ValueOf(v.Index(i).Interface())
if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) {
out = append(out, unrollInterfaceSlice(el.Interface())...)
} else {
out = append(out, v.Index(i).Interface())
}
}
return out
}
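
PartitionDecorations separates decorator values from the remaining arguments by inspecting their types. A simplified standalone sketch of the same idea using a plain type switch instead of reflection; the decoration types here are local stand-ins, not the ginkgo ones:

package main

import "fmt"

// Local stand-ins for a couple of decorator types.
type focusType bool
type labels []string

const focus = focusType(true)

// partition splits args into decorations and everything else,
// mirroring the intent of PartitionDecorations above.
func partition(args ...interface{}) (decorations, remaining []interface{}) {
	for _, arg := range args {
		switch arg.(type) {
		case focusType, labels:
			decorations = append(decorations, arg)
		default:
			remaining = append(remaining, arg)
		}
	}
	return decorations, remaining
}

func main() {
	body := func() { fmt.Println("spec body") }
	dec, rest := partition(focus, labels{"fast"}, body)
	fmt.Println(len(dec), "decorations,", len(rest), "other argument(s)") // 2 decorations, 1 other argument(s)
}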


@@ -0,0 +1,171 @@
package internal
import (
"math/rand"
"sort"
"github.com/onsi/ginkgo/v2/types"
)
type SortableSpecs struct {
Specs Specs
Indexes []int
}
func NewSortableSpecs(specs Specs) *SortableSpecs {
indexes := make([]int, len(specs))
for i := range specs {
indexes[i] = i
}
return &SortableSpecs{
Specs: specs,
Indexes: indexes,
}
}
func (s *SortableSpecs) Len() int { return len(s.Indexes) }
func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[j], s.Indexes[i] }
func (s *SortableSpecs) Less(i, j int) bool {
a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]]
aNodes, bNodes := a.Nodes.WithType(types.NodeTypesForContainerAndIt), b.Nodes.WithType(types.NodeTypesForContainerAndIt)
firstOrderedAIdx, firstOrderedBIdx := aNodes.IndexOfFirstNodeMarkedOrdered(), bNodes.IndexOfFirstNodeMarkedOrdered()
if firstOrderedAIdx > -1 && firstOrderedBIdx > -1 && aNodes[firstOrderedAIdx].ID == bNodes[firstOrderedBIdx].ID {
// strictly preserve order within an ordered container. ID will track this as IDs are generated monotonically
return aNodes.FirstNodeWithType(types.NodeTypeIt).ID < bNodes.FirstNodeWithType(types.NodeTypeIt).ID
}
// if either spec is in an ordered container - only use the nodes up to the outermost ordered container
if firstOrderedAIdx > -1 {
aNodes = aNodes[:firstOrderedAIdx+1]
}
if firstOrderedBIdx > -1 {
bNodes = bNodes[:firstOrderedBIdx+1]
}
for i := 0; i < len(aNodes) && i < len(bNodes); i++ {
aCL, bCL := aNodes[i].CodeLocation, bNodes[i].CodeLocation
if aCL.FileName != bCL.FileName {
return aCL.FileName < bCL.FileName
}
if aCL.LineNumber != bCL.LineNumber {
return aCL.LineNumber < bCL.LineNumber
}
}
// either everything is equal or we have different lengths of CLs
if len(aNodes) != len(bNodes) {
return len(aNodes) < len(bNodes)
}
// ok, now we are sure everything was equal. so we use the spec text to break ties
for i := 0; i < len(aNodes); i++ {
if aNodes[i].Text != bNodes[i].Text {
return aNodes[i].Text < bNodes[i].Text
}
}
// ok, all those texts were equal. we'll use the ID of the most deeply nested node as a last resort
return aNodes[len(aNodes)-1].ID < bNodes[len(bNodes)-1].ID
}
type GroupedSpecIndices []SpecIndices
type SpecIndices []int
func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, GroupedSpecIndices) {
/*
Ginkgo has sophisticated support for randomizing specs. Specs are guaranteed to have the same
order for a given seed across test runs.
By default only top-level containers and specs are shuffled - this makes for a more intuitive debugging
experience - specs within a given container run in the order they appear in the file.
Developers can set -randomizeAllSpecs to shuffle _all_ specs.
In addition, spec containers can be marked as Ordered. Specs within an Ordered container are never shuffled.
Finally, specs and spec containers can be marked as Serial. When running in parallel, serial specs run on Process #1 _after_ all other processes have finished.
*/
// Seed a new random source based on the configured random seed.
r := rand.New(rand.NewSource(suiteConfig.RandomSeed))
// first, we sort the entire suite to ensure a deterministic order. the sort is performed by filename, then line number, and then spec text. this ensures every parallel process has the exact same spec order and is only necessary to cover the edge case where the user iterates over a map to generate specs.
sortableSpecs := NewSortableSpecs(specs)
sort.Sort(sortableSpecs)
// then we break things into execution groups
// a group represents a single unit of execution and is a collection of SpecIndices
// usually a group is just a single spec, however ordered containers must be preserved as a single group
executionGroupIDs := []uint{}
executionGroups := map[uint]SpecIndices{}
for _, idx := range sortableSpecs.Indexes {
spec := specs[idx]
groupNode := spec.Nodes.FirstNodeMarkedOrdered()
if groupNode.IsZero() {
groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt)
}
executionGroups[groupNode.ID] = append(executionGroups[groupNode.ID], idx)
if len(executionGroups[groupNode.ID]) == 1 {
executionGroupIDs = append(executionGroupIDs, groupNode.ID)
}
}
// now, we only shuffle all the execution groups if we're randomizing all specs, otherwise
// we shuffle outermost containers. so we need to form shufflable groupings of GroupIDs
shufflableGroupingIDs := []uint{}
shufflableGroupingIDToGroupIDs := map[uint][]uint{}
// for each execution group we're going to have to pick a node to represent how the
// execution group is grouped for shuffling:
nodeTypesToShuffle := types.NodeTypesForContainerAndIt
if suiteConfig.RandomizeAllSpecs {
nodeTypesToShuffle = types.NodeTypeIt
}
//so, for each execution group:
for _, groupID := range executionGroupIDs {
// pick out a representative spec
representativeSpec := specs[executionGroups[groupID][0]]
// and grab the node on the spec that will represent which shufflable group this execution group belongs to
shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle)
//add the execution group to its shufflable group
shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID] = append(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID], groupID)
//and if it's the first one in
if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 {
// record the shufflable group ID
shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID)
}
}
// now we permute the sorted shufflable grouping IDs and build the ordered Groups
orderedGroups := GroupedSpecIndices{}
permutation := r.Perm(len(shufflableGroupingIDs))
for _, j := range permutation {
//let's get the execution group IDs for this shufflable group:
executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]]
// and we'll add their associated specindices to the orderedGroups slice:
for _, executionGroupID := range executionGroupIDsForJ {
orderedGroups = append(orderedGroups, executionGroups[executionGroupID])
}
}
// If we're running in series, we're done.
if suiteConfig.ParallelTotal == 1 {
return orderedGroups, GroupedSpecIndices{}
}
// We're running in parallel so we need to partition the ordered groups into a parallelizable set and a serialized set.
// The parallelizable groups will run across all Ginkgo processes...
// ...the serial groups will only run on Process #1 after all other processes have exited.
parallelizableGroups, serialGroups := GroupedSpecIndices{}, GroupedSpecIndices{}
for _, specIndices := range orderedGroups {
if specs[specIndices[0]].Nodes.HasNodeMarkedSerial() {
serialGroups = append(serialGroups, specIndices)
} else {
parallelizableGroups = append(parallelizableGroups, specIndices)
}
}
return parallelizableGroups, serialGroups
}
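
Illustrative sketch (not part of the vendored file): the deterministic-shuffle idea used above boils down to seeding math/rand and taking a permutation. Two processes that seed identically get identical orderings, which is why every parallel process can compute the same spec order without coordinating. The seed value and group names below are made up for the example.

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	seed := int64(17) // stand-in for suiteConfig.RandomSeed
	groups := []string{"container A", "container B", "spec C", "ordered D"}

	// Seeding two sources identically yields identical permutations,
	// so both loop iterations print the groups in the same shuffled order.
	for i := 0; i < 2; i++ {
		r := rand.New(rand.NewSource(seed))
		for _, j := range r.Perm(len(groups)) {
			fmt.Print(groups[j], " ")
		}
		fmt.Println()
	}
}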

View File

@ -0,0 +1,250 @@
package internal
import (
"bytes"
"io"
"os"
"time"
)
const BAILOUT_TIME = 1 * time.Second
const BAILOUT_MESSAGE = `Ginkgo detected an issue while intercepting output.
When running in parallel, Ginkgo captures stdout and stderr output
and attaches it to the running spec. It looks like that process is getting
stuck for this suite.
This usually happens if you, or a library you are using, spin up an external
process and set cmd.Stdout = os.Stdout and/or cmd.Stderr = os.Stderr. This
causes the external process to keep Ginkgo's output interceptor pipe open and
causes output interception to hang.
Ginkgo has detected this and shortcircuited the capture process. The specs
will continue running after this message however output from the external
process that caused this issue will not be captured.
You have several options to fix this. In preferred order they are:
1. Pass GinkgoWriter instead of os.Stdout or os.Stderr to your process.
2. Ensure your process exits before the current spec completes. If your
process is long-lived and must cross spec boundaries, this option won't
work for you.
3. Pause Ginkgo's output interceptor before starting your process and then
resume it after. Use PauseOutputInterception() and ResumeOutputInterception()
to do this.
4. Set --output-interceptor-mode=none when running your Ginkgo suite. This will
turn off all output interception but allow specs to run in parallel without this
issue. You may miss important output if you do this including output from Go's
race detector.
More details on issue #851 - https://github.com/onsi/ginkgo/issues/851
`
/*
The OutputInterceptor is used to
intercept and capture all stdout and stderr output during a test run.
*/
type OutputInterceptor interface {
StartInterceptingOutput()
StartInterceptingOutputAndForwardTo(io.Writer)
StopInterceptingAndReturnOutput() string
PauseIntercepting()
ResumeIntercepting()
Shutdown()
}
type NoopOutputInterceptor struct{}
func (interceptor NoopOutputInterceptor) StartInterceptingOutput() {}
func (interceptor NoopOutputInterceptor) StartInterceptingOutputAndForwardTo(io.Writer) {}
func (interceptor NoopOutputInterceptor) StopInterceptingAndReturnOutput() string { return "" }
func (interceptor NoopOutputInterceptor) PauseIntercepting() {}
func (interceptor NoopOutputInterceptor) ResumeIntercepting() {}
func (interceptor NoopOutputInterceptor) Shutdown() {}
type pipePair struct {
reader *os.File
writer *os.File
}
func startPipeFactory(pipeChannel chan pipePair, shutdown chan interface{}) {
for {
//make the next pipe...
pair := pipePair{}
pair.reader, pair.writer, _ = os.Pipe()
select {
//...and provide it to the next consumer (they are responsible for closing the files)
case pipeChannel <- pair:
continue
//...or close the files if we were told to shutdown
case <-shutdown:
pair.reader.Close()
pair.writer.Close()
return
}
}
}
type interceptorImplementation interface {
CreateStdoutStderrClones() (*os.File, *os.File)
ConnectPipeToStdoutStderr(*os.File)
RestoreStdoutStderrFromClones(*os.File, *os.File)
ShutdownClones(*os.File, *os.File)
}
type genericOutputInterceptor struct {
intercepting bool
stdoutClone *os.File
stderrClone *os.File
pipe pipePair
shutdown chan interface{}
emergencyBailout chan interface{}
pipeChannel chan pipePair
interceptedContent chan string
forwardTo io.Writer
accumulatedOutput string
implementation interceptorImplementation
}
func (interceptor *genericOutputInterceptor) StartInterceptingOutput() {
interceptor.StartInterceptingOutputAndForwardTo(io.Discard)
}
func (interceptor *genericOutputInterceptor) StartInterceptingOutputAndForwardTo(w io.Writer) {
if interceptor.intercepting {
return
}
interceptor.accumulatedOutput = ""
interceptor.forwardTo = w
interceptor.ResumeIntercepting()
}
func (interceptor *genericOutputInterceptor) StopInterceptingAndReturnOutput() string {
if interceptor.intercepting {
interceptor.PauseIntercepting()
}
return interceptor.accumulatedOutput
}
func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
if interceptor.intercepting {
return
}
interceptor.intercepting = true
if interceptor.stdoutClone == nil {
interceptor.stdoutClone, interceptor.stderrClone = interceptor.implementation.CreateStdoutStderrClones()
interceptor.shutdown = make(chan interface{})
go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown)
}
// Now we make a pipe. We'll use it to capture anything written to file descriptors 1 and 2 (which is how everything else in the world logs to stdout and stderr)
// we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running
interceptor.pipe = <-interceptor.pipeChannel
interceptor.emergencyBailout = make(chan interface{})
//Spin up a goroutine to copy data from the pipe into a buffer, this is how we capture any output the user is emitting
go func() {
buffer := &bytes.Buffer{}
destination := io.MultiWriter(buffer, interceptor.forwardTo)
copyFinished := make(chan interface{})
reader := interceptor.pipe.reader
go func() {
io.Copy(destination, reader)
reader.Close() // close the read end of the pipe so we don't leak a file descriptor
close(copyFinished)
}()
select {
case <-copyFinished:
interceptor.interceptedContent <- buffer.String()
case <-interceptor.emergencyBailout:
interceptor.interceptedContent <- ""
}
}()
interceptor.implementation.ConnectPipeToStdoutStderr(interceptor.pipe.writer)
}
func (interceptor *genericOutputInterceptor) PauseIntercepting() {
if !interceptor.intercepting {
return
}
// first we have to close the write end of the pipe. To do this we have to close all file descriptors pointing
// to the write end. So that would be the pipewriter itself, and FD #1 and FD #2 if we've Dup2'd them
interceptor.pipe.writer.Close() // the pipewriter itself
// we also need to stop intercepting. we do that by pointing file descriptors #1 and #2 back at their original stdout and stderr file descriptions (saved in the clones);
// doing so also closes #1 and #2's connection to the pipe's write end
interceptor.implementation.RestoreStdoutStderrFromClones(interceptor.stdoutClone, interceptor.stderrClone)
var content string
select {
case content = <-interceptor.interceptedContent:
case <-time.After(BAILOUT_TIME):
/*
By closing all the file descriptors associated with the pipe writer's file description, the io.Copy reading from the reader
should eventually receive an EOF and exit.
**However**, if the user has spun up an external process and passed in os.Stdout/os.Stderr to cmd.Stdout/cmd.Stderr then the external process
will have a file descriptor pointing to the pipe writer's file description and it will not close until the external process exits.
That would leave us hanging here waiting for the io.Copy to close forever. Instead we invoke this emergency escape valve. This returns whatever
content we've got but leaves the io.Copy running. This ensures the external process can continue writing without hanging at the cost of leaking a goroutine
and file descriptor (though these will be cleaned up when the process exits).
We tack on a message to notify the user that they've hit this edgecase and encourage them to address it.
*/
close(interceptor.emergencyBailout)
content = <-interceptor.interceptedContent + BAILOUT_MESSAGE
}
interceptor.accumulatedOutput += content
interceptor.intercepting = false
}
func (interceptor *genericOutputInterceptor) Shutdown() {
interceptor.PauseIntercepting()
if interceptor.stdoutClone != nil {
close(interceptor.shutdown)
interceptor.implementation.ShutdownClones(interceptor.stdoutClone, interceptor.stderrClone)
interceptor.stdoutClone = nil
interceptor.stderrClone = nil
}
}
/* This is used on windows builds but included here so it can be explicitly tested on unix systems too */
func NewOSGlobalReassigningOutputInterceptor() OutputInterceptor {
return &genericOutputInterceptor{
interceptedContent: make(chan string),
pipeChannel: make(chan pipePair),
shutdown: make(chan interface{}),
implementation: &osGlobalReassigningOutputInterceptorImpl{},
}
}
type osGlobalReassigningOutputInterceptorImpl struct{}
func (impl *osGlobalReassigningOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
return os.Stdout, os.Stderr
}
func (impl *osGlobalReassigningOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
os.Stdout = pipeWriter
os.Stderr = pipeWriter
}
func (impl *osGlobalReassigningOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
os.Stdout = stdoutClone
os.Stderr = stderrClone
}
func (impl *osGlobalReassigningOutputInterceptorImpl) ShutdownClones(_ *os.File, _ *os.File) {
//noop
}
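
A hedged usage sketch (not part of the vendored file) of option 3 from the bailout message above, using the PauseOutputInterception()/ResumeOutputInterception() helpers that the message names from the public ginkgo DSL. The external command here is a placeholder.

package example_test

import (
	"os"
	"os/exec"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = It("starts a long-lived external process without wedging the interceptor", func() {
	// Pause capture so the child can inherit os.Stdout/os.Stderr without
	// holding Ginkgo's interception pipe open past the end of the spec.
	PauseOutputInterception()
	cmd := exec.Command("sleep", "60") // placeholder for a long-lived external process
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	Expect(cmd.Start()).To(Succeed())
	ResumeOutputInterception() // resume capture for the rest of the spec
})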

View File

@ -0,0 +1,73 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
// +build freebsd openbsd netbsd dragonfly darwin linux solaris
package internal
import (
"os"
"golang.org/x/sys/unix"
)
func NewOutputInterceptor() OutputInterceptor {
return &genericOutputInterceptor{
interceptedContent: make(chan string),
pipeChannel: make(chan pipePair),
shutdown: make(chan interface{}),
implementation: &dupSyscallOutputInterceptorImpl{},
}
}
type dupSyscallOutputInterceptorImpl struct{}
func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
// To clone stdout and stderr we:
// First, create two clone file descriptors that point to the stdout and stderr file descriptions
stdoutCloneFD, _ := unix.Dup(1)
stderrCloneFD, _ := unix.Dup(2)
// Important: set the fds to FD_CLOEXEC to prevent them from leaking into child processes
// https://github.com/onsi/ginkgo/issues/1191
flags, err := unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_GETFD, 0)
if err == nil {
unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC)
}
flags, err = unix.FcntlInt(uintptr(stderrCloneFD), unix.F_GETFD, 0)
if err == nil {
unix.FcntlInt(uintptr(stderrCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC)
}
// And then wrap the clone file descriptors in files.
// One benefit of this (that we don't use yet) is that we can actually write
// to these files to emit output to the console even though we're intercepting output
stdoutClone := os.NewFile(uintptr(stdoutCloneFD), "stdout-clone")
stderrClone := os.NewFile(uintptr(stderrCloneFD), "stderr-clone")
//these clones remain alive throughout the lifecycle of the suite and don't need to be recreated
//this speeds things up a bit, actually.
return stdoutClone, stderrClone
}
func (impl *dupSyscallOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
// To redirect output to our pipe we need to point the 1 and 2 file descriptors (which is how the world tries to log things)
// to the write end of the pipe.
// We do this with Dup2 (possibly Dup3 on some architectures) to have file descriptors 1 and 2 point to the same file description as the pipeWriter
// This effectively shunts data written to stdout and stderr to the write end of our pipe
unix.Dup2(int(pipeWriter.Fd()), 1)
unix.Dup2(int(pipeWriter.Fd()), 2)
}
func (impl *dupSyscallOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
// To restore stdout/stderr from the clones we have the 1 and 2 file descriptors
// point to the original file descriptions that we saved off in the clones.
// This has the added benefit of closing the connection between these descriptors and the write end of the pipe
// which is important to cause the io.Copy on the pipe.Reader to end.
unix.Dup2(int(stdoutClone.Fd()), 1)
unix.Dup2(int(stderrClone.Fd()), 2)
}
func (impl *dupSyscallOutputInterceptorImpl) ShutdownClones(stdoutClone *os.File, stderrClone *os.File) {
// We're done with the clones so we can close them to clean up after ourselves
stdoutClone.Close()
stderrClone.Close()
}
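
For illustration only (not part of the vendored file): a minimal standalone sketch of the dup-syscall trick above, assuming golang.org/x/sys/unix on a platform that exposes Dup2 (some Linux architectures only provide Dup3). File descriptor 1 is pointed at a pipe, output is captured, and then fd 1 is restored from the saved clone.

//go:build darwin || (linux && amd64)

package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	stdoutClone, _ := unix.Dup(1) // remember where fd 1 currently points
	r, w, _ := os.Pipe()

	unix.Dup2(int(w.Fd()), 1) // anything written to fd 1 now lands in the pipe
	fmt.Println("captured line")
	w.Close() // fd 1 still holds the write end open at this point

	unix.Dup2(stdoutClone, 1) // restore fd 1; the last write end closes, so the reader sees EOF
	unix.Close(stdoutClone)

	captured, _ := io.ReadAll(r)
	fmt.Printf("got: %q\n", captured)
}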

View File

@ -0,0 +1,7 @@
//go:build wasm
package internal
func NewOutputInterceptor() OutputInterceptor {
return &NoopOutputInterceptor{}
}

View File

@ -0,0 +1,7 @@
// +build windows
package internal
func NewOutputInterceptor() OutputInterceptor {
return NewOSGlobalReassigningOutputInterceptor()
}

View File

@ -0,0 +1,72 @@
package parallel_support
import (
"fmt"
"io"
"os"
"time"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
)
type BeforeSuiteState struct {
Data []byte
State types.SpecState
}
type ParallelIndexCounter struct {
Index int
}
var ErrorGone = fmt.Errorf("gone")
var ErrorFailed = fmt.Errorf("failed")
var ErrorEarly = fmt.Errorf("early")
var POLLING_INTERVAL = 50 * time.Millisecond
type Server interface {
Start()
Close()
Address() string
RegisterAlive(node int, alive func() bool)
GetSuiteDone() chan interface{}
GetOutputDestination() io.Writer
SetOutputDestination(io.Writer)
}
type Client interface {
Connect() bool
Close() error
PostSuiteWillBegin(report types.Report) error
PostDidRun(report types.SpecReport) error
PostSuiteDidEnd(report types.Report) error
PostReportBeforeSuiteCompleted(state types.SpecState) error
BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error)
PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error
BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error)
BlockUntilNonprimaryProcsHaveFinished() error
BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error)
FetchNextCounter() (int, error)
PostAbort() error
ShouldAbort() bool
PostEmitProgressReport(report types.ProgressReport) error
Write(p []byte) (int, error)
}
func NewServer(parallelTotal int, reporter reporters.Reporter) (Server, error) {
if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" {
return newHttpServer(parallelTotal, reporter)
} else {
return newRPCServer(parallelTotal, reporter)
}
}
func NewClient(serverHost string) Client {
if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" {
return newHttpClient(serverHost)
} else {
return newRPCClient(serverHost)
}
}

View File

@ -0,0 +1,169 @@
package parallel_support
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
"github.com/onsi/ginkgo/v2/types"
)
type httpClient struct {
serverHost string
}
func newHttpClient(serverHost string) *httpClient {
return &httpClient{
serverHost: serverHost,
}
}
func (client *httpClient) Connect() bool {
resp, err := http.Get(client.serverHost + "/up")
if err != nil {
return false
}
resp.Body.Close()
return resp.StatusCode == http.StatusOK
}
func (client *httpClient) Close() error {
return nil
}
func (client *httpClient) post(path string, data interface{}) error {
var body io.Reader
if data != nil {
encoded, err := json.Marshal(data)
if err != nil {
return err
}
body = bytes.NewBuffer(encoded)
}
resp, err := http.Post(client.serverHost+path, "application/json", body)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("received unexpected status code %d", resp.StatusCode)
}
return nil
}
func (client *httpClient) poll(path string, data interface{}) error {
for {
resp, err := http.Get(client.serverHost + path)
if err != nil {
return err
}
if resp.StatusCode == http.StatusTooEarly {
resp.Body.Close()
time.Sleep(POLLING_INTERVAL)
continue
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusGone {
return ErrorGone
}
if resp.StatusCode == http.StatusFailedDependency {
return ErrorFailed
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("received unexpected status code %d", resp.StatusCode)
}
if data != nil {
return json.NewDecoder(resp.Body).Decode(data)
}
return nil
}
}
func (client *httpClient) PostSuiteWillBegin(report types.Report) error {
return client.post("/suite-will-begin", report)
}
func (client *httpClient) PostDidRun(report types.SpecReport) error {
return client.post("/did-run", report)
}
func (client *httpClient) PostSuiteDidEnd(report types.Report) error {
return client.post("/suite-did-end", report)
}
func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) error {
return client.post("/progress-report", report)
}
func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
return client.post("/report-before-suite-completed", state)
}
func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
var state types.SpecState
err := client.poll("/report-before-suite-state", &state)
if err == ErrorGone {
return types.SpecStateFailed, nil
}
return state, err
}
func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
beforeSuiteState := BeforeSuiteState{
State: state,
Data: data,
}
return client.post("/before-suite-completed", beforeSuiteState)
}
func (client *httpClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
var beforeSuiteState BeforeSuiteState
err := client.poll("/before-suite-state", &beforeSuiteState)
if err == ErrorGone {
return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
}
return beforeSuiteState.State, beforeSuiteState.Data, err
}
func (client *httpClient) BlockUntilNonprimaryProcsHaveFinished() error {
return client.poll("/have-nonprimary-procs-finished", nil)
}
func (client *httpClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
var report types.Report
err := client.poll("/aggregated-nonprimary-procs-report", &report)
if err == ErrorGone {
return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
}
return report, err
}
func (client *httpClient) FetchNextCounter() (int, error) {
var counter ParallelIndexCounter
err := client.poll("/counter", &counter)
return counter.Index, err
}
func (client *httpClient) PostAbort() error {
return client.post("/abort", nil)
}
func (client *httpClient) ShouldAbort() bool {
err := client.poll("/abort", nil)
if err == ErrorGone {
return true
}
return false
}
func (client *httpClient) Write(p []byte) (int, error) {
resp, err := http.Post(client.serverHost+"/emit-output", "text/plain;charset=UTF-8", bytes.NewReader(p))
if err != nil {
return 0, err
}
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return 0, fmt.Errorf("failed to emit output")
}
return len(p), nil
}
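
A self-contained sketch (illustrative, standard library only) of the polling contract the client above encodes: 425 Too Early means try again later, 410 Gone and 424 Failed Dependency are terminal sentinel conditions, 200 means done, and anything else is unexpected. The URL below is hypothetical.

package main

import (
	"errors"
	"fmt"
	"net/http"
	"time"
)

var (
	errGone   = errors.New("gone")
	errFailed = errors.New("failed")
)

// pollUntilReady mirrors httpClient.poll above, minus the JSON decoding.
func pollUntilReady(url string, interval time.Duration) error {
	for {
		resp, err := http.Get(url)
		if err != nil {
			return err
		}
		code := resp.StatusCode
		resp.Body.Close()
		switch code {
		case http.StatusTooEarly:
			time.Sleep(interval) // the server isn't ready yet; poll again
		case http.StatusGone:
			return errGone
		case http.StatusFailedDependency:
			return errFailed
		case http.StatusOK:
			return nil
		default:
			return fmt.Errorf("received unexpected status code %d", code)
		}
	}
}

func main() {
	// Hypothetical endpoint; in Ginkgo this would be server.Address() plus a path
	// such as /have-nonprimary-procs-finished.
	err := pollUntilReady("http://127.0.0.1:8080/ready", 50*time.Millisecond)
	fmt.Println("poll result:", err)
}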

View File

@ -0,0 +1,242 @@
/*
The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
*/
package parallel_support
import (
"encoding/json"
"io"
"net"
"net/http"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
)
/*
httpServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
It then forwards that communication to attached reporters.
*/
type httpServer struct {
listener net.Listener
handler *ServerHandler
}
// Create a new server, automatically selecting a port
func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) {
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return nil, err
}
return &httpServer{
listener: listener,
handler: newServerHandler(parallelTotal, reporter),
}, nil
}
// Start the server. You don't need to `go s.Start()`, just `s.Start()`
func (server *httpServer) Start() {
httpServer := &http.Server{}
mux := http.NewServeMux()
httpServer.Handler = mux
//streaming endpoints
mux.HandleFunc("/suite-will-begin", server.specSuiteWillBegin)
mux.HandleFunc("/did-run", server.didRun)
mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd)
mux.HandleFunc("/emit-output", server.emitOutput)
mux.HandleFunc("/progress-report", server.emitProgressReport)
//synchronization endpoints
mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted)
mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState)
mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted)
mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState)
mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished)
mux.HandleFunc("/aggregated-nonprimary-procs-report", server.handleAggregatedNonprimaryProcsReport)
mux.HandleFunc("/counter", server.handleCounter)
mux.HandleFunc("/up", server.handleUp)
mux.HandleFunc("/abort", server.handleAbort)
go httpServer.Serve(server.listener)
}
// Stop the server
func (server *httpServer) Close() {
server.listener.Close()
}
// The address the server can be reached at. Pass this into the `ForwardingReporter`.
func (server *httpServer) Address() string {
return "http://" + server.listener.Addr().String()
}
func (server *httpServer) GetSuiteDone() chan interface{} {
return server.handler.done
}
func (server *httpServer) GetOutputDestination() io.Writer {
return server.handler.outputDestination
}
func (server *httpServer) SetOutputDestination(w io.Writer) {
server.handler.outputDestination = w
}
func (server *httpServer) RegisterAlive(node int, alive func() bool) {
server.handler.registerAlive(node, alive)
}
//
// Streaming Endpoints
//
// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool {
defer request.Body.Close()
if json.NewDecoder(request.Body).Decode(object) != nil {
writer.WriteHeader(http.StatusBadRequest)
return false
}
return true
}
func (server *httpServer) handleError(err error, writer http.ResponseWriter) bool {
if err == nil {
return false
}
switch err {
case ErrorEarly:
writer.WriteHeader(http.StatusTooEarly)
case ErrorGone:
writer.WriteHeader(http.StatusGone)
case ErrorFailed:
writer.WriteHeader(http.StatusFailedDependency)
default:
writer.WriteHeader(http.StatusInternalServerError)
}
return true
}
func (server *httpServer) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
var report types.Report
if !server.decode(writer, request, &report) {
return
}
server.handleError(server.handler.SpecSuiteWillBegin(report, voidReceiver), writer)
}
func (server *httpServer) didRun(writer http.ResponseWriter, request *http.Request) {
var report types.SpecReport
if !server.decode(writer, request, &report) {
return
}
server.handleError(server.handler.DidRun(report, voidReceiver), writer)
}
func (server *httpServer) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
var report types.Report
if !server.decode(writer, request, &report) {
return
}
server.handleError(server.handler.SpecSuiteDidEnd(report, voidReceiver), writer)
}
func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.Request) {
output, err := io.ReadAll(request.Body)
if err != nil {
writer.WriteHeader(http.StatusInternalServerError)
return
}
var n int
server.handleError(server.handler.EmitOutput(output, &n), writer)
}
func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) {
var report types.ProgressReport
if !server.decode(writer, request, &report) {
return
}
server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer)
}
func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
var state types.SpecState
if !server.decode(writer, request, &state) {
return
}
server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer)
}
func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
var state types.SpecState
if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) {
return
}
json.NewEncoder(writer).Encode(state)
}
func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
var beforeSuiteState BeforeSuiteState
if !server.decode(writer, request, &beforeSuiteState) {
return
}
server.handleError(server.handler.BeforeSuiteCompleted(beforeSuiteState, voidReceiver), writer)
}
func (server *httpServer) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
var beforeSuiteState BeforeSuiteState
if server.handleError(server.handler.BeforeSuiteState(voidSender, &beforeSuiteState), writer) {
return
}
json.NewEncoder(writer).Encode(beforeSuiteState)
}
func (server *httpServer) handleHaveNonprimaryProcsFinished(writer http.ResponseWriter, request *http.Request) {
if server.handleError(server.handler.HaveNonprimaryProcsFinished(voidSender, voidReceiver), writer) {
return
}
writer.WriteHeader(http.StatusOK)
}
func (server *httpServer) handleAggregatedNonprimaryProcsReport(writer http.ResponseWriter, request *http.Request) {
var aggregatedReport types.Report
if server.handleError(server.handler.AggregatedNonprimaryProcsReport(voidSender, &aggregatedReport), writer) {
return
}
json.NewEncoder(writer).Encode(aggregatedReport)
}
func (server *httpServer) handleCounter(writer http.ResponseWriter, request *http.Request) {
var n int
if server.handleError(server.handler.Counter(voidSender, &n), writer) {
return
}
json.NewEncoder(writer).Encode(ParallelIndexCounter{Index: n})
}
func (server *httpServer) handleUp(writer http.ResponseWriter, request *http.Request) {
writer.WriteHeader(http.StatusOK)
}
func (server *httpServer) handleAbort(writer http.ResponseWriter, request *http.Request) {
if request.Method == "GET" {
var shouldAbort bool
server.handler.ShouldAbort(voidSender, &shouldAbort)
if shouldAbort {
writer.WriteHeader(http.StatusGone)
} else {
writer.WriteHeader(http.StatusOK)
}
} else {
server.handler.Abort(voidSender, voidReceiver)
}
}

View File

@ -0,0 +1,136 @@
package parallel_support
import (
"net/rpc"
"time"
"github.com/onsi/ginkgo/v2/types"
)
type rpcClient struct {
serverHost string
client *rpc.Client
}
func newRPCClient(serverHost string) *rpcClient {
return &rpcClient{
serverHost: serverHost,
}
}
func (client *rpcClient) Connect() bool {
var err error
if client.client != nil {
return true
}
client.client, err = rpc.DialHTTPPath("tcp", client.serverHost, "/")
if err != nil {
client.client = nil
return false
}
return true
}
func (client *rpcClient) Close() error {
return client.client.Close()
}
func (client *rpcClient) poll(method string, data interface{}) error {
for {
err := client.client.Call(method, voidSender, data)
if err == nil {
return nil
}
switch err.Error() {
case ErrorEarly.Error():
time.Sleep(POLLING_INTERVAL)
case ErrorGone.Error():
return ErrorGone
case ErrorFailed.Error():
return ErrorFailed
default:
return err
}
}
}
func (client *rpcClient) PostSuiteWillBegin(report types.Report) error {
return client.client.Call("Server.SpecSuiteWillBegin", report, voidReceiver)
}
func (client *rpcClient) PostDidRun(report types.SpecReport) error {
return client.client.Call("Server.DidRun", report, voidReceiver)
}
func (client *rpcClient) PostSuiteDidEnd(report types.Report) error {
return client.client.Call("Server.SpecSuiteDidEnd", report, voidReceiver)
}
func (client *rpcClient) Write(p []byte) (int, error) {
var n int
err := client.client.Call("Server.EmitOutput", p, &n)
return n, err
}
func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error {
return client.client.Call("Server.EmitProgressReport", report, voidReceiver)
}
func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver)
}
func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
var state types.SpecState
err := client.poll("Server.ReportBeforeSuiteState", &state)
if err == ErrorGone {
return types.SpecStateFailed, nil
}
return state, err
}
func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
beforeSuiteState := BeforeSuiteState{
State: state,
Data: data,
}
return client.client.Call("Server.BeforeSuiteCompleted", beforeSuiteState, voidReceiver)
}
func (client *rpcClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
var beforeSuiteState BeforeSuiteState
err := client.poll("Server.BeforeSuiteState", &beforeSuiteState)
if err == ErrorGone {
return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
}
return beforeSuiteState.State, beforeSuiteState.Data, err
}
func (client *rpcClient) BlockUntilNonprimaryProcsHaveFinished() error {
return client.poll("Server.HaveNonprimaryProcsFinished", voidReceiver)
}
func (client *rpcClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
var report types.Report
err := client.poll("Server.AggregatedNonprimaryProcsReport", &report)
if err == ErrorGone {
return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
}
return report, err
}
func (client *rpcClient) FetchNextCounter() (int, error) {
var counter int
err := client.client.Call("Server.Counter", voidSender, &counter)
return counter, err
}
func (client *rpcClient) PostAbort() error {
return client.client.Call("Server.Abort", voidSender, voidReceiver)
}
func (client *rpcClient) ShouldAbort() bool {
var shouldAbort bool
client.client.Call("Server.ShouldAbort", voidSender, &shouldAbort)
return shouldAbort
}

View File

@ -0,0 +1,75 @@
/*
The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
*/
package parallel_support
import (
"io"
"net"
"net/http"
"net/rpc"
"github.com/onsi/ginkgo/v2/reporters"
)
/*
RPCServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
It then forwards that communication to attached reporters.
*/
type RPCServer struct {
listener net.Listener
handler *ServerHandler
}
//Create a new server, automatically selecting a port
func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) {
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return nil, err
}
return &RPCServer{
listener: listener,
handler: newServerHandler(parallelTotal, reporter),
}, nil
}
//Start the server. You don't need to `go s.Start()`, just `s.Start()`
func (server *RPCServer) Start() {
rpcServer := rpc.NewServer()
rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server
httpServer := &http.Server{}
httpServer.Handler = rpcServer
go httpServer.Serve(server.listener)
}
//Stop the server
func (server *RPCServer) Close() {
server.listener.Close()
}
//The address the server can be reached at. Pass this into the `ForwardingReporter`.
func (server *RPCServer) Address() string {
return server.listener.Addr().String()
}
func (server *RPCServer) GetSuiteDone() chan interface{} {
return server.handler.done
}
func (server *RPCServer) GetOutputDestination() io.Writer {
return server.handler.outputDestination
}
func (server *RPCServer) SetOutputDestination(w io.Writer) {
server.handler.outputDestination = w
}
func (server *RPCServer) RegisterAlive(node int, alive func() bool) {
server.handler.registerAlive(node, alive)
}

View File

@ -0,0 +1,234 @@
package parallel_support
import (
"io"
"os"
"sync"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
)
type Void struct{}
var voidReceiver *Void = &Void{}
var voidSender Void
// ServerHandler is an RPC-compatible handler that is shared between the http server and the rpc server.
// It handles all the business logic to avoid duplication between the two servers
type ServerHandler struct {
done chan interface{}
outputDestination io.Writer
reporter reporters.Reporter
alives []func() bool
lock *sync.Mutex
beforeSuiteState BeforeSuiteState
reportBeforeSuiteState types.SpecState
parallelTotal int
counter int
counterLock *sync.Mutex
shouldAbort bool
numSuiteDidBegins int
numSuiteDidEnds int
aggregatedReport types.Report
reportHoldingArea []types.SpecReport
}
func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler {
return &ServerHandler{
reporter: reporter,
lock: &sync.Mutex{},
counterLock: &sync.Mutex{},
alives: make([]func() bool, parallelTotal),
beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid},
parallelTotal: parallelTotal,
outputDestination: os.Stdout,
done: make(chan interface{}),
}
}
func (handler *ServerHandler) SpecSuiteWillBegin(report types.Report, _ *Void) error {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.numSuiteDidBegins += 1
// all summaries are identical, so it's fine to simply emit the last one of these
if handler.numSuiteDidBegins == handler.parallelTotal {
handler.reporter.SuiteWillBegin(report)
for _, summary := range handler.reportHoldingArea {
handler.reporter.WillRun(summary)
handler.reporter.DidRun(summary)
}
handler.reportHoldingArea = nil
}
return nil
}
func (handler *ServerHandler) DidRun(report types.SpecReport, _ *Void) error {
handler.lock.Lock()
defer handler.lock.Unlock()
if handler.numSuiteDidBegins == handler.parallelTotal {
handler.reporter.WillRun(report)
handler.reporter.DidRun(report)
} else {
handler.reportHoldingArea = append(handler.reportHoldingArea, report)
}
return nil
}
func (handler *ServerHandler) SpecSuiteDidEnd(report types.Report, _ *Void) error {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.numSuiteDidEnds += 1
if handler.numSuiteDidEnds == 1 {
handler.aggregatedReport = report
} else {
handler.aggregatedReport = handler.aggregatedReport.Add(report)
}
if handler.numSuiteDidEnds == handler.parallelTotal {
handler.reporter.SuiteDidEnd(handler.aggregatedReport)
close(handler.done)
}
return nil
}
func (handler *ServerHandler) EmitOutput(output []byte, n *int) error {
var err error
*n, err = handler.outputDestination.Write(output)
return err
}
func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.reporter.EmitProgressReport(report)
return nil
}
func (handler *ServerHandler) registerAlive(proc int, alive func() bool) {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.alives[proc-1] = alive
}
func (handler *ServerHandler) procIsAlive(proc int) bool {
handler.lock.Lock()
defer handler.lock.Unlock()
alive := handler.alives[proc-1]
if alive == nil {
return true
}
return alive()
}
func (handler *ServerHandler) haveNonprimaryProcsFinished() bool {
for i := 2; i <= handler.parallelTotal; i++ {
if handler.procIsAlive(i) {
return false
}
}
return true
}
func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.reportBeforeSuiteState = reportBeforeSuiteState
return nil
}
func (handler *ServerHandler) ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error {
proc1IsAlive := handler.procIsAlive(1)
handler.lock.Lock()
defer handler.lock.Unlock()
if handler.reportBeforeSuiteState == types.SpecStateInvalid {
if proc1IsAlive {
return ErrorEarly
} else {
return ErrorGone
}
}
*reportBeforeSuiteState = handler.reportBeforeSuiteState
return nil
}
func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.beforeSuiteState = beforeSuiteState
return nil
}
func (handler *ServerHandler) BeforeSuiteState(_ Void, beforeSuiteState *BeforeSuiteState) error {
proc1IsAlive := handler.procIsAlive(1)
handler.lock.Lock()
defer handler.lock.Unlock()
if handler.beforeSuiteState.State == types.SpecStateInvalid {
if proc1IsAlive {
return ErrorEarly
} else {
return ErrorGone
}
}
*beforeSuiteState = handler.beforeSuiteState
return nil
}
func (handler *ServerHandler) HaveNonprimaryProcsFinished(_ Void, _ *Void) error {
if handler.haveNonprimaryProcsFinished() {
return nil
} else {
return ErrorEarly
}
}
func (handler *ServerHandler) AggregatedNonprimaryProcsReport(_ Void, report *types.Report) error {
if handler.haveNonprimaryProcsFinished() {
handler.lock.Lock()
defer handler.lock.Unlock()
if handler.numSuiteDidEnds == handler.parallelTotal-1 {
*report = handler.aggregatedReport
return nil
} else {
return ErrorGone
}
} else {
return ErrorEarly
}
}
func (handler *ServerHandler) Counter(_ Void, counter *int) error {
handler.counterLock.Lock()
defer handler.counterLock.Unlock()
*counter = handler.counter
handler.counter++
return nil
}
func (handler *ServerHandler) Abort(_ Void, _ *Void) error {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.shouldAbort = true
return nil
}
func (handler *ServerHandler) ShouldAbort(_ Void, shouldAbort *bool) error {
handler.lock.Lock()
defer handler.lock.Unlock()
*shouldAbort = handler.shouldAbort
return nil
}
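
For context (not part of the vendored file): the BeforeSuiteCompleted/BeforeSuiteState plumbing above is what backs SynchronizedBeforeSuite in the public DSL. A hedged sketch of the user-level shape, where process #1 produces a payload and every process then consumes it; the fixture data is made up.

package example_test

import (
	"encoding/json"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var sharedEndpoint string

var _ = SynchronizedBeforeSuite(func() []byte {
	// Runs only on process #1; the returned bytes are posted to the server
	// (BeforeSuiteCompleted) so the other processes can poll for them.
	data, err := json.Marshal(map[string]string{"endpoint": "http://127.0.0.1:9999"}) // hypothetical fixture
	Expect(err).NotTo(HaveOccurred())
	return data
}, func(data []byte) {
	// Runs on every process once process #1 has finished (BeforeSuiteState).
	var cfg map[string]string
	Expect(json.Unmarshal(data, &cfg)).To(Succeed())
	sharedEndpoint = cfg["endpoint"]
})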

View File

@ -0,0 +1,287 @@
package internal
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"os"
"os/signal"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/onsi/ginkgo/v2/types"
)
var _SOURCE_CACHE = map[string][]string{}
type ProgressSignalRegistrar func(func()) context.CancelFunc
func RegisterForProgressSignal(handler func()) context.CancelFunc {
signalChannel := make(chan os.Signal, 1)
if len(PROGRESS_SIGNALS) > 0 {
signal.Notify(signalChannel, PROGRESS_SIGNALS...)
}
ctx, cancel := context.WithCancel(context.Background())
go func() {
for {
select {
case <-signalChannel:
handler()
case <-ctx.Done():
signal.Stop(signalChannel)
return
}
}
}()
return cancel
}
type ProgressStepCursor struct {
Text string
CodeLocation types.CodeLocation
StartTime time.Time
}
func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep types.SpecEvent, gwOutput string, timelineLocation types.TimelineLocation, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) {
pr := types.ProgressReport{
ParallelProcess: report.ParallelProcess,
RunningInParallel: isRunningInParallel,
ContainerHierarchyTexts: report.ContainerHierarchyTexts,
LeafNodeText: report.LeafNodeText,
LeafNodeLocation: report.LeafNodeLocation,
SpecStartTime: report.StartTime,
CurrentNodeType: currentNode.NodeType,
CurrentNodeText: currentNode.Text,
CurrentNodeLocation: currentNode.CodeLocation,
CurrentNodeStartTime: currentNodeStartTime,
CurrentStepText: currentStep.Message,
CurrentStepLocation: currentStep.CodeLocation,
CurrentStepStartTime: currentStep.TimelineLocation.Time,
AdditionalReports: additionalReports,
CapturedGinkgoWriterOutput: gwOutput,
TimelineLocation: timelineLocation,
}
goroutines, err := extractRunningGoroutines()
if err != nil {
return pr, err
}
pr.Goroutines = goroutines
// now we want to try to find goroutines of interest. these will be goroutines that have any function calls with code in packagesOfInterest:
packagesOfInterest := map[string]bool{}
packageFromFilename := func(filename string) string {
return filepath.Dir(filename)
}
addPackageFor := func(filename string) {
if filename != "" {
packagesOfInterest[packageFromFilename(filename)] = true
}
}
isPackageOfInterest := func(filename string) bool {
stackPackage := packageFromFilename(filename)
for packageOfInterest := range packagesOfInterest {
if strings.HasPrefix(stackPackage, packageOfInterest) {
return true
}
}
return false
}
for _, location := range report.ContainerHierarchyLocations {
addPackageFor(location.FileName)
}
addPackageFor(report.LeafNodeLocation.FileName)
addPackageFor(currentNode.CodeLocation.FileName)
addPackageFor(currentStep.CodeLocation.FileName)
//First, we find the SpecGoroutine - this will be the goroutine that includes `runNode`
specGoRoutineIdx := -1
runNodeFunctionCallIdx := -1
OUTER:
for goroutineIdx, goroutine := range pr.Goroutines {
for functionCallIdx, functionCall := range goroutine.Stack {
if strings.Contains(functionCall.Function, "ginkgo/v2/internal.(*Suite).runNode.func") {
specGoRoutineIdx = goroutineIdx
runNodeFunctionCallIdx = functionCallIdx
break OUTER
}
}
}
//Now, we find the first non-Ginkgo function call
if specGoRoutineIdx > -1 {
for runNodeFunctionCallIdx >= 0 {
fn := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function
file := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename
// these are all things that could potentially happen from within ginkgo
if strings.Contains(fn, "ginkgo/v2/internal") || strings.Contains(fn, "reflect.Value") || strings.Contains(file, "ginkgo/table_dsl") || strings.Contains(file, "ginkgo/core_dsl") {
runNodeFunctionCallIdx--
continue
}
//found it! let's add its package of interest
addPackageFor(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename)
break
}
}
ginkgoEntryPointIdx := -1
OUTER_GINKGO_ENTRY_POINT:
for goroutineIdx, goroutine := range pr.Goroutines {
for _, functionCall := range goroutine.Stack {
if strings.Contains(functionCall.Function, "ginkgo/v2.RunSpecs") {
ginkgoEntryPointIdx = goroutineIdx
break OUTER_GINKGO_ENTRY_POINT
}
}
}
// Now we go through all goroutines and highlight any lines with packages in `packagesOfInterest`
// Any goroutines with highlighted lines end up in the HighlightGoRoutines
for goroutineIdx, goroutine := range pr.Goroutines {
if goroutineIdx == ginkgoEntryPointIdx {
continue
}
if goroutineIdx == specGoRoutineIdx {
pr.Goroutines[goroutineIdx].IsSpecGoroutine = true
}
for functionCallIdx, functionCall := range goroutine.Stack {
if isPackageOfInterest(functionCall.Filename) {
goroutine.Stack[functionCallIdx].Highlight = true
goroutine.Stack[functionCallIdx].Source, goroutine.Stack[functionCallIdx].SourceHighlight = fetchSource(functionCall.Filename, functionCall.Line, 2, sourceRoots)
}
}
}
if !includeAll {
goroutines := []types.Goroutine{pr.SpecGoroutine()}
goroutines = append(goroutines, pr.HighlightedGoroutines()...)
pr.Goroutines = goroutines
}
return pr, nil
}
func extractRunningGoroutines() ([]types.Goroutine, error) {
var stack []byte
for size := 64 * 1024; ; size *= 2 {
stack = make([]byte, size)
if n := runtime.Stack(stack, true); n < size {
stack = stack[:n]
break
}
}
r := bufio.NewReader(bytes.NewReader(stack))
out := []types.Goroutine{}
idx := -1
for {
line, err := r.ReadString('\n')
if err == io.EOF {
break
}
line = strings.TrimSuffix(line, "\n")
//skip blank lines
if line == "" {
continue
}
//parse headers for new goroutine frames
if strings.HasPrefix(line, "goroutine") {
out = append(out, types.Goroutine{})
idx = len(out) - 1
line = strings.TrimPrefix(line, "goroutine ")
line = strings.TrimSuffix(line, ":")
fields := strings.SplitN(line, " ", 2)
if len(fields) != 2 {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine frame header: %s", line))
}
out[idx].ID, err = strconv.ParseUint(fields[0], 10, 64)
if err != nil {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine ID: %s", fields[0]))
}
out[idx].State = strings.TrimSuffix(strings.TrimPrefix(fields[1], "["), "]")
continue
}
//if we are here we must be at a function call entry in the stack
functionCall := types.FunctionCall{
Function: strings.TrimPrefix(line, "created by "), // no need to track 'created by'
}
line, err = r.ReadString('\n')
line = strings.TrimSuffix(line, "\n")
if err == io.EOF {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function))
}
line = strings.TrimLeft(line, " \t")
delimiterIdx := strings.LastIndex(line, ":")
if delimiterIdx == -1 {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename and line number: %s", line))
}
functionCall.Filename = line[:delimiterIdx]
line = strings.Split(line[delimiterIdx+1:], " ")[0]
lineNumber, err := strconv.ParseInt(line, 10, 64)
functionCall.Line = int(lineNumber)
if err != nil {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error()))
}
out[idx].Stack = append(out[idx].Stack, functionCall)
}
return out, nil
}
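
A small illustrative sketch (standard library only) of the raw text extractRunningGoroutines parses: runtime.Stack with all=true emits a goroutine <id> [<state>]: header for each goroutine, followed by alternating function and file:line lines.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	buf := make([]byte, 64*1024)
	n := runtime.Stack(buf, true) // true: include every goroutine, as the code above does
	// Typical output starts like:
	//   goroutine 1 [running]:
	//   main.main()
	//   	/tmp/example/main.go:11 +0x3c
	fmt.Printf("%s", buf[:n])
}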
func fetchSource(filename string, lineNumber int, span int, configuredSourceRoots []string) ([]string, int) {
if filename == "" {
return []string{}, 0
}
var lines []string
var ok bool
if lines, ok = _SOURCE_CACHE[filename]; !ok {
sourceRoots := []string{""}
sourceRoots = append(sourceRoots, configuredSourceRoots...)
var data []byte
var err error
var found bool
for _, root := range sourceRoots {
data, err = os.ReadFile(filepath.Join(root, filename))
if err == nil {
found = true
break
}
}
if !found {
return []string{}, 0
}
lines = strings.Split(string(data), "\n")
_SOURCE_CACHE[filename] = lines
}
startIndex := lineNumber - span - 1
endIndex := startIndex + span + span + 1
if startIndex < 0 {
startIndex = 0
}
if endIndex > len(lines) {
endIndex = len(lines)
}
highlightIndex := lineNumber - 1 - startIndex
return lines[startIndex:endIndex], highlightIndex
}
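
A quick worked check of the windowing arithmetic in fetchSource, written as a standalone sketch: with span 2 around line 10 of a 100-line file it selects slice indices 7 through 11 (source lines 8-12) and highlights index 2, i.e. the requested line sits in the middle of the window.

package main

import "fmt"

// window reproduces the index math from fetchSource above.
func window(lineNumber, span, totalLines int) (start, end, highlight int) {
	start = lineNumber - span - 1
	end = start + span + span + 1
	if start < 0 {
		start = 0
	}
	if end > totalLines {
		end = totalLines
	}
	highlight = lineNumber - 1 - start
	return start, end, highlight
}

func main() {
	start, end, highlight := window(10, 2, 100)
	fmt.Println(start, end, highlight) // 7 12 2 -> lines[7:12] covers source lines 8-12, highlight index 2
}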

View File

@ -0,0 +1,11 @@
//go:build freebsd || openbsd || netbsd || darwin || dragonfly
// +build freebsd openbsd netbsd darwin dragonfly
package internal
import (
"os"
"syscall"
)
var PROGRESS_SIGNALS = []os.Signal{syscall.SIGINFO, syscall.SIGUSR1}

View File

@ -0,0 +1,11 @@
//go:build linux || solaris
// +build linux solaris
package internal
import (
"os"
"syscall"
)
var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1}

View File

@ -0,0 +1,10 @@
//go:build wasm
package internal
import (
"os"
"syscall"
)
var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1}

View File

@ -0,0 +1,8 @@
//go:build windows
// +build windows
package internal
import "os"
var PROGRESS_SIGNALS = []os.Signal{}

View File

@ -0,0 +1,79 @@
package internal
import (
"context"
"sort"
"strings"
"sync"
"github.com/onsi/ginkgo/v2/types"
)
type ProgressReporterManager struct {
lock *sync.Mutex
progressReporters map[int]func() string
prCounter int
}
func NewProgressReporterManager() *ProgressReporterManager {
return &ProgressReporterManager{
progressReporters: map[int]func() string{},
lock: &sync.Mutex{},
}
}
func (prm *ProgressReporterManager) AttachProgressReporter(reporter func() string) func() {
prm.lock.Lock()
defer prm.lock.Unlock()
prm.prCounter += 1
prCounter := prm.prCounter
prm.progressReporters[prCounter] = reporter
return func() {
prm.lock.Lock()
defer prm.lock.Unlock()
delete(prm.progressReporters, prCounter)
}
}
func (prm *ProgressReporterManager) QueryProgressReporters(ctx context.Context, failer *Failer) []string {
prm.lock.Lock()
keys := []int{}
for key := range prm.progressReporters {
keys = append(keys, key)
}
sort.Ints(keys)
reporters := []func() string{}
for _, key := range keys {
reporters = append(reporters, prm.progressReporters[key])
}
prm.lock.Unlock()
if len(reporters) == 0 {
return nil
}
out := []string{}
for _, reporter := range reporters {
reportC := make(chan string, 1)
go func() {
defer func() {
e := recover()
if e != nil {
failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
reportC <- "failed to query attached progress reporter"
}
}()
reportC <- reporter()
}()
var report string
select {
case report = <-reportC:
case <-ctx.Done():
return out
}
if strings.TrimSpace(report) != "" {
out = append(out, report)
}
}
return out
}
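
A hedged usage sketch (not part of the vendored file) of the same mechanism from the public DSL: within a spec that takes a SpecContext, AttachProgressReporter registers extra detail for progress reports and returns a detach function, just as the manager above does. The timeout and message text are illustrative.

package example_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
)

var _ = It("adds custom detail to progress reports while waiting", func(ctx SpecContext) {
	// The attached function is queried whenever a progress report is generated
	// (progress signal, approaching timeout, etc.).
	detach := ctx.AttachProgressReporter(func() string {
		return "still waiting on the hypothetical backend to come up"
	})
	defer detach() // removes the reporter, like the closure returned above

	select {
	case <-time.After(100 * time.Millisecond):
	case <-ctx.Done():
	}
}, SpecTimeout(time.Second))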

View File

@ -0,0 +1,39 @@
package internal
import (
"time"
"github.com/onsi/ginkgo/v2/types"
)
type ReportEntry = types.ReportEntry
func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (ReportEntry, error) {
out := ReportEntry{
Visibility: types.ReportEntryVisibilityAlways,
Name: name,
Location: cl,
Time: time.Now(),
}
var didSetValue = false
for _, arg := range args {
switch x := arg.(type) {
case types.ReportEntryVisibility:
out.Visibility = x
case types.CodeLocation:
out.Location = x
case Offset:
out.Location = types.NewCodeLocation(2 + int(x))
case time.Time:
out.Time = x
default:
if didSetValue {
return ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg)
}
out.Value = types.WrapEntryValue(arg)
didSetValue = true
}
}
return out, nil
}
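
At the user level this argument parsing sits behind AddReportEntry. A hedged sketch assuming the public DSL and its re-exported visibility constants: a name, optional decorators (visibility, timestamp), and at most one value payload.

package example_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
)

var _ = It("records a report entry", func() {
	// Decorators are matched by type exactly as NewReportEntry above does;
	// the map is the single value payload.
	AddReportEntry("cache warm-up",
		ReportEntryVisibilityFailureOrVerbose, // shown only on failure or with -v
		time.Now(),
		map[string]int{"entries": 42},
	)
})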

e2e/vendor/github.com/onsi/ginkgo/v2/internal/spec.go
View File

@ -0,0 +1,87 @@
package internal
import (
"strings"
"time"
"github.com/onsi/ginkgo/v2/types"
)
type Spec struct {
Nodes Nodes
Skip bool
}
func (s Spec) SubjectID() uint {
return s.Nodes.FirstNodeWithType(types.NodeTypeIt).ID
}
func (s Spec) Text() string {
texts := []string{}
for i := range s.Nodes {
if s.Nodes[i].Text != "" {
texts = append(texts, s.Nodes[i].Text)
}
}
return strings.Join(texts, " ")
}
func (s Spec) FirstNodeWithType(nodeTypes types.NodeType) Node {
return s.Nodes.FirstNodeWithType(nodeTypes)
}
func (s Spec) FlakeAttempts() int {
flakeAttempts := 0
for i := range s.Nodes {
if s.Nodes[i].FlakeAttempts > 0 {
flakeAttempts = s.Nodes[i].FlakeAttempts
}
}
return flakeAttempts
}
func (s Spec) MustPassRepeatedly() int {
mustPassRepeatedly := 0
for i := range s.Nodes {
if s.Nodes[i].MustPassRepeatedly > 0 {
mustPassRepeatedly = s.Nodes[i].MustPassRepeatedly
}
}
return mustPassRepeatedly
}
func (s Spec) SpecTimeout() time.Duration {
return s.FirstNodeWithType(types.NodeTypeIt).SpecTimeout
}
type Specs []Spec
func (s Specs) HasAnySpecsMarkedPending() bool {
for i := range s {
if s[i].Nodes.HasNodeMarkedPending() {
return true
}
}
return false
}
func (s Specs) CountWithoutSkip() int {
n := 0
for i := range s {
if !s[i].Skip {
n += 1
}
}
return n
}
func (s Specs) AtIndices(indices SpecIndices) Specs {
out := make(Specs, len(indices))
for i, idx := range indices {
out[i] = s[idx]
}
return out
}

View File

@ -0,0 +1,47 @@
package internal
import (
"context"
"github.com/onsi/ginkgo/v2/types"
)
type SpecContext interface {
context.Context
SpecReport() types.SpecReport
AttachProgressReporter(func() string) func()
}
type specContext struct {
context.Context
*ProgressReporterManager
cancel context.CancelCauseFunc
suite *Suite
}
/*
SpecContext includes a reference to `suite` and embeds itself in itself as a "GINKGO_SPEC_CONTEXT" value. This allows users to create child Contexts without having down-stream consumers (e.g. Gomega) lose access to the SpecContext and its methods. This allows us to build extensions on top of Ginkgo that simply take an all-encompassing context.
Note that while SpecContext is used to enforce deadlines by Ginkgo it is not configured as a context.WithDeadline. Instead, Ginkgo owns responsibility for cancelling the context when the deadline elapses.
This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation.
*/
func NewSpecContext(suite *Suite) *specContext {
ctx, cancel := context.WithCancelCause(context.Background())
sc := &specContext{
cancel: cancel,
suite: suite,
ProgressReporterManager: NewProgressReporterManager(),
}
ctx = context.WithValue(ctx, "GINKGO_SPEC_CONTEXT", sc) //yes, yes, the go docs say don't use a string for a key... but we'd rather avoid a circular dependency between Gomega and Ginkgo
sc.Context = ctx //thank goodness for garbage collectors that can handle circular dependencies
return sc
}
func (sc *specContext) SpecReport() types.SpecReport {
return sc.suite.CurrentSpecReport()
}
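
A minimal sketch (illustrative) of the lookup the doc comment above describes: because the context embeds itself under the "GINKGO_SPEC_CONTEXT" key, a helper handed any derived context can recover the SpecContext, which is how libraries such as Gomega keep access to it.

package example_test

import (
	"context"

	. "github.com/onsi/ginkgo/v2"
)

// specContextFrom recovers the SpecContext, if any, from an arbitrary derived context.
func specContextFrom(ctx context.Context) (SpecContext, bool) {
	sc, ok := ctx.Value("GINKGO_SPEC_CONTEXT").(SpecContext)
	return sc, ok
}

var _ = It("finds the spec context through a child context", func(ctx SpecContext) {
	child, cancel := context.WithCancel(ctx)
	defer cancel()

	if sc, ok := specContextFrom(child); ok {
		detach := sc.AttachProgressReporter(func() string { return "recovered from a child context" })
		defer detach()
	}
})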

e2e/vendor/github.com/onsi/ginkgo/v2/internal/suite.go

File diff suppressed because it is too large

View File

@ -0,0 +1,210 @@
package testingtproxy
import (
"fmt"
"io"
"os"
"github.com/onsi/ginkgo/v2/formatter"
"github.com/onsi/ginkgo/v2/internal"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
)
type failFunc func(message string, callerSkip ...int)
type skipFunc func(message string, callerSkip ...int)
type cleanupFunc func(args ...any)
type reportFunc func() types.SpecReport
type addReportEntryFunc func(names string, args ...any)
type ginkgoWriterInterface interface {
io.Writer
Print(a ...interface{})
Printf(format string, a ...interface{})
Println(a ...interface{})
}
type ginkgoRecoverFunc func()
type attachProgressReporterFunc func(func() string) func()
func New(writer ginkgoWriterInterface, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, addReportEntry addReportEntryFunc, ginkgoRecover ginkgoRecoverFunc, attachProgressReporter attachProgressReporterFunc, randomSeed int64, parallelProcess int, parallelTotal int, noColor bool, offset int) *ginkgoTestingTProxy {
return &ginkgoTestingTProxy{
fail: fail,
offset: offset,
writer: writer,
skip: skip,
cleanup: cleanup,
report: report,
addReportEntry: addReportEntry,
ginkgoRecover: ginkgoRecover,
attachProgressReporter: attachProgressReporter,
randomSeed: randomSeed,
parallelProcess: parallelProcess,
parallelTotal: parallelTotal,
f: formatter.NewWithNoColorBool(noColor),
}
}
type ginkgoTestingTProxy struct {
fail failFunc
skip skipFunc
cleanup cleanupFunc
report reportFunc
offset int
writer ginkgoWriterInterface
addReportEntry addReportEntryFunc
ginkgoRecover ginkgoRecoverFunc
attachProgressReporter attachProgressReporterFunc
randomSeed int64
parallelProcess int
parallelTotal int
f formatter.Formatter
}
// basic testing.T support
func (t *ginkgoTestingTProxy) Cleanup(f func()) {
t.cleanup(f, internal.Offset(1))
}
func (t *ginkgoTestingTProxy) Setenv(key, value string) {
originalValue, exists := os.LookupEnv(key)
if exists {
t.cleanup(os.Setenv, key, originalValue, internal.Offset(1))
} else {
t.cleanup(os.Unsetenv, key, internal.Offset(1))
}
err := os.Setenv(key, value)
if err != nil {
t.fail(fmt.Sprintf("Failed to set environment variable: %v", err), 1)
}
}
func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
t.fail(fmt.Sprintln(args...), t.offset)
}
func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
t.fail(fmt.Sprintf(format, args...), t.offset)
}
func (t *ginkgoTestingTProxy) Fail() {
t.fail("failed", t.offset)
}
func (t *ginkgoTestingTProxy) FailNow() {
t.fail("failed", t.offset)
}
func (t *ginkgoTestingTProxy) Failed() bool {
return t.report().Failed()
}
func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
t.fail(fmt.Sprintln(args...), t.offset)
}
func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
t.fail(fmt.Sprintf(format, args...), t.offset)
}
func (t *ginkgoTestingTProxy) Helper() {
types.MarkAsHelper(1)
}
func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
fmt.Fprintln(t.writer, args...)
}
func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
t.Log(fmt.Sprintf(format, args...))
}
func (t *ginkgoTestingTProxy) Name() string {
return t.report().FullText()
}
func (t *ginkgoTestingTProxy) Parallel() {
// No-op
}
func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
t.skip(fmt.Sprintln(args...), t.offset)
}
func (t *ginkgoTestingTProxy) SkipNow() {
t.skip("skip", t.offset)
}
func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
t.skip(fmt.Sprintf(format, args...), t.offset)
}
func (t *ginkgoTestingTProxy) Skipped() bool {
return t.report().State.Is(types.SpecStateSkipped)
}
func (t *ginkgoTestingTProxy) TempDir() string {
tmpDir, err := os.MkdirTemp("", "ginkgo")
if err != nil {
t.fail(fmt.Sprintf("Failed to create temporary directory: %v", err), 1)
return ""
}
t.cleanup(os.RemoveAll, tmpDir)
return tmpDir
}
// FullGinkgoTInterface
func (t *ginkgoTestingTProxy) AddReportEntryVisibilityAlways(name string, args ...any) {
finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityAlways}
t.addReportEntry(name, append(finalArgs, args...)...)
}
func (t *ginkgoTestingTProxy) AddReportEntryVisibilityFailureOrVerbose(name string, args ...any) {
finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose}
t.addReportEntry(name, append(finalArgs, args...)...)
}
func (t *ginkgoTestingTProxy) AddReportEntryVisibilityNever(name string, args ...any) {
finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityNever}
t.addReportEntry(name, append(finalArgs, args...)...)
}
func (t *ginkgoTestingTProxy) Print(a ...any) {
t.writer.Print(a...)
}
func (t *ginkgoTestingTProxy) Printf(format string, a ...any) {
t.writer.Printf(format, a...)
}
func (t *ginkgoTestingTProxy) Println(a ...any) {
t.writer.Println(a...)
}
func (t *ginkgoTestingTProxy) F(format string, args ...any) string {
return t.f.F(format, args...)
}
func (t *ginkgoTestingTProxy) Fi(indentation uint, format string, args ...any) string {
return t.f.Fi(indentation, format, args...)
}
func (t *ginkgoTestingTProxy) Fiw(indentation uint, maxWidth uint, format string, args ...any) string {
return t.f.Fiw(indentation, maxWidth, format, args...)
}
func (t *ginkgoTestingTProxy) RenderTimeline() string {
return reporters.RenderTimeline(t.report(), false)
}
func (t *ginkgoTestingTProxy) GinkgoRecover() {
t.ginkgoRecover()
}
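// DeferCleanup forwards to Ginkgo's DeferCleanup, adding an Offset so the
// reported code location points at the caller rather than this proxy.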
func (t *ginkgoTestingTProxy) DeferCleanup(args ...any) {
finalArgs := []any{internal.Offset(1)}
t.cleanup(append(finalArgs, args...)...)
}
func (t *ginkgoTestingTProxy) RandomSeed() int64 {
return t.randomSeed
}
func (t *ginkgoTestingTProxy) ParallelProcess() int {
return t.parallelProcess
}
func (t *ginkgoTestingTProxy) ParallelTotal() int {
return t.parallelTotal
}
func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() {
return t.attachProgressReporter(f)
}

77
e2e/vendor/github.com/onsi/ginkgo/v2/internal/tree.go generated vendored Normal file
View File

@ -0,0 +1,77 @@
package internal
import "github.com/onsi/ginkgo/v2/types"
type TreeNode struct {
Node Node
Parent *TreeNode
Children TreeNodes
}
func (tn *TreeNode) AppendChild(child *TreeNode) {
tn.Children = append(tn.Children, child)
child.Parent = tn
}
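// AncestorNodeChain returns the nodes from the root of the tree down to, and
// including, this node; the zero-valued root node itself is omitted.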
func (tn *TreeNode) AncestorNodeChain() Nodes {
if tn.Parent == nil || tn.Parent.Node.IsZero() {
return Nodes{tn.Node}
}
return append(tn.Parent.AncestorNodeChain(), tn.Node)
}
type TreeNodes []*TreeNode
func (tn TreeNodes) Nodes() Nodes {
out := make(Nodes, len(tn))
for i := range tn {
out[i] = tn[i].Node
}
return out
}
func (tn TreeNodes) WithID(id uint) *TreeNode {
for i := range tn {
if tn[i].Node.ID == id {
return tn[i]
}
}
return nil
}
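// GenerateSpecsFromTreeRoot flattens the container tree into Specs: it emits
// one Spec per It node, and each Spec's Nodes carries the enclosing
// containers together with the setup/teardown nodes that surround the It at
// every nesting level, in tree order.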
func GenerateSpecsFromTreeRoot(tree *TreeNode) Specs {
var walkTree func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs
walkTree = func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs {
tests := Specs{}
nodes := make(Nodes, len(trees))
for i := range trees {
nodes[i] = trees[i].Node
nodes[i].NestingLevel = nestingLevel
}
for i := range nodes {
if !nodes[i].NodeType.Is(types.NodeTypesForContainerAndIt) {
continue
}
leftNodes, rightNodes := nodes.SplitAround(nodes[i])
leftNodes = leftNodes.WithoutType(types.NodeTypesForContainerAndIt)
rightNodes = rightNodes.WithoutType(types.NodeTypesForContainerAndIt)
leftNodes = lNodes.CopyAppend(leftNodes...)
rightNodes = rightNodes.CopyAppend(rNodes...)
if nodes[i].NodeType.Is(types.NodeTypeIt) {
tests = append(tests, Spec{Nodes: leftNodes.CopyAppend(nodes[i]).CopyAppend(rightNodes...)})
} else {
treeNode := trees.WithID(nodes[i].ID)
tests = append(tests, walkTree(nestingLevel+1, leftNodes.CopyAppend(nodes[i]), rightNodes, treeNode.Children)...)
}
}
return tests
}
return walkTree(0, Nodes{}, Nodes{}, tree.Children)
}

144
e2e/vendor/github.com/onsi/ginkgo/v2/internal/writer.go generated vendored Normal file
View File

@ -0,0 +1,144 @@
package internal
import (
"bytes"
"fmt"
"io"
"sync"
"github.com/go-logr/logr"
"github.com/go-logr/logr/funcr"
)
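// WriterMode selects how Write behaves: StreamAndBuffer echoes output to the
// wrapped writer as it arrives while also buffering it, whereas BufferOnly
// only buffers.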
type WriterMode uint
const (
WriterModeStreamAndBuffer WriterMode = iota
WriterModeBufferOnly
)
type WriterInterface interface {
io.Writer
Truncate()
Bytes() []byte
Len() int
}
// Writer implements WriterInterface and GinkgoWriterInterface
type Writer struct {
buffer *bytes.Buffer
outWriter io.Writer
lock *sync.Mutex
mode WriterMode
streamIndent []byte
indentNext bool
teeWriters []io.Writer
}
func NewWriter(outWriter io.Writer) *Writer {
return &Writer{
buffer: &bytes.Buffer{},
lock: &sync.Mutex{},
outWriter: outWriter,
mode: WriterModeStreamAndBuffer,
streamIndent: []byte(" "),
indentNext: true,
}
}
func (w *Writer) SetMode(mode WriterMode) {
w.lock.Lock()
defer w.lock.Unlock()
w.mode = mode
}
func (w *Writer) Len() int {
w.lock.Lock()
defer w.lock.Unlock()
return w.buffer.Len()
}
var newline = []byte("\n")
func (w *Writer) Write(b []byte) (n int, err error) {
w.lock.Lock()
defer w.lock.Unlock()
for _, teeWriter := range w.teeWriters {
teeWriter.Write(b)
}
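	// In StreamAndBuffer mode each completed line is echoed to outWriter with
	// streamIndent prepended; the raw bytes are always appended to the buffer
	// below regardless of mode.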
if w.mode == WriterModeStreamAndBuffer {
line, remaining, found := []byte{}, b, false
for len(remaining) > 0 {
line, remaining, found = bytes.Cut(remaining, newline)
if len(line) > 0 {
if w.indentNext {
w.outWriter.Write(w.streamIndent)
w.indentNext = false
}
w.outWriter.Write(line)
}
if found {
w.outWriter.Write(newline)
w.indentNext = true
}
}
}
return w.buffer.Write(b)
}
func (w *Writer) Truncate() {
w.lock.Lock()
defer w.lock.Unlock()
w.buffer.Reset()
}
func (w *Writer) Bytes() []byte {
w.lock.Lock()
defer w.lock.Unlock()
b := w.buffer.Bytes()
copied := make([]byte, len(b))
copy(copied, b)
return copied
}
// GinkgoWriterInterface
func (w *Writer) TeeTo(writer io.Writer) {
w.lock.Lock()
defer w.lock.Unlock()
w.teeWriters = append(w.teeWriters, writer)
}
func (w *Writer) ClearTeeWriters() {
w.lock.Lock()
defer w.lock.Unlock()
w.teeWriters = []io.Writer{}
}
func (w *Writer) Print(a ...interface{}) {
fmt.Fprint(w, a...)
}
func (w *Writer) Printf(format string, a ...interface{}) {
fmt.Fprintf(w, format, a...)
}
func (w *Writer) Println(a ...interface{}) {
fmt.Fprintln(w, a...)
}
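// GinkgoLogrFunc adapts a Writer into a logr.Logger via funcr: every log line
// is formatted (with its prefix, when present) and written to the Writer.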
func GinkgoLogrFunc(writer *Writer) logr.Logger {
return funcr.New(func(prefix, args string) {
if prefix == "" {
writer.Printf("%s\n", args)
} else {
writer.Printf("%s %s\n", prefix, args)
}
}, funcr.Options{})
}