Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)
vendor update for E2E framework
Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
32  vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go  generated  vendored  Normal file
@@ -0,0 +1,32 @@
package codelocation

import (
	"regexp"
	"runtime"
	"runtime/debug"
	"strings"

	"github.com/onsi/ginkgo/types"
)

func New(skip int) types.CodeLocation {
	_, file, line, _ := runtime.Caller(skip + 1)
	stackTrace := PruneStack(string(debug.Stack()), skip)
	return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
}

func PruneStack(fullStackTrace string, skip int) string {
	stack := strings.Split(fullStackTrace, "\n")
	if len(stack) > 2*(skip+1) {
		stack = stack[2*(skip+1):]
	}
	prunedStack := []string{}
	re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
	for i := 0; i < len(stack)/2; i++ {
		if !re.Match([]byte(stack[i*2])) {
			prunedStack = append(prunedStack, stack[i*2])
			prunedStack = append(prunedStack, stack[i*2+1])
		}
	}
	return strings.Join(prunedStack, "\n")
}
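For orientation, a minimal sketch of how this package is exercised (illustrative only: as a Go `internal` package it is importable solely from within the ginkgo module, so the `main` wrapper here is hypothetical):

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/internal/codelocation"
)

func main() {
	// skip = 0 attributes the location to the direct caller of New, i.e. this line.
	loc := codelocation.New(0)
	fmt.Printf("%s:%d\n", loc.FileName, loc.LineNumber)
	// FullStackTrace has ginkgo and runtime frames removed by PruneStack.
	fmt.Println(loc.FullStackTrace)
}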
151  vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go  generated  vendored  Normal file
@@ -0,0 +1,151 @@
package containernode

import (
	"math/rand"
	"sort"

	"github.com/onsi/ginkgo/internal/leafnodes"
	"github.com/onsi/ginkgo/types"
)

type subjectOrContainerNode struct {
	containerNode *ContainerNode
	subjectNode   leafnodes.SubjectNode
}

func (n subjectOrContainerNode) text() string {
	if n.containerNode != nil {
		return n.containerNode.Text()
	} else {
		return n.subjectNode.Text()
	}
}

type CollatedNodes struct {
	Containers []*ContainerNode
	Subject    leafnodes.SubjectNode
}

type ContainerNode struct {
	text         string
	flag         types.FlagType
	codeLocation types.CodeLocation

	setupNodes               []leafnodes.BasicNode
	subjectAndContainerNodes []subjectOrContainerNode
}

func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode {
	return &ContainerNode{
		text:         text,
		flag:         flag,
		codeLocation: codeLocation,
	}
}

func (container *ContainerNode) Shuffle(r *rand.Rand) {
	sort.Sort(container)
	permutation := r.Perm(len(container.subjectAndContainerNodes))
	shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes))
	for i, j := range permutation {
		shuffledNodes[i] = container.subjectAndContainerNodes[j]
	}
	container.subjectAndContainerNodes = shuffledNodes
}

func (node *ContainerNode) BackPropagateProgrammaticFocus() bool {
	if node.flag == types.FlagTypePending {
		return false
	}

	shouldUnfocus := false
	for _, subjectOrContainerNode := range node.subjectAndContainerNodes {
		if subjectOrContainerNode.containerNode != nil {
			shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus
		} else {
			shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus
		}
	}

	if shouldUnfocus {
		if node.flag == types.FlagTypeFocused {
			node.flag = types.FlagTypeNone
		}
		return true
	}

	return node.flag == types.FlagTypeFocused
}

func (node *ContainerNode) Collate() []CollatedNodes {
	return node.collate([]*ContainerNode{})
}

func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes {
	collated := make([]CollatedNodes, 0)

	containers := make([]*ContainerNode, len(enclosingContainers))
	copy(containers, enclosingContainers)
	containers = append(containers, node)

	for _, subjectOrContainer := range node.subjectAndContainerNodes {
		if subjectOrContainer.containerNode != nil {
			collated = append(collated, subjectOrContainer.containerNode.collate(containers)...)
		} else {
			collated = append(collated, CollatedNodes{
				Containers: containers,
				Subject:    subjectOrContainer.subjectNode,
			})
		}
	}

	return collated
}

func (node *ContainerNode) PushContainerNode(container *ContainerNode) {
	node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container})
}

func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) {
	node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject})
}

func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) {
	node.setupNodes = append(node.setupNodes, setupNode)
}

func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode {
	nodes := []leafnodes.BasicNode{}
	for _, setupNode := range node.setupNodes {
		if setupNode.Type() == nodeType {
			nodes = append(nodes, setupNode)
		}
	}
	return nodes
}

func (node *ContainerNode) Text() string {
	return node.text
}

func (node *ContainerNode) CodeLocation() types.CodeLocation {
	return node.codeLocation
}

func (node *ContainerNode) Flag() types.FlagType {
	return node.flag
}

//sort.Interface

func (node *ContainerNode) Len() int {
	return len(node.subjectAndContainerNodes)
}

func (node *ContainerNode) Less(i, j int) bool {
	return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text()
}

func (node *ContainerNode) Swap(i, j int) {
	node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i]
}
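Shuffle sorts by text before permuting, so for a fixed seed the resulting order is independent of the order in which children were pushed. A minimal sketch (same caveat: internal package, names illustrative):

package main

import (
	"math/rand"

	"github.com/onsi/ginkgo/internal/containernode"
	"github.com/onsi/ginkgo/types"
)

func main() {
	root := containernode.New("root", types.FlagTypeNone, types.CodeLocation{})
	root.PushContainerNode(containernode.New("b", types.FlagTypeNone, types.CodeLocation{}))
	root.PushContainerNode(containernode.New("a", types.FlagTypeNone, types.CodeLocation{}))

	// Seeding with the suite's random seed makes the shuffled order reproducible.
	root.Shuffle(rand.New(rand.NewSource(42)))
}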
92  vendor/github.com/onsi/ginkgo/internal/failer/failer.go  generated  vendored  Normal file
@@ -0,0 +1,92 @@
package failer

import (
	"fmt"
	"sync"

	"github.com/onsi/ginkgo/types"
)

type Failer struct {
	lock    *sync.Mutex
	failure types.SpecFailure
	state   types.SpecState
}

func New() *Failer {
	return &Failer{
		lock:  &sync.Mutex{},
		state: types.SpecStatePassed,
	}
}

func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.state == types.SpecStatePassed {
		f.state = types.SpecStatePanicked
		f.failure = types.SpecFailure{
			Message:        "Test Panicked",
			Location:       location,
			ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
		}
	}
}

func (f *Failer) Timeout(location types.CodeLocation) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.state == types.SpecStatePassed {
		f.state = types.SpecStateTimedOut
		f.failure = types.SpecFailure{
			Message:  "Timed out",
			Location: location,
		}
	}
}

func (f *Failer) Fail(message string, location types.CodeLocation) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.state == types.SpecStatePassed {
		f.state = types.SpecStateFailed
		f.failure = types.SpecFailure{
			Message:  message,
			Location: location,
		}
	}
}

func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) {
	f.lock.Lock()
	defer f.lock.Unlock()

	failure := f.failure
	outcome := f.state
	if outcome != types.SpecStatePassed {
		failure.ComponentType = componentType
		failure.ComponentIndex = componentIndex
		failure.ComponentCodeLocation = componentCodeLocation
	}

	f.state = types.SpecStatePassed
	f.failure = types.SpecFailure{}

	return failure, outcome
}

func (f *Failer) Skip(message string, location types.CodeLocation) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.state == types.SpecStatePassed {
		f.state = types.SpecStateSkipped
		f.failure = types.SpecFailure{
			Message:  message,
			Location: location,
		}
	}
}
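The Failer implements first-failure-wins semantics: every mutator is guarded by `if f.state == types.SpecStatePassed`, so only the first reported failure sticks, and Drain both returns the outcome and resets the state for the next spec. A sketch (illustrative use of internal packages):

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/internal/codelocation"
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
)

func main() {
	f := failer.New()
	loc := codelocation.New(0)

	f.Fail("first failure", loc)
	f.Fail("second failure", loc) // ignored: state already left SpecStatePassed

	failure, state := f.Drain(types.SpecComponentTypeIt, 0, loc)
	fmt.Println(state == types.SpecStateFailed, failure.Message) // true first failure
	// Drain has now reset the Failer to SpecStatePassed for the next spec.
}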
103  vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go  generated  vendored  Normal file
@@ -0,0 +1,103 @@
package leafnodes

import (
	"math"
	"time"

	"sync"

	"github.com/onsi/ginkgo/types"
)

type benchmarker struct {
	mu           sync.Mutex
	measurements map[string]*types.SpecMeasurement
	orderCounter int
}

func newBenchmarker() *benchmarker {
	return &benchmarker{
		measurements: make(map[string]*types.SpecMeasurement, 0),
	}
}

func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {
	t := time.Now()
	body()
	elapsedTime = time.Since(t)

	b.mu.Lock()
	defer b.mu.Unlock()
	measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", 3, info...)
	measurement.Results = append(measurement.Results, elapsedTime.Seconds())

	return
}

func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
	b.mu.Lock()
	measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...)
	defer b.mu.Unlock()
	measurement.Results = append(measurement.Results, value)
}

func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) {
	b.mu.Lock()
	measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...)
	defer b.mu.Unlock()
	measurement.Results = append(measurement.Results, value)
}

func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, precision int, info ...interface{}) *types.SpecMeasurement {
	measurement, ok := b.measurements[name]
	if !ok {
		var computedInfo interface{}
		computedInfo = nil
		if len(info) > 0 {
			computedInfo = info[0]
		}
		measurement = &types.SpecMeasurement{
			Name:          name,
			Info:          computedInfo,
			Order:         b.orderCounter,
			SmallestLabel: smallestLabel,
			LargestLabel:  largestLabel,
			AverageLabel:  averageLabel,
			Units:         units,
			Precision:     precision,
			Results:       make([]float64, 0),
		}
		b.measurements[name] = measurement
		b.orderCounter++
	}

	return measurement
}

func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {
	b.mu.Lock()
	defer b.mu.Unlock()
	for _, measurement := range b.measurements {
		measurement.Smallest = math.MaxFloat64
		measurement.Largest = -math.MaxFloat64
		sum := float64(0)
		sumOfSquares := float64(0)

		for _, result := range measurement.Results {
			if result > measurement.Largest {
				measurement.Largest = result
			}
			if result < measurement.Smallest {
				measurement.Smallest = result
			}
			sum += result
			sumOfSquares += result * result
		}

		n := float64(len(measurement.Results))
		measurement.Average = sum / n
		measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n))
	}

	return b.measurements
}
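Note that measurementsReport computes the population standard deviation via the identity Var(x) = E[x²] − (E[x])², i.e. sqrt(sumOfSquares/n − (sum/n)²). For example, for Results = {1, 3}: the mean is 2, the mean of squares is 5, and the standard deviation is sqrt(5 − 4) = 1.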
19  vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go  generated  vendored  Normal file
@@ -0,0 +1,19 @@
package leafnodes

import (
	"github.com/onsi/ginkgo/types"
)

type BasicNode interface {
	Type() types.SpecComponentType
	Run() (types.SpecState, types.SpecFailure)
	CodeLocation() types.CodeLocation
}

type SubjectNode interface {
	BasicNode

	Text() string
	Flag() types.FlagType
	Samples() int
}
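Hypothetical compile-time assertions (not part of the vendored file, but valid inside package leafnodes) make the relationships among the types below explicit: ItNode and MeasureNode are SubjectNodes, while SetupNode only satisfies BasicNode:

var _ SubjectNode = (*ItNode)(nil)
var _ SubjectNode = (*MeasureNode)(nil)
var _ BasicNode = (*SetupNode)(nil)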
47  vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go  generated  vendored  Normal file
@@ -0,0 +1,47 @@
package leafnodes

import (
	"time"

	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
)

type ItNode struct {
	runner *runner

	flag types.FlagType
	text string
}

func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode {
	return &ItNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex),
		flag:   flag,
		text:   text,
	}
}

func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
	return node.runner.run()
}

func (node *ItNode) Type() types.SpecComponentType {
	return types.SpecComponentTypeIt
}

func (node *ItNode) Text() string {
	return node.text
}

func (node *ItNode) Flag() types.FlagType {
	return node.flag
}

func (node *ItNode) CodeLocation() types.CodeLocation {
	return node.runner.codeLocation
}

func (node *ItNode) Samples() int {
	return 1
}
62  vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go  generated  vendored  Normal file
@@ -0,0 +1,62 @@
package leafnodes

import (
	"reflect"

	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
)

type MeasureNode struct {
	runner *runner

	text        string
	flag        types.FlagType
	samples     int
	benchmarker *benchmarker
}

func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode {
	benchmarker := newBenchmarker()

	wrappedBody := func() {
		reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)})
	}

	return &MeasureNode{
		runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex),

		text:        text,
		flag:        flag,
		samples:     samples,
		benchmarker: benchmarker,
	}
}

func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
	return node.runner.run()
}

func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement {
	return node.benchmarker.measurementsReport()
}

func (node *MeasureNode) Type() types.SpecComponentType {
	return types.SpecComponentTypeMeasure
}

func (node *MeasureNode) Text() string {
	return node.text
}

func (node *MeasureNode) Flag() types.FlagType {
	return node.flag
}

func (node *MeasureNode) CodeLocation() types.CodeLocation {
	return node.runner.codeLocation
}

func (node *MeasureNode) Samples() int {
	return node.samples
}
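This is the machinery behind Ginkgo v1's public Measure API; a sketch of a suite that feeds the benchmarker (assumes the usual dot-imports of ginkgo and gomega):

var _ = Describe("sorting", func() {
	Measure("sorts quickly", func(b Benchmarker) {
		data := rand.Perm(10000) // fresh input for each sample
		runtime := b.Time("runtime", func() {
			sort.Ints(data)
		})
		Expect(runtime.Seconds()).To(BeNumerically("<", 0.2))
		b.RecordValue("input length", float64(len(data)))
	}, 10) // samples: the body runs 10 times
})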
117  vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go  generated  vendored  Normal file
@@ -0,0 +1,117 @@
package leafnodes

import (
	"fmt"
	"reflect"
	"time"

	"github.com/onsi/ginkgo/internal/codelocation"
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
)

type runner struct {
	isAsync          bool
	asyncFunc        func(chan<- interface{})
	syncFunc         func()
	codeLocation     types.CodeLocation
	timeoutThreshold time.Duration
	nodeType         types.SpecComponentType
	componentIndex   int
	failer           *failer.Failer
}

func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner {
	bodyType := reflect.TypeOf(body)
	if bodyType.Kind() != reflect.Func {
		panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation))
	}

	runner := &runner{
		codeLocation:     codeLocation,
		timeoutThreshold: timeout,
		failer:           failer,
		nodeType:         nodeType,
		componentIndex:   componentIndex,
	}

	switch bodyType.NumIn() {
	case 0:
		runner.syncFunc = body.(func())
		return runner
	case 1:
		if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) {
			panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation))
		}

		wrappedBody := func(done chan<- interface{}) {
			bodyValue := reflect.ValueOf(body)
			bodyValue.Call([]reflect.Value{reflect.ValueOf(done)})
		}

		runner.isAsync = true
		runner.asyncFunc = wrappedBody
		return runner
	}

	panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation))
}

func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) {
	if r.isAsync {
		return r.runAsync()
	} else {
		return r.runSync()
	}
}

func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) {
	done := make(chan interface{}, 1)

	go func() {
		finished := false

		defer func() {
			if e := recover(); e != nil || !finished {
				r.failer.Panic(codelocation.New(2), e)
				select {
				case <-done:
					break
				default:
					close(done)
				}
			}
		}()

		r.asyncFunc(done)
		finished = true
	}()

	// If this goroutine gets no CPU time before the select block,
	// the <-done case may complete even if the test took longer than the timeoutThreshold.
	// This can cause flaky behaviour, but we haven't seen it in the wild.
	select {
	case <-done:
	case <-time.After(r.timeoutThreshold):
		r.failer.Timeout(r.codeLocation)
	}

	failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
	return
}
func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
	finished := false

	defer func() {
		if e := recover(); e != nil || !finished {
			r.failer.Panic(codelocation.New(2), e)
		}

		failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
	}()

	r.syncFunc()
	finished = true

	return
}
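newRunner's reflection switch corresponds to the two body shapes Ginkgo v1 accepts; a sketch via the public API (dot-imports assumed):

It("runs synchronously", func() {
	Expect(2 + 2).To(Equal(4))
})

// A single Done (chan interface{}) parameter selects the async path; the
// trailing float is the timeout in seconds enforced by runAsync.
It("runs asynchronously", func(done Done) {
	go func() {
		defer GinkgoRecover()
		close(done) // must fire before the timeout, or Failer.Timeout is called
	}()
}, 5.0)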
48  vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go  generated  vendored  Normal file
@@ -0,0 +1,48 @@
package leafnodes

import (
	"time"

	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
)

type SetupNode struct {
	runner *runner
}

func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
	return node.runner.run()
}

func (node *SetupNode) Type() types.SpecComponentType {
	return node.runner.nodeType
}

func (node *SetupNode) CodeLocation() types.CodeLocation {
	return node.runner.codeLocation
}

func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
	return &SetupNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex),
	}
}

func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
	return &SetupNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex),
	}
}

func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
	return &SetupNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
	}
}

func NewJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
	return &SetupNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustAfterEach, componentIndex),
	}
}
55  vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go  generated  vendored  Normal file
@@ -0,0 +1,55 @@
package leafnodes

import (
	"time"

	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
)

type SuiteNode interface {
	Run(parallelNode int, parallelTotal int, syncHost string) bool
	Passed() bool
	Summary() *types.SetupSummary
}

type simpleSuiteNode struct {
	runner  *runner
	outcome types.SpecState
	failure types.SpecFailure
	runTime time.Duration
}

func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
	t := time.Now()
	node.outcome, node.failure = node.runner.run()
	node.runTime = time.Since(t)

	return node.outcome == types.SpecStatePassed
}

func (node *simpleSuiteNode) Passed() bool {
	return node.outcome == types.SpecStatePassed
}

func (node *simpleSuiteNode) Summary() *types.SetupSummary {
	return &types.SetupSummary{
		ComponentType: node.runner.nodeType,
		CodeLocation:  node.runner.codeLocation,
		State:         node.outcome,
		RunTime:       node.runTime,
		Failure:       node.failure,
	}
}

func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
	return &simpleSuiteNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
	}
}

func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
	return &simpleSuiteNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
	}
}
90  vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go  generated  vendored  Normal file
@@ -0,0 +1,90 @@
package leafnodes

import (
	"encoding/json"
	"io/ioutil"
	"net/http"
	"time"

	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
)

type synchronizedAfterSuiteNode struct {
	runnerA *runner
	runnerB *runner

	outcome types.SpecState
	failure types.SpecFailure
	runTime time.Duration
}

func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
	return &synchronizedAfterSuiteNode{
		runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
		runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
	}
}

func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
	node.outcome, node.failure = node.runnerA.run()

	if parallelNode == 1 {
		if parallelTotal > 1 {
			node.waitUntilOtherNodesAreDone(syncHost)
		}

		outcome, failure := node.runnerB.run()

		if node.outcome == types.SpecStatePassed {
			node.outcome, node.failure = outcome, failure
		}
	}

	return node.outcome == types.SpecStatePassed
}

func (node *synchronizedAfterSuiteNode) Passed() bool {
	return node.outcome == types.SpecStatePassed
}

func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary {
	return &types.SetupSummary{
		ComponentType: node.runnerA.nodeType,
		CodeLocation:  node.runnerA.codeLocation,
		State:         node.outcome,
		RunTime:       node.runTime,
		Failure:       node.failure,
	}
}

func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) {
	for {
		if node.canRun(syncHost) {
			return
		}

		time.Sleep(50 * time.Millisecond)
	}
}

func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool {
	resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
	if err != nil || resp.StatusCode != http.StatusOK {
		return false
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return false
	}
	resp.Body.Close()

	afterSuiteData := types.RemoteAfterSuiteData{}
	err = json.Unmarshal(body, &afterSuiteData)
	if err != nil {
		return false
	}

	return afterSuiteData.CanRun
}
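The public-API counterpart, as a sketch: bodyA (runnerA) runs on every parallel node, while bodyB (runnerB) runs only on node 1, after polling /RemoteAfterSuiteData shows the other nodes are done:

var _ = SynchronizedAfterSuite(func() {
	// per-node cleanup, runs everywhere
}, func() {
	// node-1-only cleanup, e.g. tearing down a shared fixture
})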
181  vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go  generated  vendored  Normal file
@@ -0,0 +1,181 @@
package leafnodes

import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"net/http"
	"reflect"
	"time"

	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
)

type synchronizedBeforeSuiteNode struct {
	runnerA *runner
	runnerB *runner

	data []byte

	outcome types.SpecState
	failure types.SpecFailure
	runTime time.Duration
}

func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
	node := &synchronizedBeforeSuiteNode{}

	node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
	node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)

	return node
}

func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
	t := time.Now()
	defer func() {
		node.runTime = time.Since(t)
	}()

	if parallelNode == 1 {
		node.outcome, node.failure = node.runA(parallelTotal, syncHost)
	} else {
		node.outcome, node.failure = node.waitForA(syncHost)
	}

	if node.outcome != types.SpecStatePassed {
		return false
	}
	node.outcome, node.failure = node.runnerB.run()

	return node.outcome == types.SpecStatePassed
}

func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) {
	outcome, failure := node.runnerA.run()

	if parallelTotal > 1 {
		state := types.RemoteBeforeSuiteStatePassed
		if outcome != types.SpecStatePassed {
			state = types.RemoteBeforeSuiteStateFailed
		}
		json := (types.RemoteBeforeSuiteData{
			Data:  node.data,
			State: state,
		}).ToJSON()
		http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json))
	}

	return outcome, failure
}

func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) {
	failure := func(message string) types.SpecFailure {
		return types.SpecFailure{
			Message:               message,
			Location:              node.runnerA.codeLocation,
			ComponentType:         node.runnerA.nodeType,
			ComponentIndex:        node.runnerA.componentIndex,
			ComponentCodeLocation: node.runnerA.codeLocation,
		}
	}
	for {
		resp, err := http.Get(syncHost + "/BeforeSuiteState")
		if err != nil || resp.StatusCode != http.StatusOK {
			return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
		}

		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return types.SpecStateFailed, failure("Failed to read BeforeSuite state")
		}
		resp.Body.Close()

		beforeSuiteData := types.RemoteBeforeSuiteData{}
		err = json.Unmarshal(body, &beforeSuiteData)
		if err != nil {
			return types.SpecStateFailed, failure("Failed to decode BeforeSuite state")
		}

		switch beforeSuiteData.State {
		case types.RemoteBeforeSuiteStatePassed:
			node.data = beforeSuiteData.Data
			return types.SpecStatePassed, types.SpecFailure{}
		case types.RemoteBeforeSuiteStateFailed:
			return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed")
		case types.RemoteBeforeSuiteStateDisappeared:
			return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite")
		}

		time.Sleep(50 * time.Millisecond)
	}
}

func (node *synchronizedBeforeSuiteNode) Passed() bool {
	return node.outcome == types.SpecStatePassed
}

func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary {
	return &types.SetupSummary{
		ComponentType: node.runnerA.nodeType,
		CodeLocation:  node.runnerA.codeLocation,
		State:         node.outcome,
		RunTime:       node.runTime,
		Failure:       node.failure,
	}
}

func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} {
	typeA := reflect.TypeOf(bodyA)
	if typeA.Kind() != reflect.Func {
		panic("SynchronizedBeforeSuite expects a function as its first argument")
	}

	takesNothing := typeA.NumIn() == 0
	takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface
	returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8

	if !((takesNothing || takesADoneChannel) && returnsBytes) {
		panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.")
	}

	if takesADoneChannel {
		return func(done chan<- interface{}) {
			out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)})
			node.data = out[0].Interface().([]byte)
		}
	}

	return func() {
		out := reflect.ValueOf(bodyA).Call([]reflect.Value{})
		node.data = out[0].Interface().([]byte)
	}
}

func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} {
	typeB := reflect.TypeOf(bodyB)
	if typeB.Kind() != reflect.Func {
		panic("SynchronizedBeforeSuite expects a function as its second argument")
	}

	returnsNothing := typeB.NumOut() == 0
	takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8
	takesBytesAndDone := typeB.NumIn() == 2 &&
		typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 &&
		typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface

	if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) {
		panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)")
	}

	if takesBytesAndDone {
		return func(done chan<- interface{}) {
			reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)})
		}
	}

	return func() {
		reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)})
	}
}
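Sketch of the public-API counterpart: the []byte returned by the first function on node 1 is posted to /BeforeSuiteState and handed to the second function on every node (startSharedFixture and fixtureAddr are hypothetical):

var _ = SynchronizedBeforeSuite(func() []byte {
	addr := startSharedFixture() // runs once, on node 1 only
	return []byte(addr)
}, func(data []byte) {
	fixtureAddr = string(data) // runs on every node with node 1's bytes
})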
249  vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go  generated  vendored  Normal file
@@ -0,0 +1,249 @@
/*

Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel:

	ginkgo -nodes=N

where N is the number of nodes you desire.
*/
package remote

import (
	"time"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/reporters/stenographer"
	"github.com/onsi/ginkgo/types"
)

type configAndSuite struct {
	config  config.GinkgoConfigType
	summary *types.SuiteSummary
}

type Aggregator struct {
	nodeCount    int
	config       config.DefaultReporterConfigType
	stenographer stenographer.Stenographer
	result       chan bool

	suiteBeginnings           chan configAndSuite
	aggregatedSuiteBeginnings []configAndSuite

	beforeSuites           chan *types.SetupSummary
	aggregatedBeforeSuites []*types.SetupSummary

	afterSuites           chan *types.SetupSummary
	aggregatedAfterSuites []*types.SetupSummary

	specCompletions chan *types.SpecSummary
	completedSpecs  []*types.SpecSummary

	suiteEndings           chan *types.SuiteSummary
	aggregatedSuiteEndings []*types.SuiteSummary
	specs                  []*types.SpecSummary

	startTime time.Time
}

func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
	aggregator := &Aggregator{
		nodeCount:    nodeCount,
		result:       result,
		config:       config,
		stenographer: stenographer,

		suiteBeginnings: make(chan configAndSuite, 0),
		beforeSuites:    make(chan *types.SetupSummary, 0),
		afterSuites:     make(chan *types.SetupSummary, 0),
		specCompletions: make(chan *types.SpecSummary, 0),
		suiteEndings:    make(chan *types.SuiteSummary, 0),
	}

	go aggregator.mux()

	return aggregator
}

func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
	aggregator.suiteBeginnings <- configAndSuite{config, summary}
}

func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	aggregator.beforeSuites <- setupSummary
}

func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	aggregator.afterSuites <- setupSummary
}

func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
	//noop
}

func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
	aggregator.specCompletions <- specSummary
}

func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
	aggregator.suiteEndings <- summary
}

func (aggregator *Aggregator) mux() {
loop:
	for {
		select {
		case configAndSuite := <-aggregator.suiteBeginnings:
			aggregator.registerSuiteBeginning(configAndSuite)
		case setupSummary := <-aggregator.beforeSuites:
			aggregator.registerBeforeSuite(setupSummary)
		case setupSummary := <-aggregator.afterSuites:
			aggregator.registerAfterSuite(setupSummary)
		case specSummary := <-aggregator.specCompletions:
			aggregator.registerSpecCompletion(specSummary)
		case suite := <-aggregator.suiteEndings:
			finished, passed := aggregator.registerSuiteEnding(suite)
			if finished {
				aggregator.result <- passed
				break loop
			}
		}
	}
}

func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
	aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)

	if len(aggregator.aggregatedSuiteBeginnings) == 1 {
		aggregator.startTime = time.Now()
	}

	if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
		return
	}

	aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)

	totalNumberOfSpecs := 0
	if len(aggregator.aggregatedSuiteBeginnings) > 0 {
		totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization
	}

	aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct)
	aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
	aggregator.flushCompletedSpecs()
}

func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
	aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
	aggregator.flushCompletedSpecs()
}

func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
	aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
	aggregator.flushCompletedSpecs()
}

func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
	aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
	aggregator.specs = append(aggregator.specs, specSummary)
	aggregator.flushCompletedSpecs()
}

func (aggregator *Aggregator) flushCompletedSpecs() {
	if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
		return
	}

	for _, setupSummary := range aggregator.aggregatedBeforeSuites {
		aggregator.announceBeforeSuite(setupSummary)
	}

	for _, specSummary := range aggregator.completedSpecs {
		aggregator.announceSpec(specSummary)
	}

	for _, setupSummary := range aggregator.aggregatedAfterSuites {
		aggregator.announceAfterSuite(setupSummary)
	}

	aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
	aggregator.completedSpecs = []*types.SpecSummary{}
	aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
}

func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
	aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
	if setupSummary.State != types.SpecStatePassed {
		aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	}
}

func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
	aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
	if setupSummary.State != types.SpecStatePassed {
		aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	}
}

func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
	if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
		aggregator.stenographer.AnnounceSpecWillRun(specSummary)
	}

	aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)

	switch specSummary.State {
	case types.SpecStatePassed:
		if specSummary.IsMeasurement {
			aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct)
		} else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
			aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct)
		} else {
			aggregator.stenographer.AnnounceSuccesfulSpec(specSummary)
		}

	case types.SpecStatePending:
		aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
	case types.SpecStateSkipped:
		aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace)
	case types.SpecStateTimedOut:
		aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	case types.SpecStatePanicked:
		aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	case types.SpecStateFailed:
		aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	}
}

func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
	aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
	if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
		return false, false
	}

	aggregatedSuiteSummary := &types.SuiteSummary{}
	aggregatedSuiteSummary.SuiteSucceeded = true

	for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
		if suiteSummary.SuiteSucceeded == false {
			aggregatedSuiteSummary.SuiteSucceeded = false
		}

		aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
		aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
		aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
		aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
		aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
		aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
		aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs
	}

	aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)

	aggregator.stenographer.SummarizeFailures(aggregator.specs)
	aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)

	return true, aggregatedSuiteSummary.SuiteSucceeded
}
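The Aggregator funnels every reporter callback through unbuffered channels into the single mux goroutine, serializing all state mutation without locks. A wiring sketch of roughly what the CLI does (nodeCount, server, and the stenographer arguments are illustrative, not taken from this diff):

result := make(chan bool)
steno := stenographer.New(true, true, os.Stdout)
aggregator := remote.NewAggregator(nodeCount, result, config.DefaultReporterConfig, steno)
server.RegisterReporters(aggregator) // the remote server feeds it each node's traffic
suitePassed := <-result              // blocks until all nodeCount suites have ended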
147  vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go  generated  vendored  Normal file
@@ -0,0 +1,147 @@
package remote

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"

	"github.com/onsi/ginkgo/internal/writer"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/ginkgo/reporters/stenographer"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/types"
)

//An interface to net/http's client to allow the injection of fakes under test
type Poster interface {
	Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
}

/*
The ForwardingReporter is a Ginkgo reporter that forwards information to
a Ginkgo remote server.

When streaming parallel test output, this reporter is automatically installed by Ginkgo.

This is accomplished by passing in the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`; the Ginkgo test runner
detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
in place of Ginkgo's DefaultReporter.
*/

type ForwardingReporter struct {
	serverHost        string
	poster            Poster
	outputInterceptor OutputInterceptor
	debugMode         bool
	debugFile         *os.File
	nestedReporter    *reporters.DefaultReporter
}

func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter {
	reporter := &ForwardingReporter{
		serverHost:        serverHost,
		poster:            poster,
		outputInterceptor: outputInterceptor,
	}

	if debugFile != "" {
		var err error
		reporter.debugMode = true
		reporter.debugFile, err = os.Create(debugFile)
		if err != nil {
			fmt.Println(err.Error())
			os.Exit(1)
		}

		if !config.Verbose {
			//if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication.
			ginkgoWriter.AndRedirectTo(reporter.debugFile)
		}
		outputInterceptor.StreamTo(reporter.debugFile) //This is not working

		stenographer := stenographer.New(false, true, reporter.debugFile)
		config.Succinct = false
		config.Verbose = true
		config.FullTrace = true
		reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer)
	}

	return reporter
}

func (reporter *ForwardingReporter) post(path string, data interface{}) {
	encoded, _ := json.Marshal(data)
	buffer := bytes.NewBuffer(encoded)
	reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
}

func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
	data := struct {
		Config  config.GinkgoConfigType `json:"config"`
		Summary *types.SuiteSummary     `json:"suite-summary"`
	}{
		conf,
		summary,
	}

	reporter.outputInterceptor.StartInterceptingOutput()
	if reporter.debugMode {
		reporter.nestedReporter.SpecSuiteWillBegin(conf, summary)
		reporter.debugFile.Sync()
	}
	reporter.post("/SpecSuiteWillBegin", data)
}

func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
	reporter.outputInterceptor.StartInterceptingOutput()
	setupSummary.CapturedOutput = output
	if reporter.debugMode {
		reporter.nestedReporter.BeforeSuiteDidRun(setupSummary)
		reporter.debugFile.Sync()
	}
	reporter.post("/BeforeSuiteDidRun", setupSummary)
}

func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
	if reporter.debugMode {
		reporter.nestedReporter.SpecWillRun(specSummary)
		reporter.debugFile.Sync()
	}
	reporter.post("/SpecWillRun", specSummary)
}

func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
	output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
	reporter.outputInterceptor.StartInterceptingOutput()
	specSummary.CapturedOutput = output
	if reporter.debugMode {
		reporter.nestedReporter.SpecDidComplete(specSummary)
		reporter.debugFile.Sync()
	}
	reporter.post("/SpecDidComplete", specSummary)
}

func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
	reporter.outputInterceptor.StartInterceptingOutput()
	setupSummary.CapturedOutput = output
	if reporter.debugMode {
		reporter.nestedReporter.AfterSuiteDidRun(setupSummary)
		reporter.debugFile.Sync()
	}
	reporter.post("/AfterSuiteDidRun", setupSummary)
}

func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
	reporter.outputInterceptor.StopInterceptingAndReturnOutput()
	if reporter.debugMode {
		reporter.nestedReporter.SpecSuiteDidEnd(summary)
		reporter.debugFile.Sync()
	}
	reporter.post("/SpecSuiteDidEnd", summary)
}
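The Poster interface exists so tests can swap in a double for net/http's client; a minimal sketch of such a fake (fakePoster is hypothetical, not part of this diff):

type fakePoster struct {
	urls []string
}

func (p *fakePoster) Post(url string, bodyType string, body io.Reader) (*http.Response, error) {
	p.urls = append(p.urls, url)
	return &http.Response{StatusCode: http.StatusOK, Body: http.NoBody}, nil
}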
13  vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go  generated  vendored  Normal file
@@ -0,0 +1,13 @@
package remote

import "os"

/*
The OutputInterceptor is used by the ForwardingReporter to
intercept and capture all stdout and stderr output during a test run.
*/
type OutputInterceptor interface {
	StartInterceptingOutput() error
	StopInterceptingAndReturnOutput() (string, error)
	StreamTo(*os.File)
}
83  vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go  generated  vendored  Normal file
@@ -0,0 +1,83 @@
// +build freebsd openbsd netbsd dragonfly darwin linux solaris

package remote

import (
	"errors"
	"io/ioutil"
	"os"

	"github.com/hpcloud/tail"
)

func NewOutputInterceptor() OutputInterceptor {
	return &outputInterceptor{}
}

type outputInterceptor struct {
	redirectFile *os.File
	streamTarget *os.File
	intercepting bool
	tailer       *tail.Tail
	doneTailing  chan bool
}

func (interceptor *outputInterceptor) StartInterceptingOutput() error {
	if interceptor.intercepting {
		return errors.New("Already intercepting output!")
	}
	interceptor.intercepting = true

	var err error

	interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
	if err != nil {
		return err
	}

	// Call a function in ./syscall_dup_*.go
	// If building for everything other than linux_arm64,
	// use a "normal" syscall.Dup2(oldfd, newfd) call. If building for linux_arm64 (which doesn't have syscall.Dup2)
	// call syscall.Dup3(oldfd, newfd, 0). They are nearly identical, see: http://linux.die.net/man/2/dup3
	syscallDup(int(interceptor.redirectFile.Fd()), 1)
	syscallDup(int(interceptor.redirectFile.Fd()), 2)

	if interceptor.streamTarget != nil {
		interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true})
		interceptor.doneTailing = make(chan bool)

		go func() {
			for line := range interceptor.tailer.Lines {
				interceptor.streamTarget.Write([]byte(line.Text + "\n"))
			}
			close(interceptor.doneTailing)
		}()
	}

	return nil
}

func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
	if !interceptor.intercepting {
		return "", errors.New("Not intercepting output!")
	}

	interceptor.redirectFile.Close()
	output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
	os.Remove(interceptor.redirectFile.Name())

	interceptor.intercepting = false

	if interceptor.streamTarget != nil {
		interceptor.tailer.Stop()
		interceptor.tailer.Cleanup()
		<-interceptor.doneTailing
		interceptor.streamTarget.Sync()
	}

	return string(output), err
}

func (interceptor *outputInterceptor) StreamTo(out *os.File) {
	interceptor.streamTarget = out
}
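The core trick is file-descriptor duplication: after dup'ing the temp file's fd over fds 1 and 2, everything the process writes to stdout/stderr lands in the temp file. A standalone, Linux/amd64-flavored sketch using golang.org/x/sys/unix directly instead of the build-tagged syscallDup:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, _ := os.CreateTemp("", "capture")
	unix.Dup2(int(f.Fd()), 1) // fd 1 (stdout) now points at the temp file
	unix.Dup2(int(f.Fd()), 2) // fd 2 (stderr) too
	fmt.Println("captured")   // written to the temp file, not the terminal
}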
36  vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go  generated  vendored  Normal file
@@ -0,0 +1,36 @@
// +build windows

package remote

import (
	"errors"
	"os"
)

func NewOutputInterceptor() OutputInterceptor {
	return &outputInterceptor{}
}

type outputInterceptor struct {
	intercepting bool
}

func (interceptor *outputInterceptor) StartInterceptingOutput() error {
	if interceptor.intercepting {
		return errors.New("Already intercepting output!")
	}
	interceptor.intercepting = true

	// not working on windows...

	return nil
}

func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
	// not working on windows...
	interceptor.intercepting = false

	return "", nil
}

func (interceptor *outputInterceptor) StreamTo(*os.File) {}
224  vendor/github.com/onsi/ginkgo/internal/remote/server.go  generated  vendored  Normal file
@@ -0,0 +1,224 @@
/*

The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).

*/

package remote

import (
	"encoding/json"
	"io/ioutil"
	"net"
	"net/http"
	"sync"

	"github.com/onsi/ginkgo/internal/spec_iterator"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/ginkgo/types"
)

/*
Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
It then forwards that communication to attached reporters.
*/
type Server struct {
	listener        net.Listener
	reporters       []reporters.Reporter
	alives          []func() bool
	lock            *sync.Mutex
	beforeSuiteData types.RemoteBeforeSuiteData
	parallelTotal   int
	counter         int
}

//Create a new server, automatically selecting a port
func NewServer(parallelTotal int) (*Server, error) {
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, err
	}
	return &Server{
		listener:        listener,
		lock:            &sync.Mutex{},
		alives:          make([]func() bool, parallelTotal),
		beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending},
		parallelTotal:   parallelTotal,
	}, nil
}

//Start the server. You don't need to `go s.Start()`, just `s.Start()`
func (server *Server) Start() {
	httpServer := &http.Server{}
	mux := http.NewServeMux()
	httpServer.Handler = mux

	//streaming endpoints
	mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
	mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
	mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
	mux.HandleFunc("/SpecWillRun", server.specWillRun)
	mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
	mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)

	//synchronization endpoints
	mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
	mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
	mux.HandleFunc("/counter", server.handleCounter)
	mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility

	go httpServer.Serve(server.listener)
}

//Stop the server
func (server *Server) Close() {
	server.listener.Close()
}

//The address the server can be reached at. Pass this into the `ForwardingReporter`.
func (server *Server) Address() string {
	return "http://" + server.listener.Addr().String()
}

//
// Streaming Endpoints
//

//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
func (server *Server) readAll(request *http.Request) []byte {
	defer request.Body.Close()
	body, _ := ioutil.ReadAll(request.Body)
	return body
}

func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
	server.reporters = reporters
}

func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
	body := server.readAll(request)

	var data struct {
		Config  config.GinkgoConfigType `json:"config"`
		Summary *types.SuiteSummary     `json:"suite-summary"`
	}

	json.Unmarshal(body, &data)

	for _, reporter := range server.reporters {
		reporter.SpecSuiteWillBegin(data.Config, data.Summary)
	}
}

func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
	body := server.readAll(request)
	var setupSummary *types.SetupSummary
	json.Unmarshal(body, &setupSummary)

	for _, reporter := range server.reporters {
		reporter.BeforeSuiteDidRun(setupSummary)
	}
}

func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var setupSummary *types.SetupSummary
|
||||
json.Unmarshal(body, &setupSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.AfterSuiteDidRun(setupSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var specSummary *types.SpecSummary
|
||||
json.Unmarshal(body, &specSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecWillRun(specSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var specSummary *types.SpecSummary
|
||||
json.Unmarshal(body, &specSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecDidComplete(specSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var suiteSummary *types.SuiteSummary
|
||||
json.Unmarshal(body, &suiteSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecSuiteDidEnd(suiteSummary)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Synchronization Endpoints
|
||||
//
|
||||
|
||||
func (server *Server) RegisterAlive(node int, alive func() bool) {
|
||||
server.lock.Lock()
|
||||
defer server.lock.Unlock()
|
||||
server.alives[node-1] = alive
|
||||
}
|
||||
|
||||
func (server *Server) nodeIsAlive(node int) bool {
|
||||
server.lock.Lock()
|
||||
defer server.lock.Unlock()
|
||||
alive := server.alives[node-1]
|
||||
if alive == nil {
|
||||
return true
|
||||
}
|
||||
return alive()
|
||||
}
|
||||
|
||||
func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
|
||||
if request.Method == "POST" {
|
||||
dec := json.NewDecoder(request.Body)
|
||||
dec.Decode(&(server.beforeSuiteData))
|
||||
} else {
|
||||
beforeSuiteData := server.beforeSuiteData
|
||||
if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
|
||||
beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
|
||||
}
|
||||
enc := json.NewEncoder(writer)
|
||||
enc.Encode(beforeSuiteData)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
|
||||
afterSuiteData := types.RemoteAfterSuiteData{
|
||||
CanRun: true,
|
||||
}
|
||||
for i := 2; i <= server.parallelTotal; i++ {
|
||||
afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
|
||||
}
|
||||
|
||||
enc := json.NewEncoder(writer)
|
||||
enc.Encode(afterSuiteData)
|
||||
}
|
||||
|
||||
func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) {
|
||||
c := spec_iterator.Counter{}
|
||||
server.lock.Lock()
|
||||
c.Index = server.counter
|
||||
server.counter = server.counter + 1
|
||||
server.lock.Unlock()
|
||||
|
||||
json.NewEncoder(writer).Encode(c)
|
||||
}
|
||||
|
||||
func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) {
|
||||
writer.Write([]byte(""))
|
||||
}
|
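
To wire the above together, a coordinating process creates a Server, registers the reporters that should receive the merged stream, starts it, and hands Address() to each parallel node so its forwarding reporter can POST to the streaming endpoints. A rough sketch, assuming myReporter is any reporters.Reporter value:

    server, err := NewServer(4) // coordinate 4 parallel nodes
    if err != nil {
        panic(err)
    }
    server.RegisterReporters(myReporter)
    server.Start()
    defer server.Close()
    syncHost := server.Address() // e.g. "http://127.0.0.1:54321"; passed to each node as SyncHost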

11 vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go generated vendored Normal file
@@ -0,0 +1,11 @@
// +build linux,arm64

package remote

import "syscall"

// linux_arm64 doesn't have syscall.Dup2 which ginkgo uses, so
// use the nearly identical syscall.Dup3 instead
func syscallDup(oldfd int, newfd int) (err error) {
	return syscall.Dup3(oldfd, newfd, 0)
}

9 vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go generated vendored Normal file
@@ -0,0 +1,9 @@
// +build solaris

package remote

import "golang.org/x/sys/unix"

func syscallDup(oldfd int, newfd int) (err error) {
	return unix.Dup2(oldfd, newfd)
}

11 vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go generated vendored Normal file
@@ -0,0 +1,11 @@
// +build !linux !arm64
// +build !windows
// +build !solaris

package remote

import "syscall"

func syscallDup(oldfd int, newfd int) (err error) {
	return syscall.Dup2(oldfd, newfd)
}
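
The three files above pick a dup implementation by build tag: syscall.Dup3 on linux/arm64 (which lacks Dup2), unix.Dup2 on Solaris, and syscall.Dup2 on the remaining non-Windows platforms. Whichever variant is compiled in, the call site is identical; as an illustration only (the target path is hypothetical), pointing stdout at an open file looks like:

    f, _ := os.Create("/tmp/captured.txt") // hypothetical target file
    _ = syscallDup(int(f.Fd()), 1)         // fd 1 (stdout) now points at f
    fmt.Println("this line lands in /tmp/captured.txt")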

247 vendor/github.com/onsi/ginkgo/internal/spec/spec.go generated vendored Normal file
@@ -0,0 +1,247 @@
package spec

import (
	"fmt"
	"io"
	"time"

	"sync"

	"github.com/onsi/ginkgo/internal/containernode"
	"github.com/onsi/ginkgo/internal/leafnodes"
	"github.com/onsi/ginkgo/types"
)

type Spec struct {
	subject          leafnodes.SubjectNode
	focused          bool
	announceProgress bool

	containers []*containernode.ContainerNode

	state            types.SpecState
	runTime          time.Duration
	startTime        time.Time
	failure          types.SpecFailure
	previousFailures bool

	stateMutex *sync.Mutex
}

func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
	spec := &Spec{
		subject:          subject,
		containers:       containers,
		focused:          subject.Flag() == types.FlagTypeFocused,
		announceProgress: announceProgress,
		stateMutex:       &sync.Mutex{},
	}

	spec.processFlag(subject.Flag())
	for i := len(containers) - 1; i >= 0; i-- {
		spec.processFlag(containers[i].Flag())
	}

	return spec
}

func (spec *Spec) processFlag(flag types.FlagType) {
	if flag == types.FlagTypeFocused {
		spec.focused = true
	} else if flag == types.FlagTypePending {
		spec.setState(types.SpecStatePending)
	}
}

func (spec *Spec) Skip() {
	spec.setState(types.SpecStateSkipped)
}

func (spec *Spec) Failed() bool {
	return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut
}

func (spec *Spec) Passed() bool {
	return spec.getState() == types.SpecStatePassed
}

func (spec *Spec) Flaked() bool {
	return spec.getState() == types.SpecStatePassed && spec.previousFailures
}

func (spec *Spec) Pending() bool {
	return spec.getState() == types.SpecStatePending
}

func (spec *Spec) Skipped() bool {
	return spec.getState() == types.SpecStateSkipped
}

func (spec *Spec) Focused() bool {
	return spec.focused
}

func (spec *Spec) IsMeasurement() bool {
	return spec.subject.Type() == types.SpecComponentTypeMeasure
}

func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
	componentTexts := make([]string, len(spec.containers)+1)
	componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1)

	for i, container := range spec.containers {
		componentTexts[i] = container.Text()
		componentCodeLocations[i] = container.CodeLocation()
	}

	componentTexts[len(spec.containers)] = spec.subject.Text()
	componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()

	runTime := spec.runTime
	if runTime == 0 && !spec.startTime.IsZero() {
		runTime = time.Since(spec.startTime)
	}

	return &types.SpecSummary{
		IsMeasurement:          spec.IsMeasurement(),
		NumberOfSamples:        spec.subject.Samples(),
		ComponentTexts:         componentTexts,
		ComponentCodeLocations: componentCodeLocations,
		State:                  spec.getState(),
		RunTime:                runTime,
		Failure:                spec.failure,
		Measurements:           spec.measurementsReport(),
		SuiteID:                suiteID,
	}
}

func (spec *Spec) ConcatenatedString() string {
	s := ""
	for _, container := range spec.containers {
		s += container.Text() + " "
	}

	return s + spec.subject.Text()
}

func (spec *Spec) Run(writer io.Writer) {
	if spec.getState() == types.SpecStateFailed {
		spec.previousFailures = true
	}

	spec.startTime = time.Now()
	defer func() {
		spec.runTime = time.Since(spec.startTime)
	}()

	for sample := 0; sample < spec.subject.Samples(); sample++ {
		spec.runSample(sample, writer)

		if spec.getState() != types.SpecStatePassed {
			return
		}
	}
}

func (spec *Spec) getState() types.SpecState {
	spec.stateMutex.Lock()
	defer spec.stateMutex.Unlock()
	return spec.state
}

func (spec *Spec) setState(state types.SpecState) {
	spec.stateMutex.Lock()
	defer spec.stateMutex.Unlock()
	spec.state = state
}

func (spec *Spec) runSample(sample int, writer io.Writer) {
	spec.setState(types.SpecStatePassed)
	spec.failure = types.SpecFailure{}
	innerMostContainerIndexToUnwind := -1

	defer func() {
		for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
			container := spec.containers[i]
			for _, justAfterEach := range container.SetupNodesOfType(types.SpecComponentTypeJustAfterEach) {
				spec.announceSetupNode(writer, "JustAfterEach", container, justAfterEach)
				justAfterEachState, justAfterEachFailure := justAfterEach.Run()
				if justAfterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
					spec.state = justAfterEachState
					spec.failure = justAfterEachFailure
				}
			}
		}

		for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
			container := spec.containers[i]
			for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
				spec.announceSetupNode(writer, "AfterEach", container, afterEach)
				afterEachState, afterEachFailure := afterEach.Run()
				if afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed {
					spec.setState(afterEachState)
					spec.failure = afterEachFailure
				}
			}
		}
	}()

	for i, container := range spec.containers {
		innerMostContainerIndexToUnwind = i
		for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
			spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
			s, f := beforeEach.Run()
			spec.failure = f
			spec.setState(s)
			if spec.getState() != types.SpecStatePassed {
				return
			}
		}
	}

	for _, container := range spec.containers {
		for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
			spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
			s, f := justBeforeEach.Run()
			spec.failure = f
			spec.setState(s)
			if spec.getState() != types.SpecStatePassed {
				return
			}
		}
	}

	spec.announceSubject(writer, spec.subject)
	s, f := spec.subject.Run()
	spec.failure = f
	spec.setState(s)
}

func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
	if spec.announceProgress {
		s := fmt.Sprintf("[%s] %s\n  %s\n", nodeType, container.Text(), setupNode.CodeLocation().String())
		writer.Write([]byte(s))
	}
}

func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) {
	if spec.announceProgress {
		nodeType := ""
		switch subject.Type() {
		case types.SpecComponentTypeIt:
			nodeType = "It"
		case types.SpecComponentTypeMeasure:
			nodeType = "Measure"
		}
		s := fmt.Sprintf("[%s] %s\n  %s\n", nodeType, subject.Text(), subject.CodeLocation().String())
		writer.Write([]byte(s))
	}
}

func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement {
	if !spec.IsMeasurement() || spec.Failed() {
		return map[string]*types.SpecMeasurement{}
	}

	return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport()
}
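
The net effect of runSample above is a fixed per-sample lifecycle: BeforeEach nodes run from the outermost container inward, then every JustBeforeEach, then the subject; on the way out, JustAfterEach and AfterEach nodes run from the innermost container that was entered back to the outermost, even when an earlier step failed. In terms of the Ginkgo DSL this ordering looks like (an illustrative suite, not part of this file):

    Describe("outer", func() {
        BeforeEach(func() { /* 1st */ })
        AfterEach(func() { /* 6th: the outermost unwinds last */ })
        Context("inner", func() {
            BeforeEach(func() { /* 2nd */ })
            JustBeforeEach(func() { /* 3rd */ })
            AfterEach(func() { /* 5th: the innermost unwinds first */ })
            It("subject", func() { /* 4th */ })
        })
    })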

144 vendor/github.com/onsi/ginkgo/internal/spec/specs.go generated vendored Normal file
@@ -0,0 +1,144 @@
package spec

import (
	"math/rand"
	"regexp"
	"sort"
)

type Specs struct {
	specs []*Spec
	names []string

	hasProgrammaticFocus bool
	RegexScansFilePath   bool
}

func NewSpecs(specs []*Spec) *Specs {
	names := make([]string, len(specs))
	for i, spec := range specs {
		names[i] = spec.ConcatenatedString()
	}
	return &Specs{
		specs: specs,
		names: names,
	}
}

func (e *Specs) Specs() []*Spec {
	return e.specs
}

func (e *Specs) HasProgrammaticFocus() bool {
	return e.hasProgrammaticFocus
}

func (e *Specs) Shuffle(r *rand.Rand) {
	sort.Sort(e)
	permutation := r.Perm(len(e.specs))
	shuffledSpecs := make([]*Spec, len(e.specs))
	names := make([]string, len(e.specs))
	for i, j := range permutation {
		shuffledSpecs[i] = e.specs[j]
		names[i] = e.names[j]
	}
	e.specs = shuffledSpecs
	e.names = names
}

func (e *Specs) ApplyFocus(description string, focusString string, skipString string) {
	if focusString == "" && skipString == "" {
		e.applyProgrammaticFocus()
	} else {
		e.applyRegExpFocusAndSkip(description, focusString, skipString)
	}
}

func (e *Specs) applyProgrammaticFocus() {
	e.hasProgrammaticFocus = false
	for _, spec := range e.specs {
		if spec.Focused() && !spec.Pending() {
			e.hasProgrammaticFocus = true
			break
		}
	}

	if e.hasProgrammaticFocus {
		for _, spec := range e.specs {
			if !spec.Focused() {
				spec.Skip()
			}
		}
	}
}

// toMatch returns a []byte to be used by regex matchers. When adding new behaviours to the matching function,
// this is the place to which we append.
func (e *Specs) toMatch(description string, i int) []byte {
	if i > len(e.names) {
		return nil
	}
	if e.RegexScansFilePath {
		return []byte(
			description + " " +
				e.names[i] + " " +
				e.specs[i].subject.CodeLocation().FileName)
	} else {
		return []byte(
			description + " " +
				e.names[i])
	}
}

func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string, skipString string) {
	var focusFilter *regexp.Regexp
	if focusString != "" {
		focusFilter = regexp.MustCompile(focusString)
	}
	var skipFilter *regexp.Regexp
	if skipString != "" {
		skipFilter = regexp.MustCompile(skipString)
	}

	for i, spec := range e.specs {
		matchesFocus := true
		matchesSkip := false

		toMatch := e.toMatch(description, i)

		if focusFilter != nil {
			matchesFocus = focusFilter.Match([]byte(toMatch))
		}

		if skipFilter != nil {
			matchesSkip = skipFilter.Match([]byte(toMatch))
		}

		if !matchesFocus || matchesSkip {
			spec.Skip()
		}
	}
}

func (e *Specs) SkipMeasurements() {
	for _, spec := range e.specs {
		if spec.IsMeasurement() {
			spec.Skip()
		}
	}
}

//sort.Interface

func (e *Specs) Len() int {
	return len(e.specs)
}

func (e *Specs) Less(i, j int) bool {
	return e.names[i] < e.names[j]
}

func (e *Specs) Swap(i, j int) {
	e.names[i], e.names[j] = e.names[j], e.names[i]
	e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
}
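
applyRegExpFocusAndSkip therefore matches the focus and skip patterns against a single synthesized string per spec: the suite description, the concatenated container and subject texts, and, when RegexScansFilePath is set, the subject's file name. With hypothetical texts, the match target and behavior look like:

    // A spec It("books a flight") under Describe("Checkout"), in a suite
    // described as "Store", yields the match target "Store Checkout books a flight",
    // so a focus pattern of "Checkout" keeps it and a skip pattern of "flight" skips it.
    focus := regexp.MustCompile("Checkout")
    fmt.Println(focus.Match([]byte("Store Checkout books a flight"))) // true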

55 vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go generated vendored Normal file
@@ -0,0 +1,55 @@
package spec_iterator

func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
	if length == 0 {
		return 0, 0
	}

	// We have more nodes than tests. Trivial case.
	if parallelTotal >= length {
		if parallelNode > length {
			return 0, 0
		} else {
			return parallelNode - 1, 1
		}
	}

	// This is the minimum number of tests that a node will be required to run
	minTestsPerNode := length / parallelTotal

	// This is the maximum number of tests that a node will be required to run
	// The algorithm guarantees that this would be equal to at least the minimum
	// and at most one more
	maxTestsPerNode := minTestsPerNode
	if length%parallelTotal != 0 {
		maxTestsPerNode++
	}

	// Number of nodes that will have to run the maximum number of tests per node
	numMaxLoadNodes := length % parallelTotal

	// Number of nodes that precede the current node and will have to run the maximum number of tests per node
	var numPrecedingMaxLoadNodes int
	if parallelNode > numMaxLoadNodes {
		numPrecedingMaxLoadNodes = numMaxLoadNodes
	} else {
		numPrecedingMaxLoadNodes = parallelNode - 1
	}

	// Number of nodes that precede the current node and will have to run the minimum number of tests per node
	var numPrecedingMinLoadNodes int
	if parallelNode <= numMaxLoadNodes {
		numPrecedingMinLoadNodes = 0
	} else {
		numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1
	}

	// Evaluate the test start index and number of tests to run
	startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
	if parallelNode > numMaxLoadNodes {
		count = minTestsPerNode
	} else {
		count = maxTestsPerNode
	}
	return
}
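
As a worked example of the arithmetic above: splitting 10 specs across 3 nodes gives minTestsPerNode = 3, maxTestsPerNode = 4, and numMaxLoadNodes = 1, so node 1 takes four specs and nodes 2 and 3 take three each:

    fmt.Println(ParallelizedIndexRange(10, 3, 1)) // 0 4 -> specs 0..3
    fmt.Println(ParallelizedIndexRange(10, 3, 2)) // 4 3 -> specs 4..6
    fmt.Println(ParallelizedIndexRange(10, 3, 3)) // 7 3 -> specs 7..9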

59 vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go generated vendored Normal file
@@ -0,0 +1,59 @@
package spec_iterator

import (
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/onsi/ginkgo/internal/spec"
)

type ParallelIterator struct {
	specs  []*spec.Spec
	host   string
	client *http.Client
}

func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator {
	return &ParallelIterator{
		specs:  specs,
		host:   host,
		client: &http.Client{},
	}
}

func (s *ParallelIterator) Next() (*spec.Spec, error) {
	resp, err := s.client.Get(s.host + "/counter")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
	}

	var counter Counter
	err = json.NewDecoder(resp.Body).Decode(&counter)
	if err != nil {
		return nil, err
	}

	if counter.Index >= len(s.specs) {
		return nil, ErrClosed
	}

	return s.specs[counter.Index], nil
}

func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int {
	return len(s.specs)
}

func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
	return -1, false
}

func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
	return -1, false
}
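
Each Next call above costs one HTTP round trip: the iterator GETs /counter on the coordinating server, which replies with a JSON body such as {"index": 3} (the Counter type declared in spec_iterator.go), and the node runs the spec at that global index; faster nodes naturally pull more specs. The wire exchange, sketched against a hypothetical host:

    resp, _ := http.Get("http://127.0.0.1:54321/counter")
    var c Counter
    json.NewDecoder(resp.Body).Decode(&c) // e.g. c.Index == 3
    resp.Body.Close()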

45 vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go generated vendored Normal file
@@ -0,0 +1,45 @@
package spec_iterator

import (
	"github.com/onsi/ginkgo/internal/spec"
)

type SerialIterator struct {
	specs []*spec.Spec
	index int
}

func NewSerialIterator(specs []*spec.Spec) *SerialIterator {
	return &SerialIterator{
		specs: specs,
		index: 0,
	}
}

func (s *SerialIterator) Next() (*spec.Spec, error) {
	if s.index >= len(s.specs) {
		return nil, ErrClosed
	}

	spec := s.specs[s.index]
	s.index += 1
	return spec, nil
}

func (s *SerialIterator) NumberOfSpecsPriorToIteration() int {
	return len(s.specs)
}

func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
	return len(s.specs), true
}

func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
	count := 0
	for _, s := range s.specs {
		if !s.Skipped() && !s.Pending() {
			count += 1
		}
	}
	return count, true
}

47 vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go generated vendored Normal file
@@ -0,0 +1,47 @@
package spec_iterator

import "github.com/onsi/ginkgo/internal/spec"

type ShardedParallelIterator struct {
	specs    []*spec.Spec
	index    int
	maxIndex int
}

func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator {
	startIndex, count := ParallelizedIndexRange(len(specs), total, node)

	return &ShardedParallelIterator{
		specs:    specs,
		index:    startIndex,
		maxIndex: startIndex + count,
	}
}

func (s *ShardedParallelIterator) Next() (*spec.Spec, error) {
	if s.index >= s.maxIndex {
		return nil, ErrClosed
	}

	spec := s.specs[s.index]
	s.index += 1
	return spec, nil
}

func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int {
	return len(s.specs)
}

func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
	return s.maxIndex - s.index, true
}

func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
	count := 0
	for i := s.index; i < s.maxIndex; i += 1 {
		if !s.specs[i].Skipped() && !s.specs[i].Pending() {
			count += 1
		}
	}
	return count, true
}

20 vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go generated vendored Normal file
@@ -0,0 +1,20 @@
package spec_iterator

import (
	"errors"

	"github.com/onsi/ginkgo/internal/spec"
)

var ErrClosed = errors.New("no more specs to run")

type SpecIterator interface {
	Next() (*spec.Spec, error)
	NumberOfSpecsPriorToIteration() int
	NumberOfSpecsToProcessIfKnown() (int, bool)
	NumberOfSpecsThatWillBeRunIfKnown() (int, bool)
}

type Counter struct {
	Index int `json:"index"`
}
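
All of the iterators above are drained the same way: keep calling Next until it returns ErrClosed. A minimal consumption loop, assuming it is any of the implementations and runSpec is a hypothetical helper:

    for {
        s, err := it.Next()
        if err == spec_iterator.ErrClosed {
            break // iterator exhausted
        }
        if err != nil {
            log.Fatal(err) // e.g. the sync server went away
        }
        runSpec(s)
    }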

15 vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go generated vendored Normal file
@@ -0,0 +1,15 @@
package specrunner

import (
	"crypto/rand"
	"fmt"
)

func randomID() string {
	b := make([]byte, 8)
	_, err := rand.Read(b)
	if err != nil {
		return ""
	}
	return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8])
}

411 vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go generated vendored Normal file
@@ -0,0 +1,411 @@
package specrunner

import (
	"fmt"
	"os"
	"os/signal"
	"sync"
	"syscall"

	"github.com/onsi/ginkgo/internal/spec_iterator"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/internal/leafnodes"
	"github.com/onsi/ginkgo/internal/spec"
	Writer "github.com/onsi/ginkgo/internal/writer"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/ginkgo/types"

	"time"
)

type SpecRunner struct {
	description     string
	beforeSuiteNode leafnodes.SuiteNode
	iterator        spec_iterator.SpecIterator
	afterSuiteNode  leafnodes.SuiteNode
	reporters       []reporters.Reporter
	startTime       time.Time
	suiteID         string
	runningSpec     *spec.Spec
	writer          Writer.WriterInterface
	config          config.GinkgoConfigType
	interrupted     bool
	processedSpecs  []*spec.Spec
	lock            *sync.Mutex
}

func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
	return &SpecRunner{
		description:     description,
		beforeSuiteNode: beforeSuiteNode,
		iterator:        iterator,
		afterSuiteNode:  afterSuiteNode,
		reporters:       reporters,
		writer:          writer,
		config:          config,
		suiteID:         randomID(),
		lock:            &sync.Mutex{},
	}
}

func (runner *SpecRunner) Run() bool {
	if runner.config.DryRun {
		runner.performDryRun()
		return true
	}

	runner.reportSuiteWillBegin()
	signalRegistered := make(chan struct{})
	go runner.registerForInterrupts(signalRegistered)
	<-signalRegistered

	suitePassed := runner.runBeforeSuite()

	if suitePassed {
		suitePassed = runner.runSpecs()
	}

	runner.blockForeverIfInterrupted()

	suitePassed = runner.runAfterSuite() && suitePassed

	runner.reportSuiteDidEnd(suitePassed)

	return suitePassed
}

func (runner *SpecRunner) performDryRun() {
	runner.reportSuiteWillBegin()

	if runner.beforeSuiteNode != nil {
		summary := runner.beforeSuiteNode.Summary()
		summary.State = types.SpecStatePassed
		runner.reportBeforeSuite(summary)
	}

	for {
		spec, err := runner.iterator.Next()
		if err == spec_iterator.ErrClosed {
			break
		}
		if err != nil {
			fmt.Println("failed to iterate over tests:\n" + err.Error())
			break
		}

		runner.processedSpecs = append(runner.processedSpecs, spec)

		summary := spec.Summary(runner.suiteID)
		runner.reportSpecWillRun(summary)
		if summary.State == types.SpecStateInvalid {
			summary.State = types.SpecStatePassed
		}
		runner.reportSpecDidComplete(summary, false)
	}

	if runner.afterSuiteNode != nil {
		summary := runner.afterSuiteNode.Summary()
		summary.State = types.SpecStatePassed
		runner.reportAfterSuite(summary)
	}

	runner.reportSuiteDidEnd(true)
}

func (runner *SpecRunner) runBeforeSuite() bool {
	if runner.beforeSuiteNode == nil || runner.wasInterrupted() {
		return true
	}

	runner.writer.Truncate()
	conf := runner.config
	passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
	if !passed {
		runner.writer.DumpOut()
	}
	runner.reportBeforeSuite(runner.beforeSuiteNode.Summary())
	return passed
}

func (runner *SpecRunner) runAfterSuite() bool {
	if runner.afterSuiteNode == nil {
		return true
	}

	runner.writer.Truncate()
	conf := runner.config
	passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
	if !passed {
		runner.writer.DumpOut()
	}
	runner.reportAfterSuite(runner.afterSuiteNode.Summary())
	return passed
}

func (runner *SpecRunner) runSpecs() bool {
	suiteFailed := false
	skipRemainingSpecs := false
	for {
		spec, err := runner.iterator.Next()
		if err == spec_iterator.ErrClosed {
			break
		}
		if err != nil {
			fmt.Println("failed to iterate over tests:\n" + err.Error())
			suiteFailed = true
			break
		}

		runner.processedSpecs = append(runner.processedSpecs, spec)

		if runner.wasInterrupted() {
			break
		}
		if skipRemainingSpecs {
			spec.Skip()
		}

		if !spec.Skipped() && !spec.Pending() {
			if passed := runner.runSpec(spec); !passed {
				suiteFailed = true
			}
		} else if spec.Pending() && runner.config.FailOnPending {
			runner.reportSpecWillRun(spec.Summary(runner.suiteID))
			suiteFailed = true
			runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
		} else {
			runner.reportSpecWillRun(spec.Summary(runner.suiteID))
			runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
		}

		if spec.Failed() && runner.config.FailFast {
			skipRemainingSpecs = true
		}
	}

	return !suiteFailed
}

func (runner *SpecRunner) runSpec(spec *spec.Spec) (passed bool) {
	maxAttempts := 1
	if runner.config.FlakeAttempts > 0 {
		// uninitialized configs count as 1
		maxAttempts = runner.config.FlakeAttempts
	}

	for i := 0; i < maxAttempts; i++ {
		runner.reportSpecWillRun(spec.Summary(runner.suiteID))
		runner.runningSpec = spec
		spec.Run(runner.writer)
		runner.runningSpec = nil
		runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
		if !spec.Failed() {
			return true
		}
	}
	return false
}

func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
	if runner.runningSpec == nil {
		return nil, false
	}

	return runner.runningSpec.Summary(runner.suiteID), true
}

func (runner *SpecRunner) registerForInterrupts(signalRegistered chan struct{}) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	close(signalRegistered)

	<-c
	signal.Stop(c)
	runner.markInterrupted()
	go runner.registerForHardInterrupts()
	runner.writer.DumpOutWithHeader(`
Received interrupt. Emitting contents of GinkgoWriter...
---------------------------------------------------------
`)
	if runner.afterSuiteNode != nil {
		fmt.Fprint(os.Stderr, `
---------------------------------------------------------
Received interrupt. Running AfterSuite...
^C again to terminate immediately
`)
		runner.runAfterSuite()
	}
	runner.reportSuiteDidEnd(false)
	os.Exit(1)
}

func (runner *SpecRunner) registerForHardInterrupts() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)

	<-c
	fmt.Fprintln(os.Stderr, "\nReceived second interrupt. Shutting down.")
	os.Exit(1)
}

func (runner *SpecRunner) blockForeverIfInterrupted() {
	runner.lock.Lock()
	interrupted := runner.interrupted
	runner.lock.Unlock()

	if interrupted {
		select {}
	}
}

func (runner *SpecRunner) markInterrupted() {
	runner.lock.Lock()
	defer runner.lock.Unlock()
	runner.interrupted = true
}

func (runner *SpecRunner) wasInterrupted() bool {
	runner.lock.Lock()
	defer runner.lock.Unlock()
	return runner.interrupted
}

func (runner *SpecRunner) reportSuiteWillBegin() {
	runner.startTime = time.Now()
	summary := runner.suiteWillBeginSummary()
	for _, reporter := range runner.reporters {
		reporter.SpecSuiteWillBegin(runner.config, summary)
	}
}

func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) {
	for _, reporter := range runner.reporters {
		reporter.BeforeSuiteDidRun(summary)
	}
}

func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) {
	for _, reporter := range runner.reporters {
		reporter.AfterSuiteDidRun(summary)
	}
}

func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
	runner.writer.Truncate()

	for _, reporter := range runner.reporters {
		reporter.SpecWillRun(summary)
	}
}

func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
	if failed && len(summary.CapturedOutput) == 0 {
		summary.CapturedOutput = string(runner.writer.Bytes())
	}
	for i := len(runner.reporters) - 1; i >= 1; i-- {
		runner.reporters[i].SpecDidComplete(summary)
	}

	if failed {
		runner.writer.DumpOut()
	}

	runner.reporters[0].SpecDidComplete(summary)
}

func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
	summary := runner.suiteDidEndSummary(success)
	summary.RunTime = time.Since(runner.startTime)
	for _, reporter := range runner.reporters {
		reporter.SpecSuiteDidEnd(summary)
	}
}

func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) {
	count = 0

	for _, spec := range runner.processedSpecs {
		if filter(spec) {
			count++
		}
	}

	return count
}

func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary {
	numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
		return !ex.Skipped() && !ex.Pending()
	})

	numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
		return ex.Pending()
	})

	numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
		return ex.Skipped()
	})

	numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
		return ex.Passed()
	})

	numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
		return ex.Flaked()
	})

	numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
		return ex.Failed()
	})

	if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
		var known bool
		numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
		if !known {
			numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration()
		}
		numberOfFailedSpecs = numberOfSpecsThatWillBeRun
	}

	return &types.SuiteSummary{
		SuiteDescription: runner.description,
		SuiteSucceeded:   success,
		SuiteID:          runner.suiteID,

		NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
		NumberOfTotalSpecs:                 len(runner.processedSpecs),
		NumberOfSpecsThatWillBeRun:         numberOfSpecsThatWillBeRun,
		NumberOfPendingSpecs:               numberOfPendingSpecs,
		NumberOfSkippedSpecs:               numberOfSkippedSpecs,
		NumberOfPassedSpecs:                numberOfPassedSpecs,
		NumberOfFailedSpecs:                numberOfFailedSpecs,
		NumberOfFlakedSpecs:                numberOfFlakedSpecs,
	}
}

func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary {
	numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown()
	if !known {
		numTotal = -1
	}

	numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
	if !known {
		numToRun = -1
	}

	return &types.SuiteSummary{
		SuiteDescription: runner.description,
		SuiteID:          runner.suiteID,

		NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
		NumberOfTotalSpecs:                 numTotal,
		NumberOfSpecsThatWillBeRun:         numToRun,
		NumberOfPendingSpecs:               -1,
		NumberOfSkippedSpecs:               -1,
		NumberOfPassedSpecs:                -1,
		NumberOfFailedSpecs:                -1,
		NumberOfFlakedSpecs:                -1,
	}
}
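
One detail worth noting in runSpec above: with config.FlakeAttempts set to N, a failing spec is re-run in place up to N times in total and counts as passed as soon as any attempt succeeds; each attempt is reported individually, and Flaked() becomes true on the final summary because the earlier failures set previousFailures. Schematically, for FlakeAttempts = 3:

    // attempt 1: fails  -> reported; spec re-run
    // attempt 2: fails  -> reported; spec re-run
    // attempt 3: passes -> runSpec returns true and spec.Flaked() reports true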

190 vendor/github.com/onsi/ginkgo/internal/suite/suite.go generated vendored Normal file
@@ -0,0 +1,190 @@
package suite

import (
	"math/rand"
	"net/http"
	"time"

	"github.com/onsi/ginkgo/internal/spec_iterator"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/internal/containernode"
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/internal/leafnodes"
	"github.com/onsi/ginkgo/internal/spec"
	"github.com/onsi/ginkgo/internal/specrunner"
	"github.com/onsi/ginkgo/internal/writer"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/ginkgo/types"
)

type ginkgoTestingT interface {
	Fail()
}

type Suite struct {
	topLevelContainer *containernode.ContainerNode
	currentContainer  *containernode.ContainerNode
	containerIndex    int
	beforeSuiteNode   leafnodes.SuiteNode
	afterSuiteNode    leafnodes.SuiteNode
	runner            *specrunner.SpecRunner
	failer            *failer.Failer
	running           bool
}

func New(failer *failer.Failer) *Suite {
	topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{})

	return &Suite{
		topLevelContainer: topLevelContainer,
		currentContainer:  topLevelContainer,
		failer:            failer,
		containerIndex:    1,
	}
}

func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) {
	if config.ParallelTotal < 1 {
		panic("ginkgo.parallel.total must be >= 1")
	}

	if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 {
		panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total")
	}

	r := rand.New(rand.NewSource(config.RandomSeed))
	suite.topLevelContainer.Shuffle(r)
	iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config)
	suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, config)

	suite.running = true
	success := suite.runner.Run()
	if !success {
		t.Fail()
	}
	return success, hasProgrammaticFocus
}

func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) {
	specsSlice := []*spec.Spec{}
	suite.topLevelContainer.BackPropagateProgrammaticFocus()
	for _, collatedNodes := range suite.topLevelContainer.Collate() {
		specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress))
	}

	specs := spec.NewSpecs(specsSlice)
	specs.RegexScansFilePath = config.RegexScansFilePath

	if config.RandomizeAllSpecs {
		specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed)))
	}

	specs.ApplyFocus(description, config.FocusString, config.SkipString)

	if config.SkipMeasurements {
		specs.SkipMeasurements()
	}

	var iterator spec_iterator.SpecIterator

	if config.ParallelTotal > 1 {
		iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost)
		resp, err := http.Get(config.SyncHost + "/has-counter")
		if err != nil || resp.StatusCode != http.StatusOK {
			iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode)
		}
	} else {
		iterator = spec_iterator.NewSerialIterator(specs.Specs())
	}

	return iterator, specs.HasProgrammaticFocus()
}

func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
	return suite.runner.CurrentSpecSummary()
}

func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.beforeSuiteNode != nil {
		panic("You may only call BeforeSuite once!")
	}
	suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer)
}

func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.afterSuiteNode != nil {
		panic("You may only call AfterSuite once!")
	}
	suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer)
}

func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.beforeSuiteNode != nil {
		panic("You may only call BeforeSuite once!")
	}
	suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
}

func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.afterSuiteNode != nil {
		panic("You may only call AfterSuite once!")
	}
	suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
}

func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) {
	container := containernode.New(text, flag, codeLocation)
	suite.currentContainer.PushContainerNode(container)

	previousContainer := suite.currentContainer
	suite.currentContainer = container
	suite.containerIndex++

	body()

	suite.containerIndex--
	suite.currentContainer = previousContainer
}

func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.running {
		suite.failer.Fail("You may only call It from within a Describe, Context or When", codeLocation)
	}
	suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
}

func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
	if suite.running {
		suite.failer.Fail("You may only call Measure from within a Describe, Context or When", codeLocation)
	}
	suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
}

func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.running {
		suite.failer.Fail("You may only call BeforeEach from within a Describe, Context or When", codeLocation)
	}
	suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}

func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.running {
		suite.failer.Fail("You may only call JustBeforeEach from within a Describe, Context or When", codeLocation)
	}
	suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}

func (suite *Suite) PushJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.running {
		suite.failer.Fail("You may only call JustAfterEach from within a Describe or Context", codeLocation)
	}
	suite.currentContainer.PushSetupNode(leafnodes.NewJustAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}

func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.running {
		suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation)
	}
	suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}

76 vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go generated vendored Normal file
@@ -0,0 +1,76 @@
package testingtproxy

import (
	"fmt"
	"io"
)

type failFunc func(message string, callerSkip ...int)

func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy {
	return &ginkgoTestingTProxy{
		fail:   fail,
		offset: offset,
		writer: writer,
	}
}

type ginkgoTestingTProxy struct {
	fail   failFunc
	offset int
	writer io.Writer
}

func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
	t.fail(fmt.Sprintln(args...), t.offset)
}

func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
	t.fail(fmt.Sprintf(format, args...), t.offset)
}

func (t *ginkgoTestingTProxy) Fail() {
	t.fail("failed", t.offset)
}

func (t *ginkgoTestingTProxy) FailNow() {
	t.fail("failed", t.offset)
}

func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
	t.fail(fmt.Sprintln(args...), t.offset)
}

func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
	t.fail(fmt.Sprintf(format, args...), t.offset)
}

func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
	fmt.Fprintln(t.writer, args...)
}

func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
	t.Log(fmt.Sprintf(format, args...))
}

func (t *ginkgoTestingTProxy) Failed() bool {
	return false
}

func (t *ginkgoTestingTProxy) Parallel() {
}

func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
	fmt.Println(args...)
}

func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
	t.Skip(fmt.Sprintf(format, args...))
}

func (t *ginkgoTestingTProxy) SkipNow() {
}

func (t *ginkgoTestingTProxy) Skipped() bool {
	return false
}

36 vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go generated vendored Normal file
@@ -0,0 +1,36 @@
package writer

type FakeGinkgoWriter struct {
	EventStream []string
}

func NewFake() *FakeGinkgoWriter {
	return &FakeGinkgoWriter{
		EventStream: []string{},
	}
}

func (writer *FakeGinkgoWriter) AddEvent(event string) {
	writer.EventStream = append(writer.EventStream, event)
}

func (writer *FakeGinkgoWriter) Truncate() {
	writer.EventStream = append(writer.EventStream, "TRUNCATE")
}

func (writer *FakeGinkgoWriter) DumpOut() {
	writer.EventStream = append(writer.EventStream, "DUMP")
}

func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
	writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
}

func (writer *FakeGinkgoWriter) Bytes() []byte {
	writer.EventStream = append(writer.EventStream, "BYTES")
	return nil
}

func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
	return 0, nil
}

89 vendor/github.com/onsi/ginkgo/internal/writer/writer.go generated vendored Normal file
@@ -0,0 +1,89 @@
package writer

import (
	"bytes"
	"io"
	"sync"
)

type WriterInterface interface {
	io.Writer

	Truncate()
	DumpOut()
	DumpOutWithHeader(header string)
	Bytes() []byte
}

type Writer struct {
	buffer     *bytes.Buffer
	outWriter  io.Writer
	lock       *sync.Mutex
	stream     bool
	redirector io.Writer
}

func New(outWriter io.Writer) *Writer {
	return &Writer{
		buffer:    &bytes.Buffer{},
		lock:      &sync.Mutex{},
		outWriter: outWriter,
		stream:    true,
	}
}

func (w *Writer) AndRedirectTo(writer io.Writer) {
	w.redirector = writer
}

func (w *Writer) SetStream(stream bool) {
	w.lock.Lock()
	defer w.lock.Unlock()
	w.stream = stream
}

func (w *Writer) Write(b []byte) (n int, err error) {
	w.lock.Lock()
	defer w.lock.Unlock()

	n, err = w.buffer.Write(b)
	if w.redirector != nil {
		w.redirector.Write(b)
	}
	if w.stream {
		return w.outWriter.Write(b)
	}
	return n, err
}

func (w *Writer) Truncate() {
	w.lock.Lock()
	defer w.lock.Unlock()
	w.buffer.Reset()
}

func (w *Writer) DumpOut() {
	w.lock.Lock()
	defer w.lock.Unlock()
	if !w.stream {
		w.buffer.WriteTo(w.outWriter)
	}
}

func (w *Writer) Bytes() []byte {
	w.lock.Lock()
	defer w.lock.Unlock()
	b := w.buffer.Bytes()
	copied := make([]byte, len(b))
	copy(copied, b)
	return copied
}

func (w *Writer) DumpOutWithHeader(header string) {
	w.lock.Lock()
	defer w.lock.Unlock()
	if !w.stream && w.buffer.Len() > 0 {
		w.outWriter.Write([]byte(header))
		w.buffer.WriteTo(w.outWriter)
	}
}
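
The Writer above has two modes: with stream true (the default from New) every Write goes straight through to outWriter; with stream false, writes accumulate in the buffer and only reach outWriter via DumpOut or DumpOutWithHeader, which is how Ginkgo shows GinkgoWriter output only for failing specs. A small sketch from outside the package:

    w := writer.New(os.Stdout) // streaming by default
    w.SetStream(false)         // switch to buffering
    fmt.Fprintln(w, "only shown on failure")
    w.Truncate()               // spec passed: discard buffered output
    fmt.Fprintln(w, "this spec failed")
    w.DumpOut()                // flush buffered output to os.Stdout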