rebase: bump the golang-dependencies group with 1 update

Bumps the golang-dependencies group with 1 update: [golang.org/x/crypto](https://github.com/golang/crypto).


Updates `golang.org/x/crypto` from 0.16.0 to 0.17.0
- [Commits](https://github.com/golang/crypto/compare/v0.16.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Authored by dependabot[bot] on 2023-12-18 20:31:00 +00:00
Committed by mergify[bot]
Parent 1ad79314f9
Commit e5d9b68d36
398 changed files with 33924 additions and 10753 deletions

View File

@ -120,7 +120,7 @@ type grpcTunnel struct {
stream client.ProxyService_ProxyClient
sendLock sync.Mutex
recvLock sync.Mutex
clientConn clientConn
grpcConn clientConn
pendingDial pendingDialManager
conns connectionManager
@ -197,7 +197,7 @@ func CreateSingleUseGrpcTunnelWithContext(createCtx, tunnelCtx context.Context,
func newUnstartedTunnel(stream client.ProxyService_ProxyClient, c clientConn) *grpcTunnel {
t := grpcTunnel{
stream: stream,
clientConn: c,
grpcConn: c,
pendingDial: pendingDialManager{pendingDials: make(map[int64]pendingDial)},
conns: connectionManager{conns: make(map[int64]*conn)},
readTimeoutSeconds: 10,
@ -238,7 +238,7 @@ func (t *grpcTunnel) closeMetric() {
func (t *grpcTunnel) serve(tunnelCtx context.Context) {
defer func() {
t.clientConn.Close()
t.grpcConn.Close()
// A connection in t.conns after serve() returns means
// we never received a CLOSE_RSP for it, so we need to
@ -278,7 +278,7 @@ func (t *grpcTunnel) serve(tunnelCtx context.Context) {
// 2. grpcTunnel.DialContext() returned early due to a dial timeout or the client canceling the context
//
// In either scenario, we should return here and close the tunnel as it is no longer needed.
kvs := []interface{}{"dialID", resp.Random, "connectID", resp.ConnectID}
kvs := []interface{}{"dialID", resp.Random, "connectionID", resp.ConnectID}
if resp.Error != "" {
kvs = append(kvs, "error", resp.Error)
}
@ -349,14 +349,7 @@ func (t *grpcTunnel) serve(tunnelCtx context.Context) {
if !ok {
klog.ErrorS(nil, "Connection not recognized", "connectionID", resp.ConnectID, "packetType", "DATA")
t.Send(&client.Packet{
Type: client.PacketType_CLOSE_REQ,
Payload: &client.Packet_CloseRequest{
CloseRequest: &client.CloseRequest{
ConnectID: resp.ConnectID,
},
},
})
t.sendCloseRequest(resp.ConnectID)
continue
}
timer := time.NewTimer((time.Duration)(t.readTimeoutSeconds) * time.Second)
@ -448,9 +441,8 @@ func (t *grpcTunnel) dialContext(requestCtx context.Context, protocol, address s
klog.V(5).Infoln("DIAL_REQ sent to proxy server")
c := &conn{
tunnel: t,
random: random,
closeTunnel: t.closeTunnel,
tunnel: t,
random: random,
}
select {
@ -464,11 +456,17 @@ func (t *grpcTunnel) dialContext(requestCtx context.Context, protocol, address s
t.conns.add(res.connid, c)
case <-time.After(30 * time.Second):
klog.V(5).InfoS("Timed out waiting for DialResp", "dialID", random)
go t.closeDial(random)
go func() {
defer t.closeTunnel()
t.sendDialClose(random)
}()
return nil, &dialFailure{"dial timeout, backstop", metrics.DialFailureTimeout}
case <-requestCtx.Done():
klog.V(5).InfoS("Context canceled waiting for DialResp", "ctxErr", requestCtx.Err(), "dialID", random)
go t.closeDial(random)
go func() {
defer t.closeTunnel()
t.sendDialClose(random)
}()
return nil, &dialFailure{"dial timeout, context", metrics.DialFailureContext}
case <-t.done:
klog.V(5).InfoS("Tunnel closed while waiting for DialResp", "dialID", random)
@ -483,7 +481,21 @@ func (t *grpcTunnel) Done() <-chan struct{} {
}
// Send a best-effort DIAL_CLS request for the given dial ID.
func (t *grpcTunnel) closeDial(dialID int64) {
func (t *grpcTunnel) sendCloseRequest(connID int64) error {
req := &client.Packet{
Type: client.PacketType_CLOSE_REQ,
Payload: &client.Packet_CloseRequest{
CloseRequest: &client.CloseRequest{
ConnectID: connID,
},
},
}
klog.V(5).InfoS("[tracing] send req", "type", req.Type)
return t.Send(req)
}
func (t *grpcTunnel) sendDialClose(dialID int64) error {
req := &client.Packet{
Type: client.PacketType_DIAL_CLS,
Payload: &client.Packet_CloseDial{
@ -492,15 +504,13 @@ func (t *grpcTunnel) closeDial(dialID int64) {
},
},
}
if err := t.Send(req); err != nil {
klog.V(5).InfoS("Failed to send DIAL_CLS", "err", err, "dialID", dialID)
}
t.closeTunnel()
klog.V(5).InfoS("[tracing] send req", "type", req.Type)
return t.Send(req)
}
func (t *grpcTunnel) closeTunnel() {
atomic.StoreUint32(&t.closing, 1)
t.clientConn.Close()
t.grpcConn.Close()
}
func (t *grpcTunnel) isClosing() bool {
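The hunks above split the old closeDial helper (which both sent DIAL_CLS and closed the tunnel) into the single-purpose sendCloseRequest and sendDialClose, with callers now deciding when to tear the tunnel down. A minimal, self-contained sketch of the resulting timeout path, using simplified stand-in types rather than the real grpcTunnel and packet structs:

package main

import (
	"fmt"
	"time"
)

// tunnel is a simplified stand-in for grpcTunnel: it can send a best-effort
// DIAL_CLS for a pending dial and close its underlying connection.
type tunnel struct{ closed chan struct{} }

func (t *tunnel) sendDialClose(dialID int64) error {
	fmt.Println("DIAL_CLS sent for dial", dialID)
	return nil
}

func (t *tunnel) closeTunnel() { close(t.closed) }

func main() {
	t := &tunnel{closed: make(chan struct{})}
	dialID := int64(42)

	select {
	case <-time.After(10 * time.Millisecond): // stands in for the 30s DialResp timeout
		// Best-effort close message first, then tear the tunnel down,
		// mirroring the defer ordering used in dialContext above.
		go func() {
			defer t.closeTunnel()
			t.sendDialClose(dialID)
		}()
	}
	<-t.closed
}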

View File

@ -20,6 +20,7 @@ import (
"errors"
"io"
"net"
"sync/atomic"
"time"
"k8s.io/klog/v2"
@ -31,25 +32,31 @@ import (
// successful delivery of CLOSE_REQ.
const CloseTimeout = 10 * time.Second
var errConnTunnelClosed = errors.New("tunnel closed")
var errConnCloseTimeout = errors.New("close timeout")
// conn is an implementation of net.Conn, where the data is transported
// over an established tunnel defined by a gRPC service ProxyService.
type conn struct {
tunnel *grpcTunnel
connID int64
random int64
readCh chan []byte
tunnel *grpcTunnel
// connID is set when a successful DIAL_RSP is received
connID int64
// random (dialID) is always initialized
random int64
readCh chan []byte
// On receiving CLOSE_RSP, closeCh will be sent any error message and closed.
closeCh chan string
rdata []byte
// closeTunnel is an optional callback to close the underlying grpc connection.
closeTunnel func()
// closing is an atomic bool represented as a 0 or 1, and set to true when the connection is being closed.
// closing should only be accessed through atomic methods.
// TODO: switch this to an atomic.Bool once the client is exclusively built with go1.19+
closing uint32
}
var _ net.Conn = &conn{}
// Write sends the data thru the connection over proxy service
// Write sends the data through the connection over proxy service
func (c *conn) Write(data []byte) (n int, err error) {
req := &client.Packet{
Type: client.PacketType_DATA,
@ -116,40 +123,23 @@ func (c *conn) SetWriteDeadline(t time.Time) error {
return errors.New("not implemented")
}
// Close closes the connection. It also sends CLOSE_REQ packet over
// proxy service to notify remote to drop the connection.
// Close closes the connection, sends a best-effort close signal to the
// proxy service, and frees resources.
func (c *conn) Close() error {
klog.V(4).Infoln("closing connection")
if c.closeTunnel != nil {
defer c.closeTunnel()
old := atomic.SwapUint32(&c.closing, 1)
if old != 0 {
// prevent duplicate messages
return nil
}
klog.V(4).Infoln("closing connection", "dialID", c.random, "connectionID", c.connID)
defer c.tunnel.closeTunnel()
var req *client.Packet
if c.connID != 0 {
req = &client.Packet{
Type: client.PacketType_CLOSE_REQ,
Payload: &client.Packet_CloseRequest{
CloseRequest: &client.CloseRequest{
ConnectID: c.connID,
},
},
}
c.tunnel.sendCloseRequest(c.connID)
} else {
// Never received a DIAL response so no connection ID.
req = &client.Packet{
Type: client.PacketType_DIAL_CLS,
Payload: &client.Packet_CloseDial{
CloseDial: &client.CloseDial{
Random: c.random,
},
},
}
}
klog.V(5).InfoS("[tracing] send req", "type", req.Type)
if err := c.tunnel.Send(req); err != nil {
return err
c.tunnel.sendDialClose(c.random)
}
select {
@ -158,6 +148,8 @@ func (c *conn) Close() error {
return errors.New(errMsg)
}
return nil
case <-c.tunnel.Done():
return errConnTunnelClosed
case <-time.After(CloseTimeout):
}
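The rewritten Close above guards against concurrent or repeated calls by atomically swapping a closing flag before doing any work, so a CLOSE_REQ or DIAL_CLS is sent at most once. A minimal sketch of that duplicate-suppression pattern (names are illustrative, not the library's):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type conn struct {
	closing uint32 // 0 = open, 1 = closing; accessed only via atomics
}

// Close is safe to call from multiple goroutines: only the first call does
// the close work, later calls return immediately.
func (c *conn) Close() error {
	if old := atomic.SwapUint32(&c.closing, 1); old != 0 {
		return nil // already closing, avoid duplicate close messages
	}
	fmt.Println("sending CLOSE_REQ once")
	return nil
}

func main() {
	c := &conn{}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.Close()
		}()
	}
	wg.Wait()
}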

View File

@ -28,20 +28,15 @@ import (
// for PathElementSet and SetNodeMap, so we could probably share the
// code.
type PathElementValueMap struct {
members sortedPathElementValues
valueMap PathElementMap
}
func MakePathElementValueMap(size int) PathElementValueMap {
return PathElementValueMap{
members: make(sortedPathElementValues, 0, size),
valueMap: MakePathElementMap(size),
}
}
type pathElementValue struct {
PathElement PathElement
Value value.Value
}
type sortedPathElementValues []pathElementValue
// Implement the sort interface; this would permit bulk creation, which would
@ -53,7 +48,40 @@ func (spev sortedPathElementValues) Less(i, j int) bool {
func (spev sortedPathElementValues) Swap(i, j int) { spev[i], spev[j] = spev[j], spev[i] }
// Insert adds the pathelement and associated value in the map.
// If insert is called twice with the same PathElement, the value is replaced.
func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) {
s.valueMap.Insert(pe, v)
}
// Get retrieves the value associated with the given PathElement from the map.
// (nil, false) is returned if there is no such PathElement.
func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) {
v, ok := s.valueMap.Get(pe)
if !ok {
return nil, false
}
return v.(value.Value), true
}
// PathElementMap is a map from PathElement to interface{}.
type PathElementMap struct {
members sortedPathElementValues
}
type pathElementValue struct {
PathElement PathElement
Value interface{}
}
func MakePathElementMap(size int) PathElementMap {
return PathElementMap{
members: make(sortedPathElementValues, 0, size),
}
}
// Insert adds the pathelement and associated value in the map.
// If insert is called twice with the same PathElement, the value is replaced.
func (s *PathElementMap) Insert(pe PathElement, v interface{}) {
loc := sort.Search(len(s.members), func(i int) bool {
return !s.members[i].PathElement.Less(pe)
})
@ -62,6 +90,7 @@ func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) {
return
}
if s.members[loc].PathElement.Equals(pe) {
s.members[loc].Value = v
return
}
s.members = append(s.members, pathElementValue{})
@ -71,7 +100,7 @@ func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) {
// Get retrieves the value associated with the given PathElement from the map.
// (nil, false) is returned if there is no such PathElement.
func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) {
func (s *PathElementMap) Get(pe PathElement) (interface{}, bool) {
loc := sort.Search(len(s.members), func(i int) bool {
return !s.members[i].PathElement.Less(pe)
})
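With this change PathElementValueMap becomes a thin wrapper over the new, more general PathElementMap, which stores entries in a slice kept sorted by PathElement and looks them up with sort.Search. A simplified sketch of that sorted-slice map technique, using string keys instead of PathElements:

package main

import (
	"fmt"
	"sort"
)

type entry struct {
	Key   string
	Value interface{}
}

type sortedMap struct{ members []entry }

// Insert adds or replaces the value for key, keeping members sorted by key.
func (m *sortedMap) Insert(key string, v interface{}) {
	loc := sort.Search(len(m.members), func(i int) bool { return m.members[i].Key >= key })
	if loc == len(m.members) {
		m.members = append(m.members, entry{key, v})
		return
	}
	if m.members[loc].Key == key {
		m.members[loc].Value = v // replace, as PathElementMap.Insert does
		return
	}
	m.members = append(m.members, entry{})
	copy(m.members[loc+1:], m.members[loc:])
	m.members[loc] = entry{key, v}
}

// Get returns (nil, false) when the key is absent.
func (m *sortedMap) Get(key string) (interface{}, bool) {
	loc := sort.Search(len(m.members), func(i int) bool { return m.members[i].Key >= key })
	if loc == len(m.members) || m.members[loc].Key != key {
		return nil, false
	}
	return m.members[loc].Value, true
}

func main() {
	m := &sortedMap{}
	m.Insert("b", 2)
	m.Insert("a", 1)
	m.Insert("a", 10)
	v, ok := m.Get("a")
	fmt.Println(v, ok) // 10 true
}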

View File

@ -112,7 +112,7 @@ func ConflictsFromManagers(sets fieldpath.ManagedFields) Conflicts {
set.Set().Iterate(func(p fieldpath.Path) {
conflicts = append(conflicts, Conflict{
Manager: manager,
Path: p,
Path: p.Copy(),
})
})
}
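The switch to Path: p.Copy() matters because the set iterator may reuse its path buffer between callbacks; storing p directly would leave every Conflict aliasing the same backing slice and ending up with the last path visited. A small stand-alone illustration of that aliasing hazard with a plain slice:

package main

import "fmt"

type path []string

// iterate reuses a single backing buffer for the path it hands to fn,
// the way a set iterator might.
func iterate(fn func(p path)) {
	buf := path{}
	for _, step := range []string{"a", "b", "c"} {
		buf = append(buf[:0], step) // buffer reused across callbacks
		fn(buf)
	}
}

func main() {
	var kept []path
	iterate(func(p path) {
		// kept = append(kept, p) would leave every entry aliasing buf;
		// copying first, like p.Copy() above, keeps each path intact.
		kept = append(kept, append(path(nil), p...))
	})
	fmt.Println(kept) // [[a] [b] [c]]
}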

View File

@ -18,6 +18,7 @@ import (
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
"sigs.k8s.io/structured-merge-diff/v4/typed"
"sigs.k8s.io/structured-merge-diff/v4/value"
)
// Converter is an interface to the conversion logic. The converter
@ -27,19 +28,39 @@ type Converter interface {
IsMissingVersionError(error) bool
}
// Updater is the object used to compute updated FieldSets and also
// merge the object on Apply.
type Updater struct {
// UpdateBuilder allows you to create a new Updater by exposing all of
// the options and setting them once.
type UpdaterBuilder struct {
Converter Converter
IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set
enableUnions bool
// Stop comparing the new object with the old object after applying.
// This was initially used to avoid spurious etcd updates, but since
// that's vastly inefficient, we've come up with a better way of doing
// that. Create this flag to stop it.
// Comparing has also become more expensive now that we're using
// `value.Equals` rather than `Compare`, so this gives an option to avoid it.
ReturnInputOnNoop bool
}
// EnableUnionFeature turns on union handling. It is disabled by default until the
// feature is complete.
func (s *Updater) EnableUnionFeature() {
s.enableUnions = true
func (u *UpdaterBuilder) BuildUpdater() *Updater {
return &Updater{
Converter: u.Converter,
IgnoredFields: u.IgnoredFields,
returnInputOnNoop: u.ReturnInputOnNoop,
}
}
// Updater is the object used to compute updated FieldSets and also
// merge the object on Apply.
type Updater struct {
// Deprecated: This will eventually become private.
Converter Converter
// Deprecated: This will eventually become private.
IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set
returnInputOnNoop bool
}
func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, workflow string, force bool) (fieldpath.ManagedFields, *typed.Comparison, error) {
@ -126,12 +147,6 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp
if err != nil {
return nil, fieldpath.ManagedFields{}, err
}
if s.enableUnions {
newObject, err = liveObject.NormalizeUnions(newObject)
if err != nil {
return nil, fieldpath.ManagedFields{}, err
}
}
managers, compare, err := s.update(liveObject, newObject, version, managers, manager, true)
if err != nil {
return nil, fieldpath.ManagedFields{}, err
@ -145,7 +160,7 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp
ignored = fieldpath.NewSet()
}
managers[manager] = fieldpath.NewVersionedSet(
managers[manager].Set().Union(compare.Modified).Union(compare.Added).Difference(compare.Removed).RecursiveDifference(ignored),
managers[manager].Set().Difference(compare.Removed).Union(compare.Modified).Union(compare.Added).RecursiveDifference(ignored),
version,
false,
)
@ -157,30 +172,17 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp
// Apply should be called when Apply is run, given the current object as
// well as the configuration that is applied. This will merge the object
// and return it. If the object hasn't changed, nil is returned (the
// managers can still have changed though).
// and return it.
func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string, force bool) (*typed.TypedValue, fieldpath.ManagedFields, error) {
var err error
managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers)
if err != nil {
return nil, fieldpath.ManagedFields{}, err
}
if s.enableUnions {
configObject, err = configObject.NormalizeUnionsApply(configObject)
if err != nil {
return nil, fieldpath.ManagedFields{}, err
}
}
newObject, err := liveObject.Merge(configObject)
if err != nil {
return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to merge config: %v", err)
}
if s.enableUnions {
newObject, err = configObject.NormalizeUnionsApply(newObject)
if err != nil {
return nil, fieldpath.ManagedFields{}, err
}
}
lastSet := managers[manager]
set, err := configObject.ToFieldSet()
if err != nil {
@ -200,11 +202,11 @@ func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fiel
if err != nil {
return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to prune fields: %v", err)
}
managers, compare, err := s.update(liveObject, newObject, version, managers, manager, force)
managers, _, err = s.update(liveObject, newObject, version, managers, manager, force)
if err != nil {
return nil, fieldpath.ManagedFields{}, err
}
if compare.IsSame() {
if !s.returnInputOnNoop && value.EqualsUsing(value.NewFreelistAllocator(), liveObject.AsValue(), newObject.AsValue()) {
newObject = nil
}
return newObject, managers, nil
@ -218,7 +220,8 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel
if lastSet == nil || lastSet.Set().Empty() {
return merged, nil
}
convertedMerged, err := s.Converter.Convert(merged, lastSet.APIVersion())
version := lastSet.APIVersion()
convertedMerged, err := s.Converter.Convert(merged, version)
if err != nil {
if s.Converter.IsMissingVersionError(err) {
return merged, nil
@ -228,7 +231,7 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel
sc, tr := convertedMerged.Schema(), convertedMerged.TypeRef()
pruned := convertedMerged.RemoveItems(lastSet.Set().EnsureNamedFieldsAreMembers(sc, tr))
pruned, err = s.addBackOwnedItems(convertedMerged, pruned, managers, applyingManager)
pruned, err = s.addBackOwnedItems(convertedMerged, pruned, version, managers, applyingManager)
if err != nil {
return nil, fmt.Errorf("failed add back owned items: %v", err)
}
@ -241,7 +244,7 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel
// addBackOwnedItems adds back any fields, list and map items that were removed by prune,
// but other appliers or updaters (or the current applier's new config) claim to own.
func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) {
func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, prunedVersion fieldpath.APIVersion, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) {
var err error
managedAtVersion := map[fieldpath.APIVersion]*fieldpath.Set{}
for _, managerSet := range managedFields {
@ -252,7 +255,6 @@ func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFie
}
// Add back owned items at pruned version first to avoid conversion failure
// caused by pruned fields which are required for conversion.
prunedVersion := fieldpath.APIVersion(*pruned.TypeRef().NamedType)
if managed, ok := managedAtVersion[prunedVersion]; ok {
merged, pruned, err = s.addBackOwnedItemsForVersion(merged, pruned, prunedVersion, managed)
if err != nil {
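The new UpdaterBuilder replaces direct construction of Updater, whose exported fields are now marked deprecated. A hedged sketch of the intended construction pattern; the Converter is left nil here and would have to be a real implementation for any actual Update or Apply call:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
)

func main() {
	// Build the Updater once, with all options set up front.
	updater := (&merge.UpdaterBuilder{
		Converter:         nil, // supply a real Converter for cross-version operations
		IgnoredFields:     map[fieldpath.APIVersion]*fieldpath.Set{},
		ReturnInputOnNoop: false,
	}).BuildUpdater()
	fmt.Printf("%T\n", updater) // *merge.Updater
}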

View File

@ -73,7 +73,7 @@ type Atom struct {
}
// Scalar (AKA "primitive") represents a type which has a single value which is
// either numeric, string, or boolean.
// either numeric, string, or boolean, or untyped for any of them.
//
// TODO: split numeric into float/int? Something even more fine-grained?
type Scalar string
@ -82,6 +82,7 @@ const (
Numeric = Scalar("numeric")
String = Scalar("string")
Boolean = Scalar("boolean")
Untyped = Scalar("untyped")
)
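The new Untyped scalar kind accepts a value of any of the other scalar kinds (see the validateScalar change further down). A hedged sketch of parsing against an untyped scalar field; the schema and type name here are illustrative:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

const schemaYAML = `types:
- name: config
  map:
    fields:
    - name: value
      type:
        scalar: untyped
`

func main() {
	parser, err := typed.NewParser(schemaYAML)
	if err != nil {
		panic(err)
	}
	pt := parser.Type("config")

	// Numeric, string, and boolean values all validate against an untyped scalar.
	for _, doc := range []typed.YAMLObject{`{"value": 1}`, `{"value": "one"}`, `{"value": true}`} {
		if _, err := pt.FromYAML(doc); err != nil {
			fmt.Println("unexpected error:", err)
		}
	}
	fmt.Println("all variants validated")
}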
// ElementRelationship is an enum of the different possible relationships

View File

@ -110,7 +110,7 @@ var SchemaSchemaYAML = `types:
scalar: string
- name: deduceInvalidDiscriminator
type:
scalar: bool
scalar: boolean
- name: fields
type:
list:
@ -145,6 +145,7 @@ var SchemaSchemaYAML = `types:
list:
elementType:
scalar: string
elementRelationship: atomic
- name: untyped
map:
fields:

View File

@ -0,0 +1,460 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package typed
import (
"fmt"
"strings"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
"sigs.k8s.io/structured-merge-diff/v4/schema"
"sigs.k8s.io/structured-merge-diff/v4/value"
)
// Comparison is the return value of a TypedValue.Compare() operation.
//
// No field will appear in more than one of the three fieldsets. If all of the
// fieldsets are empty, then the objects must have been equal.
type Comparison struct {
// Removed contains any fields removed by rhs (the right-hand-side
// object in the comparison).
Removed *fieldpath.Set
// Modified contains fields present in both objects but different.
Modified *fieldpath.Set
// Added contains any fields added by rhs.
Added *fieldpath.Set
}
// IsSame returns true if the comparison returned no changes (the two
// compared objects are similar).
func (c *Comparison) IsSame() bool {
return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty()
}
// String returns a human readable version of the comparison.
func (c *Comparison) String() string {
bld := strings.Builder{}
if !c.Modified.Empty() {
bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified))
}
if !c.Added.Empty() {
bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added))
}
if !c.Removed.Empty() {
bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed))
}
return bld.String()
}
// ExcludeFields recursively removes the given fields from the entire
// comparison.
func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison {
if fields == nil || fields.Empty() {
return c
}
c.Removed = c.Removed.RecursiveDifference(fields)
c.Modified = c.Modified.RecursiveDifference(fields)
c.Added = c.Added.RecursiveDifference(fields)
return c
}
type compareWalker struct {
lhs value.Value
rhs value.Value
schema *schema.Schema
typeRef schema.TypeRef
// Current path that we are comparing
path fieldpath.Path
// Resulting comparison.
comparison *Comparison
// internal housekeeping--don't set when constructing.
inLeaf bool // Set to true if we're in a "big leaf"--atomic map/list
// Allocate only as many walkers as needed for the depth by storing them here.
spareWalkers *[]*compareWalker
allocator value.Allocator
}
// compare walks lhs and rhs, recording added, removed, and modified paths
// in w.comparison.
func (w *compareWalker) compare(prefixFn func() string) (errs ValidationErrors) {
if w.lhs == nil && w.rhs == nil {
// check this condition here instead of everywhere below.
return errorf("at least one of lhs and rhs must be provided")
}
a, ok := w.schema.Resolve(w.typeRef)
if !ok {
return errorf("schema error: no type found matching: %v", *w.typeRef.NamedType)
}
alhs := deduceAtom(a, w.lhs)
arhs := deduceAtom(a, w.rhs)
// deduceAtom does not fix the type for nil values
// nil is a wildcard and will accept whatever form the other operand takes
if w.rhs == nil {
errs = append(errs, handleAtom(alhs, w.typeRef, w)...)
} else if w.lhs == nil || alhs.Equals(&arhs) {
errs = append(errs, handleAtom(arhs, w.typeRef, w)...)
} else {
w2 := *w
errs = append(errs, handleAtom(alhs, w.typeRef, &w2)...)
errs = append(errs, handleAtom(arhs, w.typeRef, w)...)
}
if !w.inLeaf {
if w.lhs == nil {
w.comparison.Added.Insert(w.path)
} else if w.rhs == nil {
w.comparison.Removed.Insert(w.path)
}
}
return errs.WithLazyPrefix(prefixFn)
}
// doLeaf should be called on leaves before descending into children, if there
// will be a descent. It modifies w.inLeaf.
func (w *compareWalker) doLeaf() {
if w.inLeaf {
// We're in a "big leaf", an atomic map or list. Ignore
// subsequent leaves.
return
}
w.inLeaf = true
// We don't recurse into leaf fields for merging.
if w.lhs == nil {
w.comparison.Added.Insert(w.path)
} else if w.rhs == nil {
w.comparison.Removed.Insert(w.path)
} else if !value.EqualsUsing(w.allocator, w.rhs, w.lhs) {
// TODO: Equality is not sufficient for this.
// Need to implement equality check on the value type.
w.comparison.Modified.Insert(w.path)
}
}
func (w *compareWalker) doScalar(t *schema.Scalar) ValidationErrors {
// Make sure at least one side is a valid scalar.
lerrs := validateScalar(t, w.lhs, "lhs: ")
rerrs := validateScalar(t, w.rhs, "rhs: ")
if len(lerrs) > 0 && len(rerrs) > 0 {
return append(lerrs, rerrs...)
}
// All scalars are leaf fields.
w.doLeaf()
return nil
}
func (w *compareWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef, cmp *Comparison) *compareWalker {
if w.spareWalkers == nil {
// first descent.
w.spareWalkers = &[]*compareWalker{}
}
var w2 *compareWalker
if n := len(*w.spareWalkers); n > 0 {
w2, *w.spareWalkers = (*w.spareWalkers)[n-1], (*w.spareWalkers)[:n-1]
} else {
w2 = &compareWalker{}
}
*w2 = *w
w2.typeRef = tr
w2.path = append(w2.path, pe)
w2.lhs = nil
w2.rhs = nil
w2.comparison = cmp
return w2
}
func (w *compareWalker) finishDescent(w2 *compareWalker) {
// if the descent caused a realloc, ensure that we reuse the buffer
// for the next sibling.
w.path = w2.path[:len(w2.path)-1]
*w.spareWalkers = append(*w.spareWalkers, w2)
}
func (w *compareWalker) derefMap(prefix string, v value.Value) (value.Map, ValidationErrors) {
if v == nil {
return nil, nil
}
m, err := mapValue(w.allocator, v)
if err != nil {
return nil, errorf("%v: %v", prefix, err)
}
return m, nil
}
func (w *compareWalker) visitListItems(t *schema.List, lhs, rhs value.List) (errs ValidationErrors) {
rLen := 0
if rhs != nil {
rLen = rhs.Length()
}
lLen := 0
if lhs != nil {
lLen = lhs.Length()
}
maxLength := rLen
if lLen > maxLength {
maxLength = lLen
}
// Contains all the unique PEs between lhs and rhs, exactly once.
// Order doesn't matter since we're just tracking ownership in a set.
allPEs := make([]fieldpath.PathElement, 0, maxLength)
// Gather all the elements from lhs, indexed by PE, in a list for duplicates.
lValues := fieldpath.MakePathElementMap(lLen)
for i := 0; i < lLen; i++ {
child := lhs.At(i)
pe, err := listItemToPathElement(w.allocator, w.schema, t, child)
if err != nil {
errs = append(errs, errorf("element %v: %v", i, err.Error())...)
// If we can't construct the path element, we can't
// even report errors deeper in the schema, so bail on
// this element.
continue
}
if v, found := lValues.Get(pe); found {
list := v.([]value.Value)
lValues.Insert(pe, append(list, child))
} else {
lValues.Insert(pe, []value.Value{child})
allPEs = append(allPEs, pe)
}
}
// Gather all the elements from rhs, indexed by PE, in a list for duplicates.
rValues := fieldpath.MakePathElementMap(rLen)
for i := 0; i < rLen; i++ {
rValue := rhs.At(i)
pe, err := listItemToPathElement(w.allocator, w.schema, t, rValue)
if err != nil {
errs = append(errs, errorf("element %v: %v", i, err.Error())...)
// If we can't construct the path element, we can't
// even report errors deeper in the schema, so bail on
// this element.
continue
}
if v, found := rValues.Get(pe); found {
list := v.([]value.Value)
rValues.Insert(pe, append(list, rValue))
} else {
rValues.Insert(pe, []value.Value{rValue})
if _, found := lValues.Get(pe); !found {
allPEs = append(allPEs, pe)
}
}
}
for _, pe := range allPEs {
lList := []value.Value(nil)
if l, ok := lValues.Get(pe); ok {
lList = l.([]value.Value)
}
rList := []value.Value(nil)
if l, ok := rValues.Get(pe); ok {
rList = l.([]value.Value)
}
switch {
case len(lList) == 0 && len(rList) == 0:
// We shouldn't be here anyway.
return
// Normal use-case:
// We have no duplicates for this PE, compare items one-to-one.
case len(lList) <= 1 && len(rList) <= 1:
lValue := value.Value(nil)
if len(lList) != 0 {
lValue = lList[0]
}
rValue := value.Value(nil)
if len(rList) != 0 {
rValue = rList[0]
}
errs = append(errs, w.compareListItem(t, pe, lValue, rValue)...)
// Duplicates before & after use-case:
// Compare the duplicates lists as if they were atomic, mark modified if they changed.
case len(lList) >= 2 && len(rList) >= 2:
listEqual := func(lList, rList []value.Value) bool {
if len(lList) != len(rList) {
return false
}
for i := range lList {
if !value.Equals(lList[i], rList[i]) {
return false
}
}
return true
}
if !listEqual(lList, rList) {
w.comparison.Modified.Insert(append(w.path, pe))
}
// Duplicates before & not anymore use-case:
// Recursively add new non-duplicate items, remove duplicate marker.
case len(lList) >= 2:
if len(rList) != 0 {
errs = append(errs, w.compareListItem(t, pe, nil, rList[0])...)
}
w.comparison.Removed.Insert(append(w.path, pe))
// New duplicates use-case:
// Recursively remove old non-duplicate items, add duplicate marker.
case len(rList) >= 2:
if len(lList) != 0 {
errs = append(errs, w.compareListItem(t, pe, lList[0], nil)...)
}
w.comparison.Added.Insert(append(w.path, pe))
}
}
return
}
func (w *compareWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) {
var errs ValidationErrors
length := 0
if list != nil {
length = list.Length()
}
observed := fieldpath.MakePathElementValueMap(length)
pes := make([]fieldpath.PathElement, 0, length)
for i := 0; i < length; i++ {
child := list.At(i)
pe, err := listItemToPathElement(w.allocator, w.schema, t, child)
if err != nil {
errs = append(errs, errorf("element %v: %v", i, err.Error())...)
// If we can't construct the path element, we can't
// even report errors deeper in the schema, so bail on
// this element.
continue
}
// Ignore repeated occurrences of `pe`.
if _, found := observed.Get(pe); found {
continue
}
observed.Insert(pe, child)
pes = append(pes, pe)
}
return pes, observed, errs
}
func (w *compareWalker) compareListItem(t *schema.List, pe fieldpath.PathElement, lChild, rChild value.Value) ValidationErrors {
w2 := w.prepareDescent(pe, t.ElementType, w.comparison)
w2.lhs = lChild
w2.rhs = rChild
errs := w2.compare(pe.String)
w.finishDescent(w2)
return errs
}
func (w *compareWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) {
if v == nil {
return nil, nil
}
l, err := listValue(w.allocator, v)
if err != nil {
return nil, errorf("%v: %v", prefix, err)
}
return l, nil
}
func (w *compareWalker) doList(t *schema.List) (errs ValidationErrors) {
lhs, _ := w.derefList("lhs: ", w.lhs)
if lhs != nil {
defer w.allocator.Free(lhs)
}
rhs, _ := w.derefList("rhs: ", w.rhs)
if rhs != nil {
defer w.allocator.Free(rhs)
}
// If both lhs and rhs are empty/null, treat it as a
// leaf: this helps preserve the empty/null
// distinction.
emptyPromoteToLeaf := (lhs == nil || lhs.Length() == 0) && (rhs == nil || rhs.Length() == 0)
if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf {
w.doLeaf()
return nil
}
if lhs == nil && rhs == nil {
return nil
}
errs = w.visitListItems(t, lhs, rhs)
return errs
}
func (w *compareWalker) visitMapItem(t *schema.Map, out map[string]interface{}, key string, lhs, rhs value.Value) (errs ValidationErrors) {
fieldType := t.ElementType
if sf, ok := t.FindField(key); ok {
fieldType = sf.Type
}
pe := fieldpath.PathElement{FieldName: &key}
w2 := w.prepareDescent(pe, fieldType, w.comparison)
w2.lhs = lhs
w2.rhs = rhs
errs = append(errs, w2.compare(pe.String)...)
w.finishDescent(w2)
return errs
}
func (w *compareWalker) visitMapItems(t *schema.Map, lhs, rhs value.Map) (errs ValidationErrors) {
out := map[string]interface{}{}
value.MapZipUsing(w.allocator, lhs, rhs, value.Unordered, func(key string, lhsValue, rhsValue value.Value) bool {
errs = append(errs, w.visitMapItem(t, out, key, lhsValue, rhsValue)...)
return true
})
return errs
}
func (w *compareWalker) doMap(t *schema.Map) (errs ValidationErrors) {
lhs, _ := w.derefMap("lhs: ", w.lhs)
if lhs != nil {
defer w.allocator.Free(lhs)
}
rhs, _ := w.derefMap("rhs: ", w.rhs)
if rhs != nil {
defer w.allocator.Free(rhs)
}
// If both lhs and rhs are empty/null, treat it as a
// leaf: this helps preserve the empty/null
// distinction.
emptyPromoteToLeaf := (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty())
if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf {
w.doLeaf()
return nil
}
if lhs == nil && rhs == nil {
return nil
}
errs = append(errs, w.visitMapItems(t, lhs, rhs)...)
return errs
}
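compare.go is new in this update and moves Compare off the merge path onto a dedicated compareWalker. A hedged usage sketch of the public entry point, using the deduced parser so no schema is needed; the expected output is approximate:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

func main() {
	// DeducedParseableType infers the schema from the data itself.
	lhs, err := typed.DeducedParseableType.FromUnstructured(map[string]interface{}{
		"spec": map[string]interface{}{"replicas": int64(1), "paused": false},
	})
	if err != nil {
		panic(err)
	}
	rhs, err := typed.DeducedParseableType.FromUnstructured(map[string]interface{}{
		"spec": map[string]interface{}{"replicas": int64(3)},
	})
	if err != nil {
		panic(err)
	}

	// Compare walks both values with the compareWalker above; the result's
	// Added/Modified/Removed fields are fieldpath.Sets.
	cmp, err := lhs.Compare(rhs)
	if err != nil {
		panic(err)
	}
	fmt.Println(cmp.IsSame()) // false
	fmt.Print(cmp)            // .spec.replicas modified, .spec.paused removed
}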

View File

@ -197,7 +197,7 @@ func getAssociativeKeyDefault(s *schema.Schema, list *schema.List, fieldName str
return field.Default, nil
}
func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) {
func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) {
pe := fieldpath.PathElement{}
if child.IsNull() {
// null entries are illegal.
@ -225,7 +225,7 @@ func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema,
return pe, nil
}
func setItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) {
func setItemToPathElement(child value.Value) (fieldpath.PathElement, error) {
pe := fieldpath.PathElement{}
switch {
case child.IsMap():
@ -245,16 +245,15 @@ func setItemToPathElement(list *schema.List, index int, child value.Value) (fiel
}
}
func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) {
if list.ElementRelationship == schema.Associative {
if len(list.Keys) > 0 {
return keyedAssociativeListItemToPathElement(a, s, list, index, child)
}
// If there's no keys, then we must be a set of primitives.
return setItemToPathElement(list, index, child)
func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) {
if list.ElementRelationship != schema.Associative {
return fieldpath.PathElement{}, errors.New("invalid indexing of non-associative list")
}
// Use the index as a key for atomic lists.
return fieldpath.PathElement{Index: &index}, nil
if len(list.Keys) > 0 {
return keyedAssociativeListItemToPathElement(a, s, list, child)
}
// If there's no keys, then we must be a set of primitives.
return setItemToPathElement(child)
}

View File

@ -113,11 +113,12 @@ func (w *mergingWalker) doLeaf() {
w.rule(w)
}
func (w *mergingWalker) doScalar(t *schema.Scalar) (errs ValidationErrors) {
errs = append(errs, validateScalar(t, w.lhs, "lhs: ")...)
errs = append(errs, validateScalar(t, w.rhs, "rhs: ")...)
if len(errs) > 0 {
return errs
func (w *mergingWalker) doScalar(t *schema.Scalar) ValidationErrors {
// Make sure at least one side is a valid scalar.
lerrs := validateScalar(t, w.lhs, "lhs: ")
rerrs := validateScalar(t, w.rhs, "rhs: ")
if len(lerrs) > 0 && len(rerrs) > 0 {
return append(lerrs, rerrs...)
}
// All scalars are leaf fields.
@ -179,14 +180,18 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err
}
out := make([]interface{}, 0, outLen)
rhsOrder, observedRHS, rhsErrs := w.indexListPathElements(t, rhs)
rhsPEs, observedRHS, rhsErrs := w.indexListPathElements(t, rhs, false)
errs = append(errs, rhsErrs...)
lhsOrder, observedLHS, lhsErrs := w.indexListPathElements(t, lhs)
lhsPEs, observedLHS, lhsErrs := w.indexListPathElements(t, lhs, true)
errs = append(errs, lhsErrs...)
if len(errs) != 0 {
return errs
}
sharedOrder := make([]*fieldpath.PathElement, 0, rLen)
for i := range rhsOrder {
pe := &rhsOrder[i]
for i := range rhsPEs {
pe := &rhsPEs[i]
if _, ok := observedLHS.Get(*pe); ok {
sharedOrder = append(sharedOrder, pe)
}
@ -198,13 +203,15 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err
sharedOrder = sharedOrder[1:]
}
lLen, rLen = len(lhsOrder), len(rhsOrder)
mergedRHS := fieldpath.MakePathElementMap(len(rhsPEs))
lLen, rLen = len(lhsPEs), len(rhsPEs)
for lI, rI := 0, 0; lI < lLen || rI < rLen; {
if lI < lLen && rI < rLen {
pe := lhsOrder[lI]
if pe.Equals(rhsOrder[rI]) {
pe := lhsPEs[lI]
if pe.Equals(rhsPEs[rI]) {
// merge LHS & RHS items
lChild, _ := observedLHS.Get(pe)
mergedRHS.Insert(pe, struct{}{})
lChild, _ := observedLHS.Get(pe) // may be nil if the PE is duplicated.
rChild, _ := observedRHS.Get(pe)
mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild)
errs = append(errs, errs...)
@ -221,17 +228,17 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err
}
continue
}
if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsOrder[lI]) {
if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsPEs[lI]) {
// shared item, but not the one we want in this round
lI++
continue
}
}
if lI < lLen {
pe := lhsOrder[lI]
pe := lhsPEs[lI]
if _, ok := observedRHS.Get(pe); !ok {
// take LHS item
lChild, _ := observedLHS.Get(pe)
// take LHS item using At to make sure we get the right item (observed may not contain the right item).
lChild := lhs.AtUsing(w.allocator, lI)
mergeOut, errs := w.mergeListItem(t, pe, lChild, nil)
errs = append(errs, errs...)
if mergeOut != nil {
@ -239,12 +246,16 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err
}
lI++
continue
} else if _, ok := mergedRHS.Get(pe); ok {
// we've already merged it with RHS, we don't want to duplicate it, skip it.
lI++
}
}
if rI < rLen {
// Take the RHS item, merge with matching LHS item if possible
pe := rhsOrder[rI]
lChild, _ := observedLHS.Get(pe) // may be nil
pe := rhsPEs[rI]
mergedRHS.Insert(pe, struct{}{})
lChild, _ := observedLHS.Get(pe) // may be nil if absent or duplicated.
rChild, _ := observedRHS.Get(pe)
mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild)
errs = append(errs, errs...)
@ -271,7 +282,7 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err
return errs
}
func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) {
func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List, allowDuplicates bool) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) {
var errs ValidationErrors
length := 0
if list != nil {
@ -281,7 +292,7 @@ func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) (
pes := make([]fieldpath.PathElement, 0, length)
for i := 0; i < length; i++ {
child := list.At(i)
pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child)
pe, err := listItemToPathElement(w.allocator, w.schema, t, child)
if err != nil {
errs = append(errs, errorf("element %v: %v", i, err.Error())...)
// If we can't construct the path element, we can't
@ -289,11 +300,15 @@ func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) (
// this element.
continue
}
if _, found := observed.Get(pe); found {
if _, found := observed.Get(pe); found && !allowDuplicates {
errs = append(errs, errorf("duplicate entries for key %v", pe.String())...)
continue
} else if !found {
observed.Insert(pe, child)
} else {
// Duplicated items are not merged with the new value, make them nil.
observed.Insert(pe, value.NewValueInterface(nil))
}
observed.Insert(pe, child)
pes = append(pes, pe)
}
return pes, observed, errs
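These merging changes mostly concern associative lists that contain duplicate keys (now representable when validation runs with AllowDuplicates). For the common duplicate-free case the observable behavior is unchanged; a hedged sketch of that baseline, merging two associative lists keyed on name (schema and type names are illustrative, and the printed output is approximate):

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

const schemaYAML = `types:
- name: podish
  map:
    fields:
    - name: containers
      type:
        list:
          elementType:
            map:
              fields:
              - name: name
                type:
                  scalar: string
              - name: image
                type:
                  scalar: string
          elementRelationship: associative
          keys: ["name"]
`

func main() {
	parser, err := typed.NewParser(schemaYAML)
	if err != nil {
		panic(err)
	}
	pt := parser.Type("podish")

	lhs, err := pt.FromYAML(`{"containers": [{"name": "app", "image": "app:v1"}]}`)
	if err != nil {
		panic(err)
	}
	rhs, err := pt.FromYAML(`{"containers": [{"name": "app", "image": "app:v2"}, {"name": "sidecar", "image": "sc:v1"}]}`)
	if err != nil {
		panic(err)
	}

	// Items present on both sides take the RHS value; RHS-only items are appended.
	merged, err := lhs.Merge(rhs)
	if err != nil {
		panic(err)
	}
	fmt.Println(merged.AsValue().Unstructured())
	// map[containers:[map[image:app:v2 name:app] map[image:sc:v1 name:sidecar]]]
}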

View File

@ -93,13 +93,13 @@ func (p ParseableType) IsValid() bool {
// FromYAML parses a yaml string into an object with the current schema
// and the type "typename" or an error if validation fails.
func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) {
func (p ParseableType) FromYAML(object YAMLObject, opts ...ValidationOptions) (*TypedValue, error) {
var v interface{}
err := yaml.Unmarshal([]byte(object), &v)
if err != nil {
return nil, err
}
return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef)
return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef, opts...)
}
// FromUnstructured converts a go "interface{}" type, typically an
@ -108,8 +108,8 @@ func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) {
// The provided interface{} must be one of: map[string]interface{},
// map[interface{}]interface{}, []interface{}, int types, float types,
// string or boolean. Nested interface{} must also be one of these types.
func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) {
return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef)
func (p ParseableType) FromUnstructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) {
return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef, opts...)
}
// FromStructured converts a go "interface{}" type, typically an structured object in
@ -117,12 +117,12 @@ func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) {
// schema validation. The provided "interface{}" value must be a pointer so that the
// value can be modified via reflection. The provided "interface{}" may contain structs
// and types that are converted to Values by the jsonMarshaler interface.
func (p ParseableType) FromStructured(in interface{}) (*TypedValue, error) {
func (p ParseableType) FromStructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) {
v, err := value.NewValueReflect(in)
if err != nil {
return nil, fmt.Errorf("error creating struct value reflector: %v", err)
}
return AsTyped(v, p.Schema, p.TypeRef)
return AsTyped(v, p.Schema, p.TypeRef, opts...)
}
// DeducedParseableType is a ParseableType that deduces the type from
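FromYAML, FromUnstructured, and FromStructured now forward optional ValidationOptions to AsTyped, which lets callers opt into tolerating duplicate keys in sets and associative lists. A hedged sketch of the difference (schema and type name are illustrative):

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

const schemaYAML = `types:
- name: lists
  map:
    fields:
    - name: ports
      type:
        list:
          elementType:
            scalar: numeric
          elementRelationship: associative
`

func main() {
	parser, err := typed.NewParser(schemaYAML)
	if err != nil {
		panic(err)
	}
	pt := parser.Type("lists")

	doc := typed.YAMLObject(`{"ports": [80, 80, 443]}`)

	// Default validation still rejects duplicate set members...
	if _, err := pt.FromYAML(doc); err != nil {
		fmt.Println("strict:", err)
	}
	// ...while AllowDuplicates tolerates them.
	if _, err := pt.FromYAML(doc, typed.AllowDuplicates); err == nil {
		fmt.Println("AllowDuplicates: ok")
	}
}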

View File

@ -74,9 +74,9 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) {
iter := l.RangeUsing(w.allocator)
defer w.allocator.Free(iter)
for iter.Next() {
i, item := iter.Item()
_, item := iter.Item()
// Ignore error because we have already validated this list
pe, _ := listItemToPathElement(w.allocator, w.schema, t, i, item)
pe, _ := listItemToPathElement(w.allocator, w.schema, t, item)
path, _ := fieldpath.MakePath(pe)
// save items on the path when we shouldExtract
// but ignore them when we are removing (i.e. !w.shouldExtract)

View File

@ -94,9 +94,31 @@ func (v *toFieldSetWalker) doScalar(t *schema.Scalar) ValidationErrors {
}
func (v *toFieldSetWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) {
// Keeps track of the PEs we've seen
seen := fieldpath.MakePathElementSet(list.Length())
// Keeps track of the PEs we've counted as duplicates
duplicates := fieldpath.MakePathElementSet(list.Length())
for i := 0; i < list.Length(); i++ {
child := list.At(i)
pe, _ := listItemToPathElement(v.allocator, v.schema, t, i, child)
pe, _ := listItemToPathElement(v.allocator, v.schema, t, child)
if seen.Has(pe) {
if duplicates.Has(pe) {
// do nothing
} else {
v.set.Insert(append(v.path, pe))
duplicates.Insert(pe)
}
} else {
seen.Insert(pe)
}
}
for i := 0; i < list.Length(); i++ {
child := list.At(i)
pe, _ := listItemToPathElement(v.allocator, v.schema, t, child)
if duplicates.Has(pe) {
continue
}
v2 := v.prepareDescent(pe, t.ElementType)
v2.value = child
errs = append(errs, v2.toFieldSet()...)

View File

@ -17,8 +17,6 @@ limitations under the License.
package typed
import (
"fmt"
"strings"
"sync"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
@ -26,16 +24,24 @@ import (
"sigs.k8s.io/structured-merge-diff/v4/value"
)
// ValidationOptions is the list of all the options available when running the validation.
type ValidationOptions int
const (
// AllowDuplicates means that sets and associative lists can have duplicate similar items.
AllowDuplicates ValidationOptions = iota
)
// AsTyped accepts a value and a type and returns a TypedValue. 'v' must have
// type 'typeName' in the schema. An error is returned if the v doesn't conform
// to the schema.
func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedValue, error) {
func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef, opts ...ValidationOptions) (*TypedValue, error) {
tv := &TypedValue{
value: v,
typeRef: typeRef,
schema: s,
}
if err := tv.Validate(); err != nil {
if err := tv.Validate(opts...); err != nil {
return nil, err
}
return tv, nil
@ -45,6 +51,10 @@ func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedVal
// conforms to the schema, for cases where that has already been checked or
// where you're going to call a method that validates as a side-effect (like
// ToFieldSet).
//
// Deprecated: This function was initially created because validation
// was expensive. Now that this has been solved, objects should always
// be created as validated, using `AsTyped`.
func AsTypedUnvalidated(v value.Value, s *schema.Schema, typeRef schema.TypeRef) *TypedValue {
tv := &TypedValue{
value: v,
@ -77,8 +87,14 @@ func (tv TypedValue) Schema() *schema.Schema {
}
// Validate returns an error with a list of every spec violation.
func (tv TypedValue) Validate() error {
func (tv TypedValue) Validate(opts ...ValidationOptions) error {
w := tv.walker()
for _, opt := range opts {
switch opt {
case AllowDuplicates:
w.allowDuplicates = true
}
}
defer w.finished()
if errs := w.validate(nil); len(errs) != 0 {
return errs
@ -113,6 +129,10 @@ func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) {
return merge(&tv, pso, ruleKeepRHS, nil)
}
var cmpwPool = sync.Pool{
New: func() interface{} { return &compareWalker{} },
}
// Compare compares the two objects. See the comments on the `Comparison`
// struct for details on the return value.
//
@ -120,33 +140,44 @@ func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) {
// match), or an error will be returned. Validation errors will be returned if
// the objects don't conform to the schema.
func (tv TypedValue) Compare(rhs *TypedValue) (c *Comparison, err error) {
c = &Comparison{
lhs := tv
if lhs.schema != rhs.schema {
return nil, errorf("expected objects with types from the same schema")
}
if !lhs.typeRef.Equals(&rhs.typeRef) {
return nil, errorf("expected objects of the same type, but got %v and %v", lhs.typeRef, rhs.typeRef)
}
cmpw := cmpwPool.Get().(*compareWalker)
defer func() {
cmpw.lhs = nil
cmpw.rhs = nil
cmpw.schema = nil
cmpw.typeRef = schema.TypeRef{}
cmpw.comparison = nil
cmpw.inLeaf = false
cmpwPool.Put(cmpw)
}()
cmpw.lhs = lhs.value
cmpw.rhs = rhs.value
cmpw.schema = lhs.schema
cmpw.typeRef = lhs.typeRef
cmpw.comparison = &Comparison{
Removed: fieldpath.NewSet(),
Modified: fieldpath.NewSet(),
Added: fieldpath.NewSet(),
}
_, err = merge(&tv, rhs, func(w *mergingWalker) {
if w.lhs == nil {
c.Added.Insert(w.path)
} else if w.rhs == nil {
c.Removed.Insert(w.path)
} else if !value.Equals(w.rhs, w.lhs) {
// TODO: Equality is not sufficient for this.
// Need to implement equality check on the value type.
c.Modified.Insert(w.path)
}
}, func(w *mergingWalker) {
if w.lhs == nil {
c.Added.Insert(w.path)
} else if w.rhs == nil {
c.Removed.Insert(w.path)
}
})
if err != nil {
return nil, err
if cmpw.allocator == nil {
cmpw.allocator = value.NewFreelistAllocator()
}
return c, nil
errs := cmpw.compare(nil)
if len(errs) > 0 {
return nil, errs
}
return cmpw.comparison, nil
}
// RemoveItems removes each provided list or map item from the value.
@ -161,63 +192,6 @@ func (tv TypedValue) ExtractItems(items *fieldpath.Set) *TypedValue {
return &tv
}
// NormalizeUnions takes the new object and normalizes the union:
// - If discriminator changed to non-nil, and a new field has been added
// that doesn't match, an error is returned,
// - If discriminator hasn't changed and two fields or more are set, an
// error is returned,
// - If discriminator changed to non-nil, all other fields but the
// discriminated one will be cleared,
// - Otherwise, If only one field is left, update discriminator to that value.
//
// Please note: union behavior isn't finalized yet and this is still experimental.
func (tv TypedValue) NormalizeUnions(new *TypedValue) (*TypedValue, error) {
var errs ValidationErrors
var normalizeFn = func(w *mergingWalker) {
if w.rhs != nil {
v := w.rhs.Unstructured()
w.out = &v
}
if err := normalizeUnions(w); err != nil {
errs = append(errs, errorf(err.Error())...)
}
}
out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn)
if mergeErrs != nil {
errs = append(errs, mergeErrs.(ValidationErrors)...)
}
if len(errs) > 0 {
return nil, errs
}
return out, nil
}
// NormalizeUnionsApply specifically normalize unions on apply. It
// validates that the applied union is correct (there should be no
// ambiguity there), and clear the fields according to the sent intent.
//
// Please note: union behavior isn't finalized yet and this is still experimental.
func (tv TypedValue) NormalizeUnionsApply(new *TypedValue) (*TypedValue, error) {
var errs ValidationErrors
var normalizeFn = func(w *mergingWalker) {
if w.rhs != nil {
v := w.rhs.Unstructured()
w.out = &v
}
if err := normalizeUnionsApply(w); err != nil {
errs = append(errs, errorf(err.Error())...)
}
}
out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn)
if mergeErrs != nil {
errs = append(errs, mergeErrs.(ValidationErrors)...)
}
if len(errs) > 0 {
return nil, errs
}
return out, nil
}
func (tv TypedValue) Empty() *TypedValue {
tv.value = value.NewValueInterface(nil)
return &tv
@ -273,50 +247,3 @@ func merge(lhs, rhs *TypedValue, rule, postRule mergeRule) (*TypedValue, error)
}
return out, nil
}
// Comparison is the return value of a TypedValue.Compare() operation.
//
// No field will appear in more than one of the three fieldsets. If all of the
// fieldsets are empty, then the objects must have been equal.
type Comparison struct {
// Removed contains any fields removed by rhs (the right-hand-side
// object in the comparison).
Removed *fieldpath.Set
// Modified contains fields present in both objects but different.
Modified *fieldpath.Set
// Added contains any fields added by rhs.
Added *fieldpath.Set
}
// IsSame returns true if the comparison returned no changes (the two
// compared objects are similar).
func (c *Comparison) IsSame() bool {
return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty()
}
// String returns a human readable version of the comparison.
func (c *Comparison) String() string {
bld := strings.Builder{}
if !c.Modified.Empty() {
bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified))
}
if !c.Added.Empty() {
bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added))
}
if !c.Removed.Empty() {
bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed))
}
return bld.String()
}
// ExcludeFields fields from the compare recursively removes the fields
// from the entire comparison
func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison {
if fields == nil || fields.Empty() {
return c
}
c.Removed = c.Removed.RecursiveDifference(fields)
c.Modified = c.Modified.RecursiveDifference(fields)
c.Added = c.Added.RecursiveDifference(fields)
return c
}
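Compare now also reuses compareWalker instances through a sync.Pool, resetting every field before Put so one comparison cannot leak object references into the next. A minimal sketch of that reset-then-Put pattern:

package main

import (
	"fmt"
	"sync"
)

type walker struct {
	lhs, rhs interface{}
	depth    int
}

var walkerPool = sync.Pool{New: func() interface{} { return &walker{} }}

func compare(lhs, rhs interface{}) bool {
	w := walkerPool.Get().(*walker)
	defer func() {
		// Zero all fields before returning the walker to the pool so the
		// next caller cannot observe (or retain) this call's operands.
		*w = walker{}
		walkerPool.Put(w)
	}()
	w.lhs, w.rhs = lhs, rhs
	return w.lhs == w.rhs
}

func main() {
	fmt.Println(compare(1, 1), compare(1, 2)) // true false
}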

View File

@ -1,276 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package typed
import (
"fmt"
"strings"
"sigs.k8s.io/structured-merge-diff/v4/schema"
"sigs.k8s.io/structured-merge-diff/v4/value"
)
func normalizeUnions(w *mergingWalker) error {
atom, found := w.schema.Resolve(w.typeRef)
if !found {
panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef))
}
// Unions can only be in structures, and the struct must not have been removed
if atom.Map == nil || w.out == nil {
return nil
}
var old value.Map
if w.lhs != nil && !w.lhs.IsNull() {
old = w.lhs.AsMap()
}
for _, union := range atom.Map.Unions {
if err := newUnion(&union).Normalize(old, w.rhs.AsMap(), value.NewValueInterface(*w.out).AsMap()); err != nil {
return err
}
}
return nil
}
func normalizeUnionsApply(w *mergingWalker) error {
atom, found := w.schema.Resolve(w.typeRef)
if !found {
panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef))
}
// Unions can only be in structures, and the struct must not have been removed
if atom.Map == nil || w.out == nil {
return nil
}
var old value.Map
if w.lhs != nil && !w.lhs.IsNull() {
old = w.lhs.AsMap()
}
for _, union := range atom.Map.Unions {
out := value.NewValueInterface(*w.out)
if err := newUnion(&union).NormalizeApply(old, w.rhs.AsMap(), out.AsMap()); err != nil {
return err
}
*w.out = out.Unstructured()
}
return nil
}
type discriminated string
type field string
type discriminatedNames struct {
f2d map[field]discriminated
d2f map[discriminated]field
}
func newDiscriminatedName(f2d map[field]discriminated) discriminatedNames {
d2f := map[discriminated]field{}
for key, value := range f2d {
d2f[value] = key
}
return discriminatedNames{
f2d: f2d,
d2f: d2f,
}
}
func (dn discriminatedNames) toField(d discriminated) field {
if f, ok := dn.d2f[d]; ok {
return f
}
return field(d)
}
func (dn discriminatedNames) toDiscriminated(f field) discriminated {
if d, ok := dn.f2d[f]; ok {
return d
}
return discriminated(f)
}
type discriminator struct {
name string
}
func (d *discriminator) Set(m value.Map, v discriminated) {
if d == nil {
return
}
m.Set(d.name, value.NewValueInterface(string(v)))
}
func (d *discriminator) Get(m value.Map) discriminated {
if d == nil || m == nil {
return ""
}
val, ok := m.Get(d.name)
if !ok {
return ""
}
if !val.IsString() {
return ""
}
return discriminated(val.AsString())
}
type fieldsSet map[field]struct{}
// newFieldsSet returns a map of the fields that are part of the union and are set
// in the given map.
func newFieldsSet(m value.Map, fields []field) fieldsSet {
if m == nil {
return nil
}
set := fieldsSet{}
for _, f := range fields {
if subField, ok := m.Get(string(f)); ok && !subField.IsNull() {
set.Add(f)
}
}
return set
}
func (fs fieldsSet) Add(f field) {
if fs == nil {
fs = map[field]struct{}{}
}
fs[f] = struct{}{}
}
func (fs fieldsSet) One() *field {
for f := range fs {
return &f
}
return nil
}
func (fs fieldsSet) Has(f field) bool {
_, ok := fs[f]
return ok
}
func (fs fieldsSet) List() []field {
fields := []field{}
for f := range fs {
fields = append(fields, f)
}
return fields
}
func (fs fieldsSet) Difference(o fieldsSet) fieldsSet {
n := fieldsSet{}
for f := range fs {
if !o.Has(f) {
n.Add(f)
}
}
return n
}
func (fs fieldsSet) String() string {
s := []string{}
for k := range fs {
s = append(s, string(k))
}
return strings.Join(s, ", ")
}
type union struct {
deduceInvalidDiscriminator bool
d *discriminator
dn discriminatedNames
f []field
}
func newUnion(su *schema.Union) *union {
u := &union{}
if su.Discriminator != nil {
u.d = &discriminator{name: *su.Discriminator}
}
f2d := map[field]discriminated{}
for _, f := range su.Fields {
u.f = append(u.f, field(f.FieldName))
f2d[field(f.FieldName)] = discriminated(f.DiscriminatorValue)
}
u.dn = newDiscriminatedName(f2d)
u.deduceInvalidDiscriminator = su.DeduceInvalidDiscriminator
return u
}
// clear removes all the fields in map that are part of the union, but
// the one we decided to keep.
func (u *union) clear(m value.Map, f field) {
for _, fieldName := range u.f {
if field(fieldName) != f {
m.Delete(string(fieldName))
}
}
}
func (u *union) Normalize(old, new, out value.Map) error {
os := newFieldsSet(old, u.f)
ns := newFieldsSet(new, u.f)
diff := ns.Difference(os)
if u.d.Get(old) != u.d.Get(new) && u.d.Get(new) != "" {
if len(diff) == 1 && u.d.Get(new) != u.dn.toDiscriminated(*diff.One()) {
return fmt.Errorf("discriminator (%v) and field changed (%v) don't match", u.d.Get(new), diff.One())
}
if len(diff) > 1 {
return fmt.Errorf("multiple new fields added: %v", diff)
}
u.clear(out, u.dn.toField(u.d.Get(new)))
return nil
}
if len(ns) > 1 {
return fmt.Errorf("multiple fields set without discriminator change: %v", ns)
}
// Set discriminiator if it needs to be deduced.
if u.deduceInvalidDiscriminator && len(ns) == 1 {
u.d.Set(out, u.dn.toDiscriminated(*ns.One()))
}
return nil
}
func (u *union) NormalizeApply(applied, merged, out value.Map) error {
as := newFieldsSet(applied, u.f)
if len(as) > 1 {
return fmt.Errorf("more than one field of union applied: %v", as)
}
if len(as) == 0 {
// None is set, just leave.
return nil
}
// We have exactly one, discriminiator must match if set
if u.d.Get(applied) != "" && u.d.Get(applied) != u.dn.toDiscriminated(*as.One()) {
return fmt.Errorf("applied discriminator (%v) doesn't match applied field (%v)", u.d.Get(applied), *as.One())
}
// Update discriminiator if needed
if u.deduceInvalidDiscriminator {
u.d.Set(out, u.dn.toDiscriminated(*as.One()))
}
// Clear others fields.
u.clear(out, *as.One())
return nil
}

View File

@ -33,6 +33,7 @@ func (tv TypedValue) walker() *validatingObjectWalker {
v.value = tv.value
v.schema = tv.schema
v.typeRef = tv.typeRef
v.allowDuplicates = false
if v.allocator == nil {
v.allocator = value.NewFreelistAllocator()
}
@ -49,6 +50,9 @@ type validatingObjectWalker struct {
value value.Value
schema *schema.Schema
typeRef schema.TypeRef
// If set to true, duplicates will be allowed in
// associativeLists/sets.
allowDuplicates bool
// Allocate only as many walkers as needed for the depth by storing them here.
spareWalkers *[]*validatingObjectWalker
@ -102,6 +106,12 @@ func validateScalar(t *schema.Scalar, v value.Value, prefix string) (errs Valida
if !v.IsBool() {
return errorf("%vexpected boolean, got %v", prefix, v)
}
case schema.Untyped:
if !v.IsFloat() && !v.IsInt() && !v.IsString() && !v.IsBool() {
return errorf("%vexpected any scalar, got %v", prefix, v)
}
default:
return errorf("%vunexpected scalar type in schema: %v", prefix, *t)
}
return nil
}
@ -123,7 +133,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List)
pe.Index = &i
} else {
var err error
pe, err = listItemToPathElement(v.allocator, v.schema, t, i, child)
pe, err = listItemToPathElement(v.allocator, v.schema, t, child)
if err != nil {
errs = append(errs, errorf("element %v: %v", i, err.Error())...)
// If we can't construct the path element, we can't
@ -131,7 +141,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List)
// this element.
return
}
if observedKeys.Has(pe) {
if observedKeys.Has(pe) && !v.allowDuplicates {
errs = append(errs, errorf("duplicate entries for key %v", pe.String())...)
}
observedKeys.Insert(pe)

View File

@ -136,7 +136,7 @@ func (r mapReflect) EqualsUsing(a Allocator, m Map) bool {
if !ok {
return false
}
return Equals(vr.mustReuse(lhsVal, entry, nil, nil), value)
return EqualsUsing(a, vr.mustReuse(lhsVal, entry, nil, nil), value)
})
}

View File

@ -88,12 +88,12 @@ func (m mapUnstructuredInterface) EqualsUsing(a Allocator, other Map) bool {
}
vv := a.allocValueUnstructured()
defer a.Free(vv)
return other.Iterate(func(key string, value Value) bool {
return other.IterateUsing(a, func(key string, value Value) bool {
lhsVal, ok := m[key]
if !ok {
return false
}
return Equals(vv.reuse(lhsVal), value)
return EqualsUsing(a, vv.reuse(lhsVal), value)
})
}
@ -168,12 +168,12 @@ func (m mapUnstructuredString) EqualsUsing(a Allocator, other Map) bool {
}
vv := a.allocValueUnstructured()
defer a.Free(vv)
return other.Iterate(func(key string, value Value) bool {
return other.IterateUsing(a, func(key string, value Value) bool {
lhsVal, ok := m[key]
if !ok {
return false
}
return Equals(vv.reuse(lhsVal), value)
return EqualsUsing(a, vv.reuse(lhsVal), value)
})
}
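The Equals → EqualsUsing changes in the two map implementations make nested comparisons keep using the caller's allocator instead of falling back to fresh allocations at each level. A short sketch of the allocator-aware entry point; the same allocator can be reused across many comparisons:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/value"
)

func main() {
	a := value.NewValueInterface(map[string]interface{}{"spec": map[string]interface{}{"replicas": int64(3)}})
	b := value.NewValueInterface(map[string]interface{}{"spec": map[string]interface{}{"replicas": int64(3)}})

	// A freelist allocator can be shared across comparisons to avoid
	// re-allocating the temporary map/list wrappers used while walking.
	alloc := value.NewFreelistAllocator()
	fmt.Println(value.EqualsUsing(alloc, a, b)) // true
}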

View File

@ -154,7 +154,9 @@ func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fi
if field.Type.Kind() == reflect.Ptr {
e = field.Type.Elem()
}
buildStructCacheEntry(e, infos, append(fieldPath, field.Index))
if e.Kind() == reflect.Struct {
buildStructCacheEntry(e, infos, append(fieldPath, field.Index))
}
continue
}
info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type}