Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 18:43:34 +00:00
build: move e2e dependencies into e2e/go.mod
Several packages are only used while running the e2e suite. These packages are less important to update, as they cannot influence the final executable that is part of the Ceph-CSI container image. By moving these dependencies out of the main Ceph-CSI go.mod, it is easier to identify whether a reported CVE affects Ceph-CSI itself or only the testing code (like most of the Kubernetes CVEs).

Signed-off-by: Niels de Vos <ndevos@ibm.com>
Committed by: mergify[bot]
Parent: 15da101b1b
Commit: bec6090996
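As background for the go.mod split described in the commit message, the sketch below shows roughly what a dedicated e2e module can look like; the module path, Go version, and requirements are illustrative assumptions, not values taken from this commit. Test-only packages are required by e2e/go.mod, so they never enter the dependency graph of the shipped driver binary.

// e2e/go.mod (hypothetical sketch, not from this commit)
module github.com/ceph/ceph-csi/e2e

go 1.22

require (
	github.com/onsi/ginkgo/v2 v2.19.0 // e2e test framework only
	github.com/onsi/gomega v1.33.1 // matchers used by the test suite only
	k8s.io/kubernetes v1.30.0 // e2e helpers, never linked into the container image
)

With such a layout, building the driver from the repository root resolves only the main go.mod, while running the suite from the e2e directory resolves e2e/go.mod, which makes it straightforward to decide whether a CVE in a test-only dependency affects the released image.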
e2e/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go (generated, vendored, normal file): 364 lines added
@@ -0,0 +1,364 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Helper code for parsing a protocol buffer

package protolazy

import (
	"errors"
	"fmt"
	"io"

	"google.golang.org/protobuf/encoding/protowire"
)

// BufferReader is a structure encapsulating a protobuf and a current position
type BufferReader struct {
	Buf []byte
	Pos int
}

// NewBufferReader creates a new BufferReader from a protobuf
func NewBufferReader(buf []byte) BufferReader {
	return BufferReader{Buf: buf, Pos: 0}
}

var errOutOfBounds = errors.New("protobuf decoding: out of bounds")
var errOverflow = errors.New("proto: integer overflow")

func (b *BufferReader) DecodeVarintSlow() (x uint64, err error) {
	i := b.Pos
	l := len(b.Buf)

	for shift := uint(0); shift < 64; shift += 7 {
		if i >= l {
			err = io.ErrUnexpectedEOF
			return
		}
		v := b.Buf[i]
		i++
		x |= (uint64(v) & 0x7F) << shift
		if v < 0x80 {
			b.Pos = i
			return
		}
	}

	// The number is too large to represent in a 64-bit value.
	err = errOverflow
	return
}

// decodeVarint decodes a varint at the current position
func (b *BufferReader) DecodeVarint() (x uint64, err error) {
	i := b.Pos
	buf := b.Buf

	if i >= len(buf) {
		return 0, io.ErrUnexpectedEOF
	} else if buf[i] < 0x80 {
		b.Pos++
		return uint64(buf[i]), nil
	} else if len(buf)-i < 10 {
		return b.DecodeVarintSlow()
	}

	var v uint64
	// we already checked the first byte
	x = uint64(buf[i]) & 127
	i++

	v = uint64(buf[i])
	i++
	x |= (v & 127) << 7
	if v < 128 {
		goto done
	}

	v = uint64(buf[i])
	i++
	x |= (v & 127) << 14
	if v < 128 {
		goto done
	}

	v = uint64(buf[i])
	i++
	x |= (v & 127) << 21
	if v < 128 {
		goto done
	}

	v = uint64(buf[i])
	i++
	x |= (v & 127) << 28
	if v < 128 {
		goto done
	}

	v = uint64(buf[i])
	i++
	x |= (v & 127) << 35
	if v < 128 {
		goto done
	}

	v = uint64(buf[i])
	i++
	x |= (v & 127) << 42
	if v < 128 {
		goto done
	}

	v = uint64(buf[i])
	i++
	x |= (v & 127) << 49
	if v < 128 {
		goto done
	}

	v = uint64(buf[i])
	i++
	x |= (v & 127) << 56
	if v < 128 {
		goto done
	}

	v = uint64(buf[i])
	i++
	x |= (v & 127) << 63
	if v < 128 {
		goto done
	}

	return 0, errOverflow

done:
	b.Pos = i
	return
}

// decodeVarint32 decodes a varint32 at the current position
func (b *BufferReader) DecodeVarint32() (x uint32, err error) {
	i := b.Pos
	buf := b.Buf

	if i >= len(buf) {
		return 0, io.ErrUnexpectedEOF
	} else if buf[i] < 0x80 {
		b.Pos++
		return uint32(buf[i]), nil
	} else if len(buf)-i < 5 {
		v, err := b.DecodeVarintSlow()
		return uint32(v), err
	}

	var v uint32
	// we already checked the first byte
	x = uint32(buf[i]) & 127
	i++

	v = uint32(buf[i])
	i++
	x |= (v & 127) << 7
	if v < 128 {
		goto done
	}

	v = uint32(buf[i])
	i++
	x |= (v & 127) << 14
	if v < 128 {
		goto done
	}

	v = uint32(buf[i])
	i++
	x |= (v & 127) << 21
	if v < 128 {
		goto done
	}

	v = uint32(buf[i])
	i++
	x |= (v & 127) << 28
	if v < 128 {
		goto done
	}

	return 0, errOverflow

done:
	b.Pos = i
	return
}

// skipValue skips a value in the protobuf, based on the specified tag
func (b *BufferReader) SkipValue(tag uint32) (err error) {
	wireType := tag & 0x7
	switch protowire.Type(wireType) {
	case protowire.VarintType:
		err = b.SkipVarint()
	case protowire.Fixed64Type:
		err = b.SkipFixed64()
	case protowire.BytesType:
		var n uint32
		n, err = b.DecodeVarint32()
		if err == nil {
			err = b.Skip(int(n))
		}
	case protowire.StartGroupType:
		err = b.SkipGroup(tag)
	case protowire.Fixed32Type:
		err = b.SkipFixed32()
	default:
		err = fmt.Errorf("Unexpected wire type (%d)", wireType)
	}
	return
}

// skipGroup skips a group with the specified tag. It executes efficiently using a tag stack
func (b *BufferReader) SkipGroup(tag uint32) (err error) {
	tagStack := make([]uint32, 0, 16)
	tagStack = append(tagStack, tag)
	var n uint32
	for len(tagStack) > 0 {
		tag, err = b.DecodeVarint32()
		if err != nil {
			return err
		}
		switch protowire.Type(tag & 0x7) {
		case protowire.VarintType:
			err = b.SkipVarint()
		case protowire.Fixed64Type:
			err = b.Skip(8)
		case protowire.BytesType:
			n, err = b.DecodeVarint32()
			if err == nil {
				err = b.Skip(int(n))
			}
		case protowire.StartGroupType:
			tagStack = append(tagStack, tag)
		case protowire.Fixed32Type:
			err = b.SkipFixed32()
		case protowire.EndGroupType:
			if protoFieldNumber(tagStack[len(tagStack)-1]) == protoFieldNumber(tag) {
				tagStack = tagStack[:len(tagStack)-1]
			} else {
				err = fmt.Errorf("end group tag %d does not match begin group tag %d at pos %d",
					protoFieldNumber(tag), protoFieldNumber(tagStack[len(tagStack)-1]), b.Pos)
			}
		}
		if err != nil {
			return err
		}
	}
	return nil
}

// skipVarint efficiently skips a varint
func (b *BufferReader) SkipVarint() (err error) {
	i := b.Pos

	if len(b.Buf)-i < 10 {
		// Use DecodeVarintSlow() to check for buffer overflow, but ignore result
		if _, err := b.DecodeVarintSlow(); err != nil {
			return err
		}
		return nil
	}

	if b.Buf[i] < 0x80 {
		goto out
	}
	i++

	if b.Buf[i] < 0x80 {
		goto out
	}
	i++

	if b.Buf[i] < 0x80 {
		goto out
	}
	i++

	if b.Buf[i] < 0x80 {
		goto out
	}
	i++

	if b.Buf[i] < 0x80 {
		goto out
	}
	i++

	if b.Buf[i] < 0x80 {
		goto out
	}
	i++

	if b.Buf[i] < 0x80 {
		goto out
	}
	i++

	if b.Buf[i] < 0x80 {
		goto out
	}
	i++

	if b.Buf[i] < 0x80 {
		goto out
	}
	i++

	if b.Buf[i] < 0x80 {
		goto out
	}
	return errOverflow

out:
	b.Pos = i + 1
	return nil
}

// skip skips the specified number of bytes
func (b *BufferReader) Skip(n int) (err error) {
	if len(b.Buf) < b.Pos+n {
		return io.ErrUnexpectedEOF
	}
	b.Pos += n
	return
}

// skipFixed64 skips a fixed64
func (b *BufferReader) SkipFixed64() (err error) {
	return b.Skip(8)
}

// skipFixed32 skips a fixed32
func (b *BufferReader) SkipFixed32() (err error) {
	return b.Skip(4)
}

// skipBytes skips a set of bytes
func (b *BufferReader) SkipBytes() (err error) {
	n, err := b.DecodeVarint32()
	if err != nil {
		return err
	}
	return b.Skip(int(n))
}

// Done returns whether we are at the end of the protobuf
func (b *BufferReader) Done() bool {
	return b.Pos == len(b.Buf)
}

// Remaining returns how many bytes remain
func (b *BufferReader) Remaining() int {
	return len(b.Buf) - b.Pos
}
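The reader above is essentially an unrolled base-128 varint decoder. As a hypothetical standalone illustration (not part of the vendored file), the loop below decodes the same wire format: each byte contributes seven payload bits, least-significant group first, and a set high bit means another byte follows. The vendored DecodeVarint/DecodeVarint32 unroll this loop and add the bounds and overflow checks that this sketch omits.

// Standalone sketch of base-128 varint decoding (illustration only).
package main

import "fmt"

func decodeVarint(buf []byte) (value uint64, n int) {
	for i, b := range buf {
		value |= uint64(b&0x7F) << (7 * uint(i)) // low 7 bits of each byte, low group first
		if b < 0x80 {
			return value, i + 1 // decoded value and number of bytes consumed
		}
	}
	return 0, 0 // truncated varint
}

func main() {
	v, n := decodeVarint([]byte{0x96, 0x01}) // 0x96 0x01 encodes 150
	fmt.Println(v, n)                        // prints: 150 2
}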
e2e/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go (generated, vendored, normal file): 359 lines added
@@ -0,0 +1,359 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package protolazy contains internal data structures for lazy message decoding.
package protolazy

import (
	"fmt"
	"sort"

	"google.golang.org/protobuf/encoding/protowire"
	piface "google.golang.org/protobuf/runtime/protoiface"
)

// IndexEntry is the structure for an index of the fields in a message of a
// proto (not descending to sub-messages)
type IndexEntry struct {
	FieldNum uint32
	// first byte of this tag/field
	Start uint32
	// first byte after a contiguous sequence of bytes for this tag/field, which could
	// include a single encoding of the field, or multiple encodings for the field
	End uint32
	// True if this protobuf segment includes multiple encodings of the field
	MultipleContiguous bool
}

// XXX_lazyUnmarshalInfo has information about a particular lazily decoded message
//
// Deprecated: Do not use. This will be deleted in the near future.
type XXX_lazyUnmarshalInfo struct {
	// Index of fields and their positions in the protobuf for this
	// message. Make index be a pointer to a slice so it can be updated
	// atomically. The index pointer is only set once (lazily when/if
	// the index is first needed), and must always be SET and LOADED
	// ATOMICALLY.
	index *[]IndexEntry
	// The protobuf associated with this lazily decoded message. It is
	// only set during proto.Unmarshal(). It doesn't need to be set and
	// loaded atomically, since any simultaneous set (Unmarshal) and read
	// (during a get) would already be a race in the app code.
	Protobuf []byte
	// The flags present when Unmarshal was originally called for this particular message
	unmarshalFlags piface.UnmarshalInputFlags
}

// The Buffer and SetBuffer methods let v2/internal/impl interact with
// XXX_lazyUnmarshalInfo via an interface, to avoid an import cycle.

// Buffer returns the lazy unmarshal buffer.
//
// Deprecated: Do not use. This will be deleted in the near future.
func (lazy *XXX_lazyUnmarshalInfo) Buffer() []byte {
	return lazy.Protobuf
}

// SetBuffer sets the lazy unmarshal buffer.
//
// Deprecated: Do not use. This will be deleted in the near future.
func (lazy *XXX_lazyUnmarshalInfo) SetBuffer(b []byte) {
	lazy.Protobuf = b
}

// SetUnmarshalFlags is called to set a copy of the original unmarshalInputFlags.
// The flags should reflect how Unmarshal was called.
func (lazy *XXX_lazyUnmarshalInfo) SetUnmarshalFlags(f piface.UnmarshalInputFlags) {
	lazy.unmarshalFlags = f
}

// UnmarshalFlags returns the original unmarshalInputFlags.
func (lazy *XXX_lazyUnmarshalInfo) UnmarshalFlags() piface.UnmarshalInputFlags {
	return lazy.unmarshalFlags
}

// AllowedPartial returns true if the user originally unmarshalled this message with
// AllowPartial set to true
func (lazy *XXX_lazyUnmarshalInfo) AllowedPartial() bool {
	return (lazy.unmarshalFlags & piface.UnmarshalCheckRequired) == 0
}

func protoFieldNumber(tag uint32) uint32 {
	return tag >> 3
}

// buildIndex builds an index of the specified protobuf, return the index
// array and an error.
func buildIndex(buf []byte) ([]IndexEntry, error) {
	index := make([]IndexEntry, 0, 16)
	var lastProtoFieldNum uint32
	var outOfOrder bool

	var r BufferReader = NewBufferReader(buf)

	for !r.Done() {
		var tag uint32
		var err error
		var curPos = r.Pos
		// INLINED: tag, err = r.DecodeVarint32()
		{
			i := r.Pos
			buf := r.Buf

			if i >= len(buf) {
				return nil, errOutOfBounds
			} else if buf[i] < 0x80 {
				r.Pos++
				tag = uint32(buf[i])
			} else if r.Remaining() < 5 {
				var v uint64
				v, err = r.DecodeVarintSlow()
				tag = uint32(v)
			} else {
				var v uint32
				// we already checked the first byte
				tag = uint32(buf[i]) & 127
				i++

				v = uint32(buf[i])
				i++
				tag |= (v & 127) << 7
				if v < 128 {
					goto done
				}

				v = uint32(buf[i])
				i++
				tag |= (v & 127) << 14
				if v < 128 {
					goto done
				}

				v = uint32(buf[i])
				i++
				tag |= (v & 127) << 21
				if v < 128 {
					goto done
				}

				v = uint32(buf[i])
				i++
				tag |= (v & 127) << 28
				if v < 128 {
					goto done
				}

				return nil, errOutOfBounds

			done:
				r.Pos = i
			}
		}
		// DONE: tag, err = r.DecodeVarint32()

		fieldNum := protoFieldNumber(tag)
		if fieldNum < lastProtoFieldNum {
			outOfOrder = true
		}

		// Skip the current value -- will skip over an entire group as well.
		// INLINED: err = r.SkipValue(tag)
		wireType := tag & 0x7
		switch protowire.Type(wireType) {
		case protowire.VarintType:
			// INLINED: err = r.SkipVarint()
			i := r.Pos

			if len(r.Buf)-i < 10 {
				// Use DecodeVarintSlow() to skip while
				// checking for buffer overflow, but ignore result
				_, err = r.DecodeVarintSlow()
				goto out2
			}
			if r.Buf[i] < 0x80 {
				goto out
			}
			i++

			if r.Buf[i] < 0x80 {
				goto out
			}
			i++

			if r.Buf[i] < 0x80 {
				goto out
			}
			i++

			if r.Buf[i] < 0x80 {
				goto out
			}
			i++

			if r.Buf[i] < 0x80 {
				goto out
			}
			i++

			if r.Buf[i] < 0x80 {
				goto out
			}
			i++

			if r.Buf[i] < 0x80 {
				goto out
			}
			i++

			if r.Buf[i] < 0x80 {
				goto out
			}
			i++

			if r.Buf[i] < 0x80 {
				goto out
			}
			i++

			if r.Buf[i] < 0x80 {
				goto out
			}
			return nil, errOverflow
		out:
			r.Pos = i + 1
			// DONE: err = r.SkipVarint()
		case protowire.Fixed64Type:
			err = r.SkipFixed64()
		case protowire.BytesType:
			var n uint32
			n, err = r.DecodeVarint32()
			if err == nil {
				err = r.Skip(int(n))
			}
		case protowire.StartGroupType:
			err = r.SkipGroup(tag)
		case protowire.Fixed32Type:
			err = r.SkipFixed32()
		default:
			err = fmt.Errorf("Unexpected wire type (%d)", wireType)
		}
		// DONE: err = r.SkipValue(tag)

	out2:
		if err != nil {
			return nil, err
		}
		if fieldNum != lastProtoFieldNum {
			index = append(index, IndexEntry{FieldNum: fieldNum,
				Start: uint32(curPos),
				End:   uint32(r.Pos)},
			)
		} else {
			index[len(index)-1].End = uint32(r.Pos)
			index[len(index)-1].MultipleContiguous = true
		}
		lastProtoFieldNum = fieldNum
	}
	if outOfOrder {
		sort.Slice(index, func(i, j int) bool {
			return index[i].FieldNum < index[j].FieldNum ||
				(index[i].FieldNum == index[j].FieldNum &&
					index[i].Start < index[j].Start)
		})
	}
	return index, nil
}

func (lazy *XXX_lazyUnmarshalInfo) SizeField(num uint32) (size int) {
	start, end, found, _, multipleEntries := lazy.FindFieldInProto(num)
	if multipleEntries != nil {
		for _, entry := range multipleEntries {
			size += int(entry.End - entry.Start)
		}
		return size
	}
	if !found {
		return 0
	}
	return int(end - start)
}

func (lazy *XXX_lazyUnmarshalInfo) AppendField(b []byte, num uint32) ([]byte, bool) {
	start, end, found, _, multipleEntries := lazy.FindFieldInProto(num)
	if multipleEntries != nil {
		for _, entry := range multipleEntries {
			b = append(b, lazy.Protobuf[entry.Start:entry.End]...)
		}
		return b, true
	}
	if !found {
		return nil, false
	}
	b = append(b, lazy.Protobuf[start:end]...)
	return b, true
}

func (lazy *XXX_lazyUnmarshalInfo) SetIndex(index []IndexEntry) {
	atomicStoreIndex(&lazy.index, &index)
}

// FindFieldInProto looks for field fieldNum in lazyUnmarshalInfo information
// (including protobuf), returns startOffset/endOffset/found.
func (lazy *XXX_lazyUnmarshalInfo) FindFieldInProto(fieldNum uint32) (start, end uint32, found, multipleContiguous bool, multipleEntries []IndexEntry) {
	if lazy.Protobuf == nil {
		// There is no backing protobuf for this message -- it was made from a builder
		return 0, 0, false, false, nil
	}
	index := atomicLoadIndex(&lazy.index)
	if index == nil {
		r, err := buildIndex(lazy.Protobuf)
		if err != nil {
			panic(fmt.Sprintf("findFieldInfo: error building index when looking for field %d: %v", fieldNum, err))
		}
		// lazy.index is a pointer to the slice returned by BuildIndex
		index = &r
		atomicStoreIndex(&lazy.index, index)
	}
	return lookupField(index, fieldNum)
}

// lookupField returns the offset at which the indicated field starts using
// the index, offset immediately after field ends (including all instances of
// a repeated field), and bools indicating if field was found and if there
// are multiple encodings of the field in the byte range.
//
// To handle the uncommon case where there are repeated encodings for the same
// field which are not consecutive in the protobuf (so we need to return
// multiple start/end offsets), we also return a slice multipleEntries. If
// multipleEntries is non-nil, then multiple entries were found, and the
// values in the slice should be used, rather than start/end/found.
func lookupField(indexp *[]IndexEntry, fieldNum uint32) (start, end uint32, found bool, multipleContiguous bool, multipleEntries []IndexEntry) {
	// The pointer indexp to the index was already loaded atomically.
	// The slice is uniquely associated with the pointer, so it doesn't
	// need to be loaded atomically.
	index := *indexp
	for i, entry := range index {
		if fieldNum == entry.FieldNum {
			if i < len(index)-1 && entry.FieldNum == index[i+1].FieldNum {
				// Handle the uncommon case where there are
				// repeated entries for the same field which
				// are not contiguous in the protobuf.
				multiple := make([]IndexEntry, 1, 2)
				multiple[0] = IndexEntry{fieldNum, entry.Start, entry.End, entry.MultipleContiguous}
				i++
				for i < len(index) && index[i].FieldNum == fieldNum {
					multiple = append(multiple, IndexEntry{fieldNum, index[i].Start, index[i].End, index[i].MultipleContiguous})
					i++
				}
				return 0, 0, false, false, multiple

			}
			return entry.Start, entry.End, true, entry.MultipleContiguous, nil
		}
		if fieldNum < entry.FieldNum {
			return 0, 0, false, false, nil
		}
	}
	return 0, 0, false, false, nil
}
e2e/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go (generated, vendored, normal file): 17 lines added
@@ -0,0 +1,17 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package protolazy

import (
	"sync/atomic"
	"unsafe"
)

func atomicLoadIndex(p **[]IndexEntry) *[]IndexEntry {
	return (*[]IndexEntry)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreIndex(p **[]IndexEntry, v *[]IndexEntry) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
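pointer_unsafe.go publishes the index pointer with unsafe.Pointer casts so that a *[]IndexEntry field can be loaded and stored atomically. The same publish-once idea can be expressed with the generic sync/atomic.Pointer type from Go 1.19+; the sketch below is an illustration of that pattern under that assumption, not code from this commit. Readers only ever observe nil or a fully built slice, which is why FindFieldInProto can build the index lazily without a mutex.

// Sketch of the publish-once index pattern using atomic.Pointer (illustration only).
package main

import (
	"fmt"
	"sync/atomic"
)

type indexEntry struct{ fieldNum, start, end uint32 }

type lazyInfo struct {
	// index is published once; readers load it atomically.
	index atomic.Pointer[[]indexEntry]
}

// getIndex returns the published index, building and publishing it on first use.
func (l *lazyInfo) getIndex(build func() []indexEntry) []indexEntry {
	if p := l.index.Load(); p != nil {
		return *p // fast path: already built and published
	}
	idx := build()
	l.index.Store(&idx) // concurrent builders may both store; the results are equivalent
	return idx
}

func main() {
	var l lazyInfo
	idx := l.getIndex(func() []indexEntry {
		return []indexEntry{{fieldNum: 1, start: 0, end: 3}}
	})
	fmt.Println(len(idx)) // prints: 1
}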