Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 10:53:34 +00:00
rebase: bump google.golang.org/grpc from 1.65.0 to 1.66.0
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.65.0 to 1.66.0.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.65.0...v1.66.0)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
committed by mergify[bot]
parent 89da94cfd0
commit 56cf915dff
vendor/google.golang.org/grpc/mem/buffer_pool.go (194 lines added, generated, vendored, normal file)
@@ -0,0 +1,194 @@
/*
 *
 * Copyright 2024 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package mem

import (
	"sort"
	"sync"

	"google.golang.org/grpc/internal"
)

// BufferPool is a pool of buffers that can be shared and reused, resulting in
// decreased memory allocation.
type BufferPool interface {
	// Get returns a buffer with specified length from the pool.
	Get(length int) *[]byte

	// Put returns a buffer to the pool.
	Put(*[]byte)
}

var defaultBufferPoolSizes = []int{
	256,
	4 << 10,  // 4KB (go page size)
	16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
	32 << 10, // 32KB (default buffer size for io.Copy)
	1 << 20,  // 1MB
}

var defaultBufferPool BufferPool

func init() {
	defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...)

	internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) {
		defaultBufferPool = pool
	}

	internal.SetBufferPoolingThresholdForTesting = func(threshold int) {
		bufferPoolingThreshold = threshold
	}
}

// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
// created with NewBufferPool that uses a set of default sizes optimized for
// expected workflows.
func DefaultBufferPool() BufferPool {
	return defaultBufferPool
}

// NewTieredBufferPool returns a BufferPool implementation that uses multiple
// underlying pools of the given pool sizes.
func NewTieredBufferPool(poolSizes ...int) BufferPool {
	sort.Ints(poolSizes)
	pools := make([]*sizedBufferPool, len(poolSizes))
	for i, s := range poolSizes {
		pools[i] = newSizedBufferPool(s)
	}
	return &tieredBufferPool{
		sizedPools: pools,
	}
}

// tieredBufferPool implements the BufferPool interface with multiple tiers of
// buffer pools for different sizes of buffers.
type tieredBufferPool struct {
	sizedPools   []*sizedBufferPool
	fallbackPool simpleBufferPool
}

func (p *tieredBufferPool) Get(size int) *[]byte {
	return p.getPool(size).Get(size)
}

func (p *tieredBufferPool) Put(buf *[]byte) {
	p.getPool(cap(*buf)).Put(buf)
}

func (p *tieredBufferPool) getPool(size int) BufferPool {
	poolIdx := sort.Search(len(p.sizedPools), func(i int) bool {
		return p.sizedPools[i].defaultSize >= size
	})

	if poolIdx == len(p.sizedPools) {
		return &p.fallbackPool
	}

	return p.sizedPools[poolIdx]
}

// sizedBufferPool is a BufferPool implementation that is optimized for specific
// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size
// of 16kb and a sizedBufferPool can be configured to only return buffers with a
// capacity of 16kb. Note that however it does not support returning larger
// buffers and in fact panics if such a buffer is requested. Because of this,
// this BufferPool implementation is not meant to be used on its own and rather
// is intended to be embedded in a tieredBufferPool such that Get is only
// invoked when the required size is smaller than or equal to defaultSize.
type sizedBufferPool struct {
	pool        sync.Pool
	defaultSize int
}

func (p *sizedBufferPool) Get(size int) *[]byte {
	buf := p.pool.Get().(*[]byte)
	b := *buf
	clear(b[:cap(b)])
	*buf = b[:size]
	return buf
}

func (p *sizedBufferPool) Put(buf *[]byte) {
	if cap(*buf) < p.defaultSize {
		// Ignore buffers that are too small to fit in the pool. Otherwise, when
		// Get is called it will panic as it tries to index outside the bounds
		// of the buffer.
		return
	}
	p.pool.Put(buf)
}

func newSizedBufferPool(size int) *sizedBufferPool {
	return &sizedBufferPool{
		pool: sync.Pool{
			New: func() any {
				buf := make([]byte, size)
				return &buf
			},
		},
		defaultSize: size,
	}
}

var _ BufferPool = (*simpleBufferPool)(nil)

// simpleBufferPool is an implementation of the BufferPool interface that
// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to
// acquire a buffer from the pool but if that buffer is too small, it returns it
// to the pool and creates a new one.
type simpleBufferPool struct {
	pool sync.Pool
}

func (p *simpleBufferPool) Get(size int) *[]byte {
	bs, ok := p.pool.Get().(*[]byte)
	if ok && cap(*bs) >= size {
		*bs = (*bs)[:size]
		return bs
	}

	// A buffer was pulled from the pool, but it is too small. Put it back in
	// the pool and create one large enough.
	if ok {
		p.pool.Put(bs)
	}

	b := make([]byte, size)
	return &b
}

func (p *simpleBufferPool) Put(buf *[]byte) {
	p.pool.Put(buf)
}

var _ BufferPool = NopBufferPool{}

// NopBufferPool is a buffer pool that returns new buffers without pooling.
type NopBufferPool struct{}

// Get returns a buffer with specified length from the pool.
func (NopBufferPool) Get(length int) *[]byte {
	b := make([]byte, length)
	return &b
}

// Put returns a buffer to the pool.
func (NopBufferPool) Put(*[]byte) {
}
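As a rough illustration of how the BufferPool API in buffer_pool.go is meant to be consumed, the following sketch is not part of the vendored file; the size classes and the 10KB request length are arbitrary choices made for the example.

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	// A tiered pool with two size classes; requests larger than the largest
	// class fall through to the internal simpleBufferPool fallback.
	pool := mem.NewTieredBufferPool(4<<10, 32<<10)

	// Get picks the smallest tier whose defaultSize fits the request, so a
	// 10KB request is served from the 32KB size class.
	buf := pool.Get(10 << 10)
	fmt.Println(len(*buf), cap(*buf)) // length 10240, capacity from the 32KB class

	// Returning the buffer lets later Get calls reuse the same backing array.
	pool.Put(buf)
}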
vendor/google.golang.org/grpc/mem/buffer_slice.go (224 lines added, generated, vendored, normal file)
@@ -0,0 +1,224 @@
/*
 *
 * Copyright 2024 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package mem

import (
	"compress/flate"
	"io"
)

// BufferSlice offers a means to represent data that spans one or more Buffer
// instances. A BufferSlice is meant to be immutable after creation, and methods
// like Ref create and return copies of the slice. This is why all methods have
// value receivers rather than pointer receivers.
//
// Note that any of the methods that read the underlying buffers such as Ref,
// Len or CopyTo etc., will panic if any underlying buffers have already been
// freed. It is recommended to not directly interact with any of the underlying
// buffers directly, rather such interactions should be mediated through the
// various methods on this type.
//
// By convention, any APIs that return (mem.BufferSlice, error) should reduce
// the burden on the caller by never returning a mem.BufferSlice that needs to
// be freed if the error is non-nil, unless explicitly stated.
type BufferSlice []Buffer

// Len returns the sum of the length of all the Buffers in this slice.
//
// # Warning
//
// Invoking the built-in len on a BufferSlice will return the number of buffers
// in the slice, and *not* the value returned by this function.
func (s BufferSlice) Len() int {
	var length int
	for _, b := range s {
		length += b.Len()
	}
	return length
}

// Ref invokes Ref on each buffer in the slice.
func (s BufferSlice) Ref() {
	for _, b := range s {
		b.Ref()
	}
}

// Free invokes Buffer.Free() on each Buffer in the slice.
func (s BufferSlice) Free() {
	for _, b := range s {
		b.Free()
	}
}

// CopyTo copies each of the underlying Buffer's data into the given buffer,
// returning the number of bytes copied. Has the same semantics as the copy
// builtin in that it will copy as many bytes as it can, stopping when either dst
// is full or s runs out of data, returning the minimum of s.Len() and len(dst).
func (s BufferSlice) CopyTo(dst []byte) int {
	off := 0
	for _, b := range s {
		off += copy(dst[off:], b.ReadOnlyData())
	}
	return off
}

// Materialize concatenates all the underlying Buffer's data into a single
// contiguous buffer using CopyTo.
func (s BufferSlice) Materialize() []byte {
	l := s.Len()
	if l == 0 {
		return nil
	}
	out := make([]byte, l)
	s.CopyTo(out)
	return out
}

// MaterializeToBuffer functions like Materialize except that it writes the data
// to a single Buffer pulled from the given BufferPool. As a special case, if the
// input BufferSlice only actually has one Buffer, this function has nothing to
// do and simply returns said Buffer.
func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
	if len(s) == 1 {
		s[0].Ref()
		return s[0]
	}
	sLen := s.Len()
	if sLen == 0 {
		return emptyBuffer{}
	}
	buf := pool.Get(sLen)
	s.CopyTo(*buf)
	return NewBuffer(buf, pool)
}

// Reader returns a new Reader for the input slice after taking references to
// each underlying buffer.
func (s BufferSlice) Reader() Reader {
	s.Ref()
	return &sliceReader{
		data: s,
		len:  s.Len(),
	}
}

// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
// with other parts systems. It also provides an additional convenience method
// Remaining(), which returns the number of unread bytes remaining in the slice.
// Buffers will be freed as they are read.
type Reader interface {
	flate.Reader
	// Close frees the underlying BufferSlice and never returns an error. Subsequent
	// calls to Read will return (0, io.EOF).
	Close() error
	// Remaining returns the number of unread bytes remaining in the slice.
	Remaining() int
}

type sliceReader struct {
	data BufferSlice
	len  int
	// The index into data[0].ReadOnlyData().
	bufferIdx int
}

func (r *sliceReader) Remaining() int {
	return r.len
}

func (r *sliceReader) Close() error {
	r.data.Free()
	r.data = nil
	r.len = 0
	return nil
}

func (r *sliceReader) freeFirstBufferIfEmpty() bool {
	if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
		return false
	}

	r.data[0].Free()
	r.data = r.data[1:]
	r.bufferIdx = 0
	return true
}

func (r *sliceReader) Read(buf []byte) (n int, _ error) {
	if r.len == 0 {
		return 0, io.EOF
	}

	for len(buf) != 0 && r.len != 0 {
		// Copy as much as possible from the first Buffer in the slice into the
		// given byte slice.
		data := r.data[0].ReadOnlyData()
		copied := copy(buf, data[r.bufferIdx:])
		r.len -= copied       // Reduce len by the number of bytes copied.
		r.bufferIdx += copied // Increment the buffer index.
		n += copied           // Increment the total number of bytes read.
		buf = buf[copied:]    // Shrink the given byte slice.

		// If we have copied all the data from the first Buffer, free it and advance to
		// the next in the slice.
		r.freeFirstBufferIfEmpty()
	}

	return n, nil
}

func (r *sliceReader) ReadByte() (byte, error) {
	if r.len == 0 {
		return 0, io.EOF
	}

	// There may be any number of empty buffers in the slice, clear them all until a
	// non-empty buffer is reached. This is guaranteed to exit since r.len is not 0.
	for r.freeFirstBufferIfEmpty() {
	}

	b := r.data[0].ReadOnlyData()[r.bufferIdx]
	r.len--
	r.bufferIdx++
	// Free the first buffer in the slice if the last byte was read
	r.freeFirstBufferIfEmpty()
	return b, nil
}

var _ io.Writer = (*writer)(nil)

type writer struct {
	buffers *BufferSlice
	pool    BufferPool
}

func (w *writer) Write(p []byte) (n int, err error) {
	b := Copy(p, w.pool)
	*w.buffers = append(*w.buffers, b)
	return b.Len(), nil
}

// NewWriter wraps the given BufferSlice and BufferPool to implement the
// io.Writer interface. Every call to Write copies the contents of the given
// buffer into a new Buffer pulled from the given pool and the Buffer is added to
// the given BufferSlice.
func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
	return &writer{buffers: buffers, pool: pool}
}
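A similar sketch for the BufferSlice API in buffer_slice.go follows; it is illustrative only and not part of the vendored file. The strings written and the variable names are arbitrary, and only the functions defined above (DefaultBufferPool, NewWriter, Reader, Len, Free) are used.

package main

import (
	"fmt"
	"io"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// Accumulate writes into a BufferSlice through the io.Writer adapter;
	// each Write copies its input into a Buffer pulled from the pool.
	var s mem.BufferSlice
	w := mem.NewWriter(&s, pool)
	io.WriteString(w, "hello, ")
	io.WriteString(w, "world")
	fmt.Println(s.Len()) // 12: total bytes across all buffers in the slice

	// Drain the data through the io.Reader view; buffers are freed as they
	// are fully read, and Close releases whatever remains unread.
	r := s.Reader()
	data, _ := io.ReadAll(r)
	r.Close()
	fmt.Println(string(data)) // "hello, world"

	// Release the references still held by the slice itself.
	s.Free()
}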
vendor/google.golang.org/grpc/mem/buffers.go (252 lines added, generated, vendored, normal file)
@@ -0,0 +1,252 @@
/*
 *
 * Copyright 2024 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package mem provides utilities that facilitate memory reuse in byte slices
// that are used as buffers.
//
// # Experimental
//
// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
// removed in a later release.
package mem

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// A Buffer represents a reference counted piece of data (in bytes) that can be
// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be
// released by calling Free(), which invokes the free function given at creation
// only after all references are released.
//
// Note that a Buffer is not safe for concurrent access and instead each
// goroutine should use its own reference to the data, which can be acquired via
// a call to Ref().
//
// Attempts to access the underlying data after releasing the reference to the
// Buffer will panic.
type Buffer interface {
	// ReadOnlyData returns the underlying byte slice. Note that it is undefined
	// behavior to modify the contents of this slice in any way.
	ReadOnlyData() []byte
	// Ref increases the reference counter for this Buffer.
	Ref()
	// Free decrements this Buffer's reference counter and frees the underlying
	// byte slice if the counter reaches 0 as a result of this call.
	Free()
	// Len returns the Buffer's size.
	Len() int

	split(n int) (left, right Buffer)
	read(buf []byte) (int, Buffer)
}

var (
	bufferPoolingThreshold = 1 << 10

	bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }}
	refObjectPool    = sync.Pool{New: func() any { return new(atomic.Int32) }}
)

func IsBelowBufferPoolingThreshold(size int) bool {
	return size <= bufferPoolingThreshold
}

type buffer struct {
	origData *[]byte
	data     []byte
	refs     *atomic.Int32
	pool     BufferPool
}

func newBuffer() *buffer {
	return bufferObjectPool.Get().(*buffer)
}

// NewBuffer creates a new Buffer from the given data, initializing the reference
// counter to 1. The data will then be returned to the given pool when all
// references to the returned Buffer are released. As a special case to avoid
// additional allocations, if the given buffer pool is nil, the returned buffer
// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the
// underlying data is never freed.
//
// Note that the backing array of the given data is not copied.
func NewBuffer(data *[]byte, pool BufferPool) Buffer {
	if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) {
		return (SliceBuffer)(*data)
	}
	b := newBuffer()
	b.origData = data
	b.data = *data
	b.pool = pool
	b.refs = refObjectPool.Get().(*atomic.Int32)
	b.refs.Add(1)
	return b
}

// Copy creates a new Buffer from the given data, initializing the reference
// counter to 1.
//
// It acquires a []byte from the given pool and copies over the backing array
// of the given data. The []byte acquired from the pool is returned to the
// pool when all references to the returned Buffer are released.
func Copy(data []byte, pool BufferPool) Buffer {
	if IsBelowBufferPoolingThreshold(len(data)) {
		buf := make(SliceBuffer, len(data))
		copy(buf, data)
		return buf
	}

	buf := pool.Get(len(data))
	copy(*buf, data)
	return NewBuffer(buf, pool)
}

func (b *buffer) ReadOnlyData() []byte {
	if b.refs == nil {
		panic("Cannot read freed buffer")
	}
	return b.data
}

func (b *buffer) Ref() {
	if b.refs == nil {
		panic("Cannot ref freed buffer")
	}
	b.refs.Add(1)
}

func (b *buffer) Free() {
	if b.refs == nil {
		panic("Cannot free freed buffer")
	}

	refs := b.refs.Add(-1)
	switch {
	case refs > 0:
		return
	case refs == 0:
		if b.pool != nil {
			b.pool.Put(b.origData)
		}

		refObjectPool.Put(b.refs)
		b.origData = nil
		b.data = nil
		b.refs = nil
		b.pool = nil
		bufferObjectPool.Put(b)
	default:
		panic("Cannot free freed buffer")
	}
}

func (b *buffer) Len() int {
	return len(b.ReadOnlyData())
}

func (b *buffer) split(n int) (Buffer, Buffer) {
	if b.refs == nil {
		panic("Cannot split freed buffer")
	}

	b.refs.Add(1)
	split := newBuffer()
	split.origData = b.origData
	split.data = b.data[n:]
	split.refs = b.refs
	split.pool = b.pool

	b.data = b.data[:n]

	return b, split
}

func (b *buffer) read(buf []byte) (int, Buffer) {
	if b.refs == nil {
		panic("Cannot read freed buffer")
	}

	n := copy(buf, b.data)
	if n == len(b.data) {
		b.Free()
		return n, nil
	}

	b.data = b.data[n:]
	return n, b
}

// String returns a string representation of the buffer. May be used for
// debugging purposes.
func (b *buffer) String() string {
	return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData()))
}

func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) {
	return buf.read(dst)
}

// SplitUnsafe modifies the receiver to point to the first n bytes while it
// returns a new reference to the remaining bytes. The returned Buffer functions
// just like a normal reference acquired using Ref().
func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
	return buf.split(n)
}

type emptyBuffer struct{}

func (e emptyBuffer) ReadOnlyData() []byte {
	return nil
}

func (e emptyBuffer) Ref()  {}
func (e emptyBuffer) Free() {}

func (e emptyBuffer) Len() int {
	return 0
}

func (e emptyBuffer) split(n int) (left, right Buffer) {
	return e, e
}

func (e emptyBuffer) read(buf []byte) (int, Buffer) {
	return 0, e
}

type SliceBuffer []byte

func (s SliceBuffer) ReadOnlyData() []byte { return s }
func (s SliceBuffer) Ref()                 {}
func (s SliceBuffer) Free()                {}
func (s SliceBuffer) Len() int             { return len(s) }

func (s SliceBuffer) split(n int) (left, right Buffer) {
	return s[:n], s[n:]
}

func (s SliceBuffer) read(buf []byte) (int, Buffer) {
	n := copy(buf, s)
	if n == len(s) {
		return n, nil
	}
	return n, s[n:]
}
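Finally, a sketch of the reference-counting behaviour defined in buffers.go, again illustrative only and not part of the vendored file. A 4KB payload is used because data at or below the 1KB pooling threshold comes back as a non-refcounted SliceBuffer; the payload size and variable names are arbitrary.

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// Copy pulls a []byte from the pool, copies the payload in, and returns a
	// Buffer with an initial reference count of 1.
	payload := make([]byte, 4<<10)
	buf := mem.Copy(payload, pool)

	buf.Ref()              // a second holder takes its own reference (count is now 2)
	fmt.Println(buf.Len()) // 4096

	buf.Free() // count drops to 1; the data is still readable
	fmt.Println(len(buf.ReadOnlyData())) // 4096

	buf.Free() // count drops to 0; the []byte is returned to the pool
	// Any further ReadOnlyData/Ref/Free on buf would panic.
}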