rebase: bump golang.org/x/oauth2 in /actions/retest

Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.18.0 to 0.22.0.
- [Commits](https://github.com/golang/oauth2/compare/v0.18.0...v0.22.0)

---
updated-dependencies:
- dependency-name: golang.org/x/oauth2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Authored by dependabot[bot] on 2024-08-26 20:13:26 +00:00; committed by mergify[bot]
parent 91b8efe473
commit 5c934d08f2
194 changed files with 8 additions and 53136 deletions

View File

@@ -4,13 +4,7 @@ go 1.22.5
 
 require (
     github.com/google/go-github v17.0.0+incompatible
-    golang.org/x/oauth2 v0.18.0
+    golang.org/x/oauth2 v0.22.0
 )
 
-require (
-    github.com/golang/protobuf v1.5.3 // indirect
-    github.com/google/go-querystring v1.1.0 // indirect
-    golang.org/x/net v0.23.0 // indirect
-    google.golang.org/appengine v1.6.7 // indirect
-    google.golang.org/protobuf v1.33.0 // indirect
-)
+require github.com/google/go-querystring v1.1.0 // indirect
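
The update is a semver-minor bump; the large deletion count comes from go.mod dropping the golang.org/x/net, google.golang.org/appengine, and google.golang.org/protobuf indirect requirements, whose vendored sources are removed below. For orientation, a minimal hypothetical sketch of how a retest-style action typically wires oauth2 into go-github; the GITHUB_TOKEN variable and the Zen call are illustrative assumptions, not taken from this repository:

package main

import (
    "context"
    "fmt"
    "os"

    "github.com/google/go-github/github"
    "golang.org/x/oauth2"
)

func main() {
    ctx := context.Background()

    // Static token source; reading GITHUB_TOKEN is an assumption for
    // illustration, not something taken from this action's code.
    ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")})

    // go-github accepts any *http.Client; oauth2.NewClient injects the token.
    gh := github.NewClient(oauth2.NewClient(ctx, ts))

    // Any authenticated call works the same before and after the bump.
    zen, _, err := gh.Zen(ctx)
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Println(zen)
}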

View File

@@ -1,29 +1,10 @@
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
 github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
-golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
-golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
-golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
+golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=

View File

@@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

View File

@@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

View File

@@ -1,28 +0,0 @@
Copyright 2010 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,324 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"errors"
"fmt"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/runtime/protoimpl"
)
const (
WireVarint = 0
WireFixed32 = 5
WireFixed64 = 1
WireBytes = 2
WireStartGroup = 3
WireEndGroup = 4
)
// EncodeVarint returns the varint encoded bytes of v.
func EncodeVarint(v uint64) []byte {
return protowire.AppendVarint(nil, v)
}
// SizeVarint returns the length of the varint encoded bytes of v.
// This is equal to len(EncodeVarint(v)).
func SizeVarint(v uint64) int {
return protowire.SizeVarint(v)
}
// DecodeVarint parses a varint encoded integer from b,
// returning the integer value and the length of the varint.
// It returns (0, 0) if there is a parse error.
func DecodeVarint(b []byte) (uint64, int) {
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return 0, 0
}
return v, n
}
// Buffer is a buffer for encoding and decoding the protobuf wire format.
// It may be reused between invocations to reduce memory usage.
type Buffer struct {
buf []byte
idx int
deterministic bool
}
// NewBuffer allocates a new Buffer initialized with buf,
// where the contents of buf are considered the unread portion of the buffer.
func NewBuffer(buf []byte) *Buffer {
return &Buffer{buf: buf}
}
// SetDeterministic specifies whether to use deterministic serialization.
//
// Deterministic serialization guarantees that for a given binary, equal
// messages will always be serialized to the same bytes. This implies:
//
// - Repeated serialization of a message will return the same bytes.
// - Different processes of the same binary (which may be executing on
// different machines) will serialize equal messages to the same bytes.
//
// Note that the deterministic serialization is NOT canonical across
// languages. It is not guaranteed to remain stable over time. It is unstable
// across different builds with schema changes due to unknown fields.
// Users who need canonical serialization (e.g., persistent storage in a
// canonical form, fingerprinting, etc.) should define their own
// canonicalization specification and implement their own serializer rather
// than relying on this API.
//
// If deterministic serialization is requested, map entries will be sorted
// by keys in lexicographical order. This is an implementation detail and
// subject to change.
func (b *Buffer) SetDeterministic(deterministic bool) {
b.deterministic = deterministic
}
// SetBuf sets buf as the internal buffer,
// where the contents of buf are considered the unread portion of the buffer.
func (b *Buffer) SetBuf(buf []byte) {
b.buf = buf
b.idx = 0
}
// Reset clears the internal buffer of all written and unread data.
func (b *Buffer) Reset() {
b.buf = b.buf[:0]
b.idx = 0
}
// Bytes returns the internal buffer.
func (b *Buffer) Bytes() []byte {
return b.buf
}
// Unread returns the unread portion of the buffer.
func (b *Buffer) Unread() []byte {
return b.buf[b.idx:]
}
// Marshal appends the wire-format encoding of m to the buffer.
func (b *Buffer) Marshal(m Message) error {
var err error
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
return err
}
// Unmarshal parses the wire-format message in the buffer and
// places the decoded results in m.
// It does not reset m before unmarshaling.
func (b *Buffer) Unmarshal(m Message) error {
err := UnmarshalMerge(b.Unread(), m)
b.idx = len(b.buf)
return err
}
type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
func (m *unknownFields) String() string { panic("not implemented") }
func (m *unknownFields) Reset() { panic("not implemented") }
func (m *unknownFields) ProtoMessage() { panic("not implemented") }
// DebugPrint dumps the encoded bytes of b with a header and footer including s
// to stdout. This is only intended for debugging.
func (*Buffer) DebugPrint(s string, b []byte) {
m := MessageReflect(new(unknownFields))
m.SetUnknown(b)
b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
}
// EncodeVarint appends an unsigned varint encoding to the buffer.
func (b *Buffer) EncodeVarint(v uint64) error {
b.buf = protowire.AppendVarint(b.buf, v)
return nil
}
// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
func (b *Buffer) EncodeZigzag32(v uint64) error {
return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
}
// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
func (b *Buffer) EncodeZigzag64(v uint64) error {
return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
}
// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
func (b *Buffer) EncodeFixed32(v uint64) error {
b.buf = protowire.AppendFixed32(b.buf, uint32(v))
return nil
}
// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
func (b *Buffer) EncodeFixed64(v uint64) error {
b.buf = protowire.AppendFixed64(b.buf, uint64(v))
return nil
}
// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
func (b *Buffer) EncodeRawBytes(v []byte) error {
b.buf = protowire.AppendBytes(b.buf, v)
return nil
}
// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
// It does not validate whether v contains valid UTF-8.
func (b *Buffer) EncodeStringBytes(v string) error {
b.buf = protowire.AppendString(b.buf, v)
return nil
}
// EncodeMessage appends a length-prefixed encoded message to the buffer.
func (b *Buffer) EncodeMessage(m Message) error {
var err error
b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
return err
}
// DecodeVarint consumes an encoded unsigned varint from the buffer.
func (b *Buffer) DecodeVarint() (uint64, error) {
v, n := protowire.ConsumeVarint(b.buf[b.idx:])
if n < 0 {
return 0, protowire.ParseError(n)
}
b.idx += n
return uint64(v), nil
}
// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
func (b *Buffer) DecodeZigzag32() (uint64, error) {
v, err := b.DecodeVarint()
if err != nil {
return 0, err
}
return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
}
// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
func (b *Buffer) DecodeZigzag64() (uint64, error) {
v, err := b.DecodeVarint()
if err != nil {
return 0, err
}
return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
}
// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
func (b *Buffer) DecodeFixed32() (uint64, error) {
v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
if n < 0 {
return 0, protowire.ParseError(n)
}
b.idx += n
return uint64(v), nil
}
// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
func (b *Buffer) DecodeFixed64() (uint64, error) {
v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
if n < 0 {
return 0, protowire.ParseError(n)
}
b.idx += n
return uint64(v), nil
}
// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
// If alloc is specified, it returns a copy of the raw bytes
// rather than a sub-slice of the buffer.
func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
v, n := protowire.ConsumeBytes(b.buf[b.idx:])
if n < 0 {
return nil, protowire.ParseError(n)
}
b.idx += n
if alloc {
v = append([]byte(nil), v...)
}
return v, nil
}
// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
// It does not validate whether the raw bytes contain valid UTF-8.
func (b *Buffer) DecodeStringBytes() (string, error) {
v, n := protowire.ConsumeString(b.buf[b.idx:])
if n < 0 {
return "", protowire.ParseError(n)
}
b.idx += n
return v, nil
}
// DecodeMessage consumes a length-prefixed message from the buffer.
// It does not reset m before unmarshaling.
func (b *Buffer) DecodeMessage(m Message) error {
v, err := b.DecodeRawBytes(false)
if err != nil {
return err
}
return UnmarshalMerge(v, m)
}
// DecodeGroup consumes a message group from the buffer.
// It assumes that the start group marker has already been consumed and
// consumes all bytes until (and including) the end group marker.
// It does not reset m before unmarshaling.
func (b *Buffer) DecodeGroup(m Message) error {
v, n, err := consumeGroup(b.buf[b.idx:])
if err != nil {
return err
}
b.idx += n
return UnmarshalMerge(v, m)
}
// consumeGroup parses b until it finds an end group marker, returning
// the raw bytes of the message (excluding the end group marker) and
// the total length of the message (including the end group marker).
func consumeGroup(b []byte) ([]byte, int, error) {
b0 := b
depth := 1 // assume this follows a start group marker
for {
_, wtyp, tagLen := protowire.ConsumeTag(b)
if tagLen < 0 {
return nil, 0, protowire.ParseError(tagLen)
}
b = b[tagLen:]
var valLen int
switch wtyp {
case protowire.VarintType:
_, valLen = protowire.ConsumeVarint(b)
case protowire.Fixed32Type:
_, valLen = protowire.ConsumeFixed32(b)
case protowire.Fixed64Type:
_, valLen = protowire.ConsumeFixed64(b)
case protowire.BytesType:
_, valLen = protowire.ConsumeBytes(b)
case protowire.StartGroupType:
depth++
case protowire.EndGroupType:
depth--
default:
return nil, 0, errors.New("proto: cannot parse reserved wire type")
}
if valLen < 0 {
return nil, 0, protowire.ParseError(valLen)
}
b = b[valLen:]
if depth == 0 {
return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
}
}
}
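
The deleted buffer.go above is a thin wrapper over google.golang.org/protobuf/encoding/protowire. A minimal sketch of the zig-zag varint scheme that its EncodeZigzag64/DecodeZigzag64 helpers implement, using protowire directly:

package main

import (
    "fmt"

    "google.golang.org/protobuf/encoding/protowire"
)

func main() {
    // Zig-zag maps signed integers to unsigned ones so that small negative
    // values still encode to short varints: 0→0, -1→1, 1→2, -2→3, ...
    enc := protowire.AppendVarint(nil, protowire.EncodeZigZag(-2))
    fmt.Printf("% x\n", enc) // 03

    u, n := protowire.ConsumeVarint(enc)
    if n < 0 {
        panic(protowire.ParseError(n))
    }
    fmt.Println(protowire.DecodeZigZag(u)) // -2
}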

View File

@@ -1,63 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"google.golang.org/protobuf/reflect/protoreflect"
)
// SetDefaults sets unpopulated scalar fields to their default values.
// Fields within a oneof are not set even if they have a default value.
// SetDefaults is recursively called upon any populated message fields.
func SetDefaults(m Message) {
if m != nil {
setDefaults(MessageReflect(m))
}
}
func setDefaults(m protoreflect.Message) {
fds := m.Descriptor().Fields()
for i := 0; i < fds.Len(); i++ {
fd := fds.Get(i)
if !m.Has(fd) {
if fd.HasDefault() && fd.ContainingOneof() == nil {
v := fd.Default()
if fd.Kind() == protoreflect.BytesKind {
v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
}
m.Set(fd, v)
}
continue
}
}
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
switch {
// Handle singular message.
case fd.Cardinality() != protoreflect.Repeated:
if fd.Message() != nil {
setDefaults(m.Get(fd).Message())
}
// Handle list of messages.
case fd.IsList():
if fd.Message() != nil {
ls := m.Get(fd).List()
for i := 0; i < ls.Len(); i++ {
setDefaults(ls.Get(i).Message())
}
}
// Handle map of messages.
case fd.IsMap():
if fd.MapValue().Message() != nil {
ms := m.Get(fd).Map()
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
setDefaults(v.Message())
return true
})
}
}
return true
})
}

View File

@@ -1,113 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"encoding/json"
"errors"
"fmt"
"strconv"
protoV2 "google.golang.org/protobuf/proto"
)
var (
// Deprecated: No longer returned.
ErrNil = errors.New("proto: Marshal called with nil")
// Deprecated: No longer returned.
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
// Deprecated: No longer returned.
ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
)
// Deprecated: Do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
// Deprecated: Do not use.
func GetStats() Stats { return Stats{} }
// Deprecated: Do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
// Deprecated: Do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
// Deprecated: Do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
// Deprecated: Do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
// Deprecated: Do not use.
func RegisterMessageSetType(Message, int32, string) {}
// Deprecated: Do not use.
func EnumName(m map[int32]string, v int32) string {
s, ok := m[v]
if ok {
return s
}
return strconv.Itoa(int(v))
}
// Deprecated: Do not use.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
if data[0] == '"' {
// New style: enums are strings.
var repr string
if err := json.Unmarshal(data, &repr); err != nil {
return -1, err
}
val, ok := m[repr]
if !ok {
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
}
return val, nil
}
// Old style: enums are ints.
var val int32
if err := json.Unmarshal(data, &val); err != nil {
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
}
return val, nil
}
// Deprecated: Do not use; this type existed for internal-use only.
type InternalMessageInfo struct{}
// Deprecated: Do not use; this method existed for internal-use only.
func (*InternalMessageInfo) DiscardUnknown(m Message) {
DiscardUnknown(m)
}
// Deprecated: Do not use; this method existed for internal-use only.
func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
}
// Deprecated: Do not use; this method existed for internal-use only.
func (*InternalMessageInfo) Merge(dst, src Message) {
protoV2.Merge(MessageV2(dst), MessageV2(src))
}
// Deprecated: Do not use; this method existed for internal-use only.
func (*InternalMessageInfo) Size(m Message) int {
return protoV2.Size(MessageV2(m))
}
// Deprecated: Do not use; this method existed for internal-use only.
func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
}
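
A small sketch of the dual string/number handling that the deleted UnmarshalJSONEnum documents. The Status enum and its value map are made-up examples, and the call goes through the deprecated v1 package shown above:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto" // the deprecated v1 package shown above
)

func main() {
    // Name→number map of the kind protoc-gen-go used to emit for each enum.
    status := map[string]int32{"UNKNOWN": 0, "PASSED": 1, "FAILED": 2}

    // New style: the enum value arrives as a JSON string.
    v, err := proto.UnmarshalJSONEnum(status, []byte(`"FAILED"`), "Status")
    fmt.Println(v, err) // 2 <nil>

    // Old style: the enum value arrives as a JSON number.
    v, err = proto.UnmarshalJSONEnum(status, []byte(`1`), "Status")
    fmt.Println(v, err) // 1 <nil>
}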

View File

@@ -1,58 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"google.golang.org/protobuf/reflect/protoreflect"
)
// DiscardUnknown recursively discards all unknown fields from this message
// and all embedded messages.
//
// When unmarshaling a message with unrecognized fields, the tags and values
// of such fields are preserved in the Message. This allows a later call to
// marshal to be able to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
func DiscardUnknown(m Message) {
if m != nil {
discardUnknown(MessageReflect(m))
}
}
func discardUnknown(m protoreflect.Message) {
m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
switch {
// Handle singular message.
case fd.Cardinality() != protoreflect.Repeated:
if fd.Message() != nil {
discardUnknown(m.Get(fd).Message())
}
// Handle list of messages.
case fd.IsList():
if fd.Message() != nil {
ls := m.Get(fd).List()
for i := 0; i < ls.Len(); i++ {
discardUnknown(ls.Get(i).Message())
}
}
// Handle map of messages.
case fd.IsMap():
if fd.MapValue().Message() != nil {
ms := m.Get(fd).Map()
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
discardUnknown(v.Message())
return true
})
}
}
return true
})
// Discard unknown fields.
if len(m.GetUnknown()) > 0 {
m.SetUnknown(nil)
}
}
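
A minimal sketch of the behavior the deleted discard.go describes, using the v2 API: unknown fields survive unmarshaling by default and can be cleared explicitly. emptypb.Empty is chosen only because any field number is unknown to it:

package main

import (
    "fmt"

    "google.golang.org/protobuf/encoding/protowire"
    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/types/known/emptypb"
)

func main() {
    // Hand-craft a field (number 1, varint 42) that emptypb.Empty does not declare.
    raw := protowire.AppendTag(nil, 1, protowire.VarintType)
    raw = protowire.AppendVarint(raw, 42)

    var m emptypb.Empty
    if err := proto.Unmarshal(raw, &m); err != nil {
        panic(err)
    }
    fmt.Println(len(m.ProtoReflect().GetUnknown())) // 2: the unknown field is kept

    // Equivalent of the deleted DiscardUnknown: drop the retained bytes.
    m.ProtoReflect().SetUnknown(nil)
    fmt.Println(len(m.ProtoReflect().GetUnknown())) // 0
}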

View File

@@ -1,356 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"errors"
"fmt"
"reflect"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoiface"
"google.golang.org/protobuf/runtime/protoimpl"
)
type (
// ExtensionDesc represents an extension descriptor and
// is used to interact with an extension field in a message.
//
// Variables of this type are generated in code by protoc-gen-go.
ExtensionDesc = protoimpl.ExtensionInfo
// ExtensionRange represents a range of message extensions.
// Used in code generated by protoc-gen-go.
ExtensionRange = protoiface.ExtensionRangeV1
// Deprecated: Do not use; this is an internal type.
Extension = protoimpl.ExtensionFieldV1
// Deprecated: Do not use; this is an internal type.
XXX_InternalExtensions = protoimpl.ExtensionFields
)
// ErrMissingExtension reports whether the extension was not present.
var ErrMissingExtension = errors.New("proto: missing extension")
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
// HasExtension reports whether the extension field is present in m
// either as an explicitly populated field or as an unknown field.
func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return false
}
// Check whether any populated known field matches the field number.
xtd := xt.TypeDescriptor()
if isValidExtension(mr.Descriptor(), xtd) {
has = mr.Has(xtd)
} else {
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
has = int32(fd.Number()) == xt.Field
return !has
})
}
// Check whether any unknown field matches the field number.
for b := mr.GetUnknown(); !has && len(b) > 0; {
num, _, n := protowire.ConsumeField(b)
has = int32(num) == xt.Field
b = b[n:]
}
return has
}
// ClearExtension removes the extension field from m
// either as an explicitly populated field or as an unknown field.
func ClearExtension(m Message, xt *ExtensionDesc) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return
}
xtd := xt.TypeDescriptor()
if isValidExtension(mr.Descriptor(), xtd) {
mr.Clear(xtd)
} else {
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
if int32(fd.Number()) == xt.Field {
mr.Clear(fd)
return false
}
return true
})
}
clearUnknown(mr, fieldNum(xt.Field))
}
// ClearAllExtensions clears all extensions from m.
// This includes populated fields and unknown fields in the extension range.
func ClearAllExtensions(m Message) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return
}
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
if fd.IsExtension() {
mr.Clear(fd)
}
return true
})
clearUnknown(mr, mr.Descriptor().ExtensionRanges())
}
// GetExtension retrieves a proto2 extended field from m.
//
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
// then GetExtension parses the encoded field and returns a Go value of the specified type.
// If the field is not present, then the default value is returned (if one is specified),
// otherwise ErrMissingExtension is reported.
//
// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
// then GetExtension returns the raw encoded bytes for the extension field.
func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
return nil, errNotExtendable
}
// Retrieve the unknown fields for this extension field.
var bo protoreflect.RawFields
for bi := mr.GetUnknown(); len(bi) > 0; {
num, _, n := protowire.ConsumeField(bi)
if int32(num) == xt.Field {
bo = append(bo, bi[:n]...)
}
bi = bi[n:]
}
// For type incomplete descriptors, only retrieve the unknown fields.
if xt.ExtensionType == nil {
return []byte(bo), nil
}
// If the extension field only exists as unknown fields, unmarshal it.
// This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
xtd := xt.TypeDescriptor()
if !isValidExtension(mr.Descriptor(), xtd) {
return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
}
if !mr.Has(xtd) && len(bo) > 0 {
m2 := mr.New()
if err := (proto.UnmarshalOptions{
Resolver: extensionResolver{xt},
}.Unmarshal(bo, m2.Interface())); err != nil {
return nil, err
}
if m2.Has(xtd) {
mr.Set(xtd, m2.Get(xtd))
clearUnknown(mr, fieldNum(xt.Field))
}
}
// Check whether the message has the extension field set or a default.
var pv protoreflect.Value
switch {
case mr.Has(xtd):
pv = mr.Get(xtd)
case xtd.HasDefault():
pv = xtd.Default()
default:
return nil, ErrMissingExtension
}
v := xt.InterfaceOf(pv)
rv := reflect.ValueOf(v)
if isScalarKind(rv.Kind()) {
rv2 := reflect.New(rv.Type())
rv2.Elem().Set(rv)
v = rv2.Interface()
}
return v, nil
}
// extensionResolver is a custom extension resolver that stores a single
// extension type that takes precedence over the global registry.
type extensionResolver struct{ xt protoreflect.ExtensionType }
func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
return r.xt, nil
}
return protoregistry.GlobalTypes.FindExtensionByName(field)
}
func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
return r.xt, nil
}
return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}
// GetExtensions returns a list of the extensions values present in m,
// corresponding with the provided list of extension descriptors, xts.
// If an extension is missing in m, the corresponding value is nil.
func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return nil, errNotExtendable
}
vs := make([]interface{}, len(xts))
for i, xt := range xts {
v, err := GetExtension(m, xt)
if err != nil {
if err == ErrMissingExtension {
continue
}
return vs, err
}
vs[i] = v
}
return vs, nil
}
// SetExtension sets an extension field in m to the provided value.
func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
return errNotExtendable
}
rv := reflect.ValueOf(v)
if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
}
if rv.Kind() == reflect.Ptr {
if rv.IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
}
if isScalarKind(rv.Elem().Kind()) {
v = rv.Elem().Interface()
}
}
xtd := xt.TypeDescriptor()
if !isValidExtension(mr.Descriptor(), xtd) {
return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
}
mr.Set(xtd, xt.ValueOf(v))
clearUnknown(mr, fieldNum(xt.Field))
return nil
}
// SetRawExtension inserts b into the unknown fields of m.
//
// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
func SetRawExtension(m Message, fnum int32, b []byte) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return
}
// Verify that the raw field is valid.
for b0 := b; len(b0) > 0; {
num, _, n := protowire.ConsumeField(b0)
if int32(num) != fnum {
panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
}
b0 = b0[n:]
}
ClearExtension(m, &ExtensionDesc{Field: fnum})
mr.SetUnknown(append(mr.GetUnknown(), b...))
}
// ExtensionDescs returns a list of extension descriptors found in m,
// containing descriptors for both populated extension fields in m and
// also unknown fields of m that are in the extension range.
// For the latter case, a type-incomplete descriptor is provided where only
// the ExtensionDesc.Field field is populated.
// The order of the extension descriptors is undefined.
func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
return nil, errNotExtendable
}
// Collect a set of known extension descriptors.
extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
if fd.IsExtension() {
xt := fd.(protoreflect.ExtensionTypeDescriptor)
if xd, ok := xt.Type().(*ExtensionDesc); ok {
extDescs[fd.Number()] = xd
}
}
return true
})
// Collect a set of unknown extension descriptors.
extRanges := mr.Descriptor().ExtensionRanges()
for b := mr.GetUnknown(); len(b) > 0; {
num, _, n := protowire.ConsumeField(b)
if extRanges.Has(num) && extDescs[num] == nil {
extDescs[num] = nil
}
b = b[n:]
}
// Transpose the set of descriptors into a list.
var xts []*ExtensionDesc
for num, xt := range extDescs {
if xt == nil {
xt = &ExtensionDesc{Field: int32(num)}
}
xts = append(xts, xt)
}
return xts, nil
}
// isValidExtension reports whether xtd is a valid extension descriptor for md.
func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
}
// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
// This function exists for historical reasons since the representation of
// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
func isScalarKind(k reflect.Kind) bool {
switch k {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
return true
default:
return false
}
}
// clearUnknown removes unknown fields from m where remover.Has reports true.
func clearUnknown(m protoreflect.Message, remover interface {
Has(protoreflect.FieldNumber) bool
}) {
var bo protoreflect.RawFields
for bi := m.GetUnknown(); len(bi) > 0; {
num, _, n := protowire.ConsumeField(bi)
if !remover.Has(num) {
bo = append(bo, bi[:n]...)
}
bi = bi[n:]
}
if bi := m.GetUnknown(); len(bi) != len(bo) {
m.SetUnknown(bo)
}
}
type fieldNum protoreflect.FieldNumber
func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
return protoreflect.FieldNumber(n1) == n2
}

View File

@@ -1,306 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoimpl"
)
// StructProperties represents protocol buffer type information for a
// generated protobuf message in the open-struct API.
//
// Deprecated: Do not use.
type StructProperties struct {
// Prop are the properties for each field.
//
// Fields belonging to a oneof are stored in OneofTypes instead, with a
// single Properties representing the parent oneof held here.
//
// The order of Prop matches the order of fields in the Go struct.
// Struct fields that are not related to protobufs have a "XXX_" prefix
// in the Properties.Name and must be ignored by the user.
Prop []*Properties
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the protobuf field name.
OneofTypes map[string]*OneofProperties
}
// Properties represents the type information for a protobuf message field.
//
// Deprecated: Do not use.
type Properties struct {
// Name is a placeholder name with little meaningful semantic value.
// If the name has an "XXX_" prefix, the entire Properties must be ignored.
Name string
// OrigName is the protobuf field name or oneof name.
OrigName string
// JSONName is the JSON name for the protobuf field.
JSONName string
// Enum is a placeholder name for enums.
// For historical reasons, this is neither the Go name for the enum,
// nor the protobuf name for the enum.
Enum string // Deprecated: Do not use.
// Weak contains the full name of the weakly referenced message.
Weak string
// Wire is a string representation of the wire type.
Wire string
// WireType is the protobuf wire type for the field.
WireType int
// Tag is the protobuf field number.
Tag int
// Required reports whether this is a required field.
Required bool
// Optional reports whether this is an optional field.
Optional bool
// Repeated reports whether this is a repeated field.
Repeated bool
// Packed reports whether this is a packed repeated field of scalars.
Packed bool
// Proto3 reports whether this field operates under the proto3 syntax.
Proto3 bool
// Oneof reports whether this field belongs within a oneof.
Oneof bool
// Default is the default value in string form.
Default string
// HasDefault reports whether the field has a default value.
HasDefault bool
// MapKeyProp is the properties for the key field for a map field.
MapKeyProp *Properties
// MapValProp is the properties for the value field for a map field.
MapValProp *Properties
}
// OneofProperties represents the type information for a protobuf oneof.
//
// Deprecated: Do not use.
type OneofProperties struct {
// Type is a pointer to the generated wrapper type for the field value.
// This is nil for messages that are not in the open-struct API.
Type reflect.Type
// Field is the index into StructProperties.Prop for the containing oneof.
Field int
// Prop is the properties for the field.
Prop *Properties
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
s += "," + strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
}
if p.Optional {
s += ",opt"
}
if p.Repeated {
s += ",rep"
}
if p.Packed {
s += ",packed"
}
s += ",name=" + p.OrigName
if p.JSONName != "" {
s += ",json=" + p.JSONName
}
if len(p.Enum) > 0 {
s += ",enum=" + p.Enum
}
if len(p.Weak) > 0 {
s += ",weak=" + p.Weak
}
if p.Proto3 {
s += ",proto3"
}
if p.Oneof {
s += ",oneof"
}
if p.HasDefault {
s += ",def=" + p.Default
}
return s
}
// Parse populates p by parsing a string in the protobuf struct field tag style.
func (p *Properties) Parse(tag string) {
// For example: "bytes,49,opt,name=foo,def=hello!"
for len(tag) > 0 {
i := strings.IndexByte(tag, ',')
if i < 0 {
i = len(tag)
}
switch s := tag[:i]; {
case strings.HasPrefix(s, "name="):
p.OrigName = s[len("name="):]
case strings.HasPrefix(s, "json="):
p.JSONName = s[len("json="):]
case strings.HasPrefix(s, "enum="):
p.Enum = s[len("enum="):]
case strings.HasPrefix(s, "weak="):
p.Weak = s[len("weak="):]
case strings.Trim(s, "0123456789") == "":
n, _ := strconv.ParseUint(s, 10, 32)
p.Tag = int(n)
case s == "opt":
p.Optional = true
case s == "req":
p.Required = true
case s == "rep":
p.Repeated = true
case s == "varint" || s == "zigzag32" || s == "zigzag64":
p.Wire = s
p.WireType = WireVarint
case s == "fixed32":
p.Wire = s
p.WireType = WireFixed32
case s == "fixed64":
p.Wire = s
p.WireType = WireFixed64
case s == "bytes":
p.Wire = s
p.WireType = WireBytes
case s == "group":
p.Wire = s
p.WireType = WireStartGroup
case s == "packed":
p.Packed = true
case s == "proto3":
p.Proto3 = true
case s == "oneof":
p.Oneof = true
case strings.HasPrefix(s, "def="):
// The default tag is special in that everything afterwards is the
// default regardless of the presence of commas.
p.HasDefault = true
p.Default, i = tag[len("def="):], len(tag)
}
tag = strings.TrimPrefix(tag[i:], ",")
}
}
// Init populates the properties from a protocol buffer struct tag.
//
// Deprecated: Do not use.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
p.Name = name
p.OrigName = name
if tag == "" {
return
}
p.Parse(tag)
if typ != nil && typ.Kind() == reflect.Map {
p.MapKeyProp = new(Properties)
p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
p.MapValProp = new(Properties)
p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
}
}
var propertiesCache sync.Map // map[reflect.Type]*StructProperties
// GetProperties returns the list of properties for the type represented by t,
// which must be a generated protocol buffer message in the open-struct API,
// where protobuf message fields are represented by exported Go struct fields.
//
// Deprecated: Use protobuf reflection instead.
func GetProperties(t reflect.Type) *StructProperties {
if p, ok := propertiesCache.Load(t); ok {
return p.(*StructProperties)
}
p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
return p.(*StructProperties)
}
func newProperties(t reflect.Type) *StructProperties {
if t.Kind() != reflect.Struct {
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
}
var hasOneof bool
prop := new(StructProperties)
// Construct a list of properties for each field in the struct.
for i := 0; i < t.NumField(); i++ {
p := new(Properties)
f := t.Field(i)
tagField := f.Tag.Get("protobuf")
p.Init(f.Type, f.Name, tagField, &f)
tagOneof := f.Tag.Get("protobuf_oneof")
if tagOneof != "" {
hasOneof = true
p.OrigName = tagOneof
}
// Rename unrelated struct fields with the "XXX_" prefix since so much
// user code simply checks for this to exclude special fields.
if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
p.Name = "XXX_" + p.Name
p.OrigName = "XXX_" + p.OrigName
} else if p.Weak != "" {
p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
}
prop.Prop = append(prop.Prop, p)
}
// Construct a mapping of oneof field names to properties.
if hasOneof {
var oneofWrappers []interface{}
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
}
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
}
if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
oneofWrappers = m.ProtoMessageInfo().OneofWrappers
}
}
prop.OneofTypes = make(map[string]*OneofProperties)
for _, wrapper := range oneofWrappers {
p := &OneofProperties{
Type: reflect.ValueOf(wrapper).Type(), // *T
Prop: new(Properties),
}
f := p.Type.Elem().Field(0)
p.Prop.Name = f.Name
p.Prop.Parse(f.Tag.Get("protobuf"))
// Determine the struct field that contains this oneof.
// Each wrapper is assignable to exactly one parent field.
var foundOneof bool
for i := 0; i < t.NumField() && !foundOneof; i++ {
if p.Type.AssignableTo(t.Field(i).Type) {
p.Field = i
foundOneof = true
}
}
if !foundOneof {
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
}
prop.OneofTypes[p.Prop.OrigName] = p
}
}
return prop
}
func (sp *StructProperties) Len() int { return len(sp.Prop) }
func (sp *StructProperties) Less(i, j int) bool { return false }
func (sp *StructProperties) Swap(i, j int) { return }
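
A short sketch of the struct-tag format that Properties.Parse above consumes, exercised through the deprecated v1 package; the tag string is the example from the Parse comment:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto" // the deprecated v1 package shown above
)

func main() {
    // The example tag string from the Parse comment above.
    p := new(proto.Properties)
    p.Parse("bytes,49,opt,name=foo,def=hello!")

    fmt.Println(p.Wire, p.Tag, p.Optional)           // bytes 49 true
    fmt.Println(p.OrigName, p.HasDefault, p.Default) // foo true hello!
}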

View File

@@ -1,167 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package proto provides functionality for handling protocol buffer messages.
// In particular, it provides marshaling and unmarshaling between a protobuf
// message and the binary wire format.
//
// See https://developers.google.com/protocol-buffers/docs/gotutorial for
// more information.
//
// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
package proto
import (
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoiface"
"google.golang.org/protobuf/runtime/protoimpl"
)
const (
ProtoPackageIsVersion1 = true
ProtoPackageIsVersion2 = true
ProtoPackageIsVersion3 = true
ProtoPackageIsVersion4 = true
)
// GeneratedEnum is any enum type generated by protoc-gen-go
// which is a named int32 kind.
// This type exists for documentation purposes.
type GeneratedEnum interface{}
// GeneratedMessage is any message type generated by protoc-gen-go
// which is a pointer to a named struct kind.
// This type exists for documentation purposes.
type GeneratedMessage interface{}
// Message is a protocol buffer message.
//
// This is the v1 version of the message interface and is marginally better
// than an empty interface as it lacks any method to programmatically interact
// with the contents of the message.
//
// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
// exposes protobuf reflection as a first-class feature of the interface.
//
// To convert a v1 message to a v2 message, use the MessageV2 function.
// To convert a v2 message to a v1 message, use the MessageV1 function.
type Message = protoiface.MessageV1
// MessageV1 converts either a v1 or v2 message to a v1 message.
// It returns nil if m is nil.
func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
return protoimpl.X.ProtoMessageV1Of(m)
}
// MessageV2 converts either a v1 or v2 message to a v2 message.
// It returns nil if m is nil.
func MessageV2(m GeneratedMessage) protoV2.Message {
return protoimpl.X.ProtoMessageV2Of(m)
}
// MessageReflect returns a reflective view for a message.
// It returns nil if m is nil.
func MessageReflect(m Message) protoreflect.Message {
return protoimpl.X.MessageOf(m)
}
// Marshaler is implemented by messages that can marshal themselves.
// This interface is used by the following functions: Size, Marshal,
// Buffer.Marshal, and Buffer.EncodeMessage.
//
// Deprecated: Do not implement.
type Marshaler interface {
// Marshal formats the encoded bytes of the message.
// It should be deterministic and emit valid protobuf wire data.
// The caller takes ownership of the returned buffer.
Marshal() ([]byte, error)
}
// Unmarshaler is implemented by messages that can unmarshal themselves.
// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
//
// Deprecated: Do not implement.
type Unmarshaler interface {
// Unmarshal parses the encoded bytes of the protobuf wire input.
// The provided buffer is only valid for the duration of the method call.
// It should not reset the receiver message.
Unmarshal([]byte) error
}
// Merger is implemented by messages that can merge themselves.
// This interface is used by the following functions: Clone and Merge.
//
// Deprecated: Do not implement.
type Merger interface {
// Merge merges the contents of src into the receiver message.
// It clones all data structures in src such that it aliases no mutable
// memory referenced by src.
Merge(src Message)
}
// RequiredNotSetError is an error type returned when
// marshaling or unmarshaling a message with missing required fields.
type RequiredNotSetError struct {
err error
}
func (e *RequiredNotSetError) Error() string {
if e.err != nil {
return e.err.Error()
}
return "proto: required field not set"
}
func (e *RequiredNotSetError) RequiredNotSet() bool {
return true
}
func checkRequiredNotSet(m protoV2.Message) error {
if err := protoV2.CheckInitialized(m); err != nil {
return &RequiredNotSetError{err: err}
}
return nil
}
// Clone returns a deep copy of src.
func Clone(src Message) Message {
return MessageV1(protoV2.Clone(MessageV2(src)))
}
// Merge merges src into dst, which must be messages of the same type.
//
// Populated scalar fields in src are copied to dst, while populated
// singular messages in src are merged into dst by recursively calling Merge.
// The elements of every list field in src are appended to the corresponding
// list fields in dst. The entries of every map field in src are copied into
// the corresponding map field in dst, possibly replacing existing entries.
// The unknown fields of src are appended to the unknown fields of dst.
func Merge(dst, src Message) {
protoV2.Merge(MessageV2(dst), MessageV2(src))
}
// Equal reports whether two messages are equal.
// If two messages marshal to the same bytes under deterministic serialization,
// then Equal is guaranteed to report true.
//
// Two messages are equal if they are the same protobuf message type,
// have the same set of populated known and extension field values,
// and the same set of unknown fields values.
//
// Scalar values are compared with the equivalent of the == operator in Go,
// except bytes values which are compared using bytes.Equal and
// floating point values which specially treat NaNs as equal.
// Message values are compared by recursively calling Equal.
// Lists are equal if each element value is also equal.
// Maps are equal if they have the same set of keys, where the pair of values
// for each key is also equal.
func Equal(x, y Message) bool {
return protoV2.Equal(MessageV2(x), MessageV2(y))
}
func isMessageSet(md protoreflect.MessageDescriptor) bool {
ms, ok := md.(interface{ IsMessageSet() bool })
return ok && ms.IsMessageSet()
}
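
The deleted proto.go forwards to the v2 implementation; a minimal sketch of the same Clone/Merge/Equal semantics through google.golang.org/protobuf/proto, with wrapperspb.StringValue chosen only to have a concrete message type:

package main

import (
    "fmt"

    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
    a := wrapperspb.String("retest")

    // Clone is a deep copy; Equal compares message type plus populated fields.
    b := proto.Clone(a).(*wrapperspb.StringValue)
    fmt.Println(proto.Equal(a, b)) // true

    // Merge copies populated scalar fields from src into dst.
    b.Value = "merged"
    proto.Merge(a, b)
    fmt.Println(a.GetValue(), proto.Equal(a, b)) // merged true
}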

View File

@@ -1,317 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"bytes"
"compress/gzip"
"fmt"
"io/ioutil"
"reflect"
"strings"
"sync"
"google.golang.org/protobuf/reflect/protodesc"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoimpl"
)
// filePath is the path to the proto source file.
type filePath = string // e.g., "google/protobuf/descriptor.proto"
// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
type fileDescGZIP = []byte
var fileCache sync.Map // map[filePath]fileDescGZIP
// RegisterFile is called from generated code to register the compressed
// FileDescriptorProto with the file path for a proto source file.
//
// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
func RegisterFile(s filePath, d fileDescGZIP) {
// Decompress the descriptor.
zr, err := gzip.NewReader(bytes.NewReader(d))
if err != nil {
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
}
b, err := ioutil.ReadAll(zr)
if err != nil {
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
}
// Construct a protoreflect.FileDescriptor from the raw descriptor.
// Note that DescBuilder.Build automatically registers the constructed
// file descriptor with the v2 registry.
protoimpl.DescBuilder{RawDescriptor: b}.Build()
// Locally cache the raw descriptor form for the file.
fileCache.Store(s, d)
}
// FileDescriptor returns the compressed FileDescriptorProto given the file path
// for a proto source file. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
func FileDescriptor(s filePath) fileDescGZIP {
if v, ok := fileCache.Load(s); ok {
return v.(fileDescGZIP)
}
// Find the descriptor in the v2 registry.
var b []byte
if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
}
// Locally cache the raw descriptor form for the file.
if len(b) > 0 {
v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
return v.(fileDescGZIP)
}
return nil
}
// enumName is the name of an enum. For historical reasons, the enum name is
// neither the full Go name nor the full protobuf name of the enum.
// The name is the dot-separated combination of just the proto package that the
// enum is declared within followed by the Go type name of the generated enum.
type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
// enumsByName maps enum values by name to their numeric counterpart.
type enumsByName = map[string]int32
// enumsByNumber maps enum values by number to their name counterpart.
type enumsByNumber = map[int32]string
var enumCache sync.Map // map[enumName]enumsByName
var numFilesCache sync.Map // map[protoreflect.FullName]int
// RegisterEnum is called from the generated code to register the mapping of
// enum value names to enum numbers for the enum identified by s.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
if _, ok := enumCache.Load(s); ok {
panic("proto: duplicate enum registered: " + s)
}
enumCache.Store(s, m)
// This does not forward registration to the v2 registry since this API
// lacks sufficient information to construct a complete v2 enum descriptor.
}
// EnumValueMap returns the mapping from enum value names to enum numbers for
// the enum of the given name. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
func EnumValueMap(s enumName) enumsByName {
if v, ok := enumCache.Load(s); ok {
return v.(enumsByName)
}
// Check whether the cache is stale. If the number of files in the current
// package differs, then it means that some enums may have been recently
// registered upstream that we do not know about.
var protoPkg protoreflect.FullName
if i := strings.LastIndexByte(s, '.'); i >= 0 {
protoPkg = protoreflect.FullName(s[:i])
}
v, _ := numFilesCache.Load(protoPkg)
numFiles, _ := v.(int)
if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
return nil // cache is up-to-date; was not found earlier
}
// Update the enum cache for all enums declared in the given proto package.
numFiles = 0
protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
name := protoimpl.X.LegacyEnumName(ed)
if _, ok := enumCache.Load(name); !ok {
m := make(enumsByName)
evs := ed.Values()
for i := evs.Len() - 1; i >= 0; i-- {
ev := evs.Get(i)
m[string(ev.Name())] = int32(ev.Number())
}
enumCache.LoadOrStore(name, m)
}
})
numFiles++
return true
})
numFilesCache.Store(protoPkg, numFiles)
// Check cache again for enum map.
if v, ok := enumCache.Load(s); ok {
return v.(enumsByName)
}
return nil
}
// walkEnums recursively walks all enums declared in d.
func walkEnums(d interface {
Enums() protoreflect.EnumDescriptors
Messages() protoreflect.MessageDescriptors
}, f func(protoreflect.EnumDescriptor)) {
eds := d.Enums()
for i := eds.Len() - 1; i >= 0; i-- {
f(eds.Get(i))
}
mds := d.Messages()
for i := mds.Len() - 1; i >= 0; i-- {
walkEnums(mds.Get(i), f)
}
}
// messageName is the full name of protobuf message.
type messageName = string
var messageTypeCache sync.Map // map[messageName]reflect.Type
// RegisterType is called from generated code to register the message Go type
// for a message of the given name.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
func RegisterType(m Message, s messageName) {
mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
panic(err)
}
messageTypeCache.Store(s, reflect.TypeOf(m))
}
// RegisterMapType is called from generated code to register the Go map type
// for a protobuf message representing a map entry.
//
// Deprecated: Do not use.
func RegisterMapType(m interface{}, s messageName) {
t := reflect.TypeOf(m)
if t.Kind() != reflect.Map {
panic(fmt.Sprintf("invalid map kind: %v", t))
}
if _, ok := messageTypeCache.Load(s); ok {
panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
}
messageTypeCache.Store(s, t)
}
// MessageType returns the message type for a named message.
// It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
func MessageType(s messageName) reflect.Type {
if v, ok := messageTypeCache.Load(s); ok {
return v.(reflect.Type)
}
// Derive the message type from the v2 registry.
var t reflect.Type
if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
t = messageGoType(mt)
}
// If we could not get a concrete type, it is possible that it is a
// pseudo-message for a map entry.
if t == nil {
d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
kt := goTypeForField(md.Fields().ByNumber(1))
vt := goTypeForField(md.Fields().ByNumber(2))
t = reflect.MapOf(kt, vt)
}
}
// Locally cache the message type for the given name.
if t != nil {
v, _ := messageTypeCache.LoadOrStore(s, t)
return v.(reflect.Type)
}
return nil
}
func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
switch k := fd.Kind(); k {
case protoreflect.EnumKind:
if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
return enumGoType(et)
}
return reflect.TypeOf(protoreflect.EnumNumber(0))
case protoreflect.MessageKind, protoreflect.GroupKind:
if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
return messageGoType(mt)
}
return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
default:
return reflect.TypeOf(fd.Default().Interface())
}
}
func enumGoType(et protoreflect.EnumType) reflect.Type {
return reflect.TypeOf(et.New(0))
}
func messageGoType(mt protoreflect.MessageType) reflect.Type {
return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
}
// MessageName returns the full protobuf name for the given message type.
//
// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
func MessageName(m Message) messageName {
if m == nil {
return ""
}
if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
return m.XXX_MessageName()
}
return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
}
// RegisterExtension is called from the generated code to register
// the extension descriptor.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
func RegisterExtension(d *ExtensionDesc) {
if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
panic(err)
}
}
type extensionsByNumber = map[int32]*ExtensionDesc
var extensionCache sync.Map // map[messageName]extensionsByNumber
// RegisteredExtensions returns a map of the registered extensions for the
// provided protobuf message, indexed by the extension field number.
//
// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
func RegisteredExtensions(m Message) extensionsByNumber {
// Check whether the cache is stale. If the number of extensions for
// the given message differs, then it means that some extensions were
// recently registered upstream that we do not know about.
s := MessageName(m)
v, _ := extensionCache.Load(s)
xs, _ := v.(extensionsByNumber)
if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
return xs // cache is up-to-date
}
// Cache is stale, re-compute the extensions map.
xs = make(extensionsByNumber)
protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
if xd, ok := xt.(*ExtensionDesc); ok {
xs[int32(xt.TypeDescriptor().Number())] = xd
} else {
// TODO: This implies that the protoreflect.ExtensionType is a
// custom type not generated by protoc-gen-go. We could try and
// convert the type to an ExtensionDesc.
}
return true
})
extensionCache.Store(s, xs)
return xs
}
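As a usage sketch (illustrative, not part of the original file), the cached map returned above can be consumed like any ordinary map; this helper just collects the registered extension field numbers for a message.

func exampleExtensionNumbers(m Message) []int32 {
	var nums []int32
	for num := range RegisteredExtensions(m) {
		nums = append(nums, num)
	}
	return nums
}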

View File

@ -1,801 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"unicode/utf8"
"google.golang.org/protobuf/encoding/prototext"
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const wrapTextUnmarshalV2 = false
// ParseError is returned by UnmarshalText.
type ParseError struct {
Message string
// Deprecated: Do not use.
Line, Offset int
}
func (e *ParseError) Error() string {
if wrapTextUnmarshalV2 {
return e.Message
}
if e.Line == 1 {
return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
}
return fmt.Sprintf("line %d: %v", e.Line, e.Message)
}
// UnmarshalText parses a proto text formatted string into m.
func UnmarshalText(s string, m Message) error {
if u, ok := m.(encoding.TextUnmarshaler); ok {
return u.UnmarshalText([]byte(s))
}
m.Reset()
mi := MessageV2(m)
if wrapTextUnmarshalV2 {
err := prototext.UnmarshalOptions{
AllowPartial: true,
}.Unmarshal([]byte(s), mi)
if err != nil {
return &ParseError{Message: err.Error()}
}
return checkRequiredNotSet(mi)
} else {
if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
return err
}
return checkRequiredNotSet(mi)
}
}
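A minimal sketch of how this function pairs with the text encoder defined elsewhere in the package (illustrative, not part of the original file); any generated v1 message can stand in for src and dst.

// exampleTextRoundTrip encodes src in compact text form and parses it back
// into dst, which should be a freshly allocated message of the same type.
func exampleTextRoundTrip(src, dst Message) error {
	return UnmarshalText(CompactTextString(src), dst)
}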
type textParser struct {
s string // remaining input
done bool // whether the parsing is finished (success or error)
backed bool // whether back() was called
offset, line int
cur token
}
type token struct {
value string
err *ParseError
line int // line number
offset int // byte number from start of input, not start of line
unquoted string // the unquoted version of value, if it was a quoted string
}
func newTextParser(s string) *textParser {
p := new(textParser)
p.s = s
p.line = 1
p.cur.line = 1
return p
}
func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
md := m.Descriptor()
fds := md.Fields()
// A struct is a sequence of "name: value", terminated by one of
// '>' or '}', or the end of the input. A name may also be
// "[extension]" or "[type/url]".
//
// The whole struct can also be an expanded Any message, like:
// [type/url] < ... struct contents ... >
seen := make(map[protoreflect.FieldNumber]bool)
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
if tok.value == "[" {
if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
return err
}
continue
}
// This is a normal, non-extension field.
name := protoreflect.Name(tok.value)
fd := fds.ByName(name)
switch {
case fd == nil:
gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
fd = gd
}
case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
fd = nil
case fd.IsWeak() && fd.Message().IsPlaceholder():
fd = nil
}
if fd == nil {
typeName := string(md.FullName())
if m, ok := m.Interface().(Message); ok {
t := reflect.TypeOf(m)
if t.Kind() == reflect.Ptr {
typeName = t.Elem().String()
}
}
return p.errorf("unknown field name %q in %v", name, typeName)
}
if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
}
if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
return p.errorf("non-repeated field %q was repeated", fd.Name())
}
seen[fd.Number()] = true
// Consume any colon.
if err := p.checkForColon(fd); err != nil {
return err
}
// Parse into the field.
v := m.Get(fd)
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
v = m.Mutable(fd)
}
if v, err = p.unmarshalValue(v, fd); err != nil {
return err
}
m.Set(fd, v)
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
}
return nil
}
func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
name, err := p.consumeExtensionOrAnyName()
if err != nil {
return err
}
// If it contains a slash, it's an Any type URL.
if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
tok := p.next()
if tok.err != nil {
return tok.err
}
// consume an optional colon
if tok.value == ":" {
tok = p.next()
if tok.err != nil {
return tok.err
}
}
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
if err != nil {
return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
}
m2 := mt.New()
if err := p.unmarshalMessage(m2, terminator); err != nil {
return err
}
b, err := protoV2.Marshal(m2.Interface())
if err != nil {
return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
}
urlFD := m.Descriptor().Fields().ByName("type_url")
valFD := m.Descriptor().Fields().ByName("value")
if seen[urlFD.Number()] {
return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
}
if seen[valFD.Number()] {
return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
}
m.Set(urlFD, protoreflect.ValueOfString(name))
m.Set(valFD, protoreflect.ValueOfBytes(b))
seen[urlFD.Number()] = true
seen[valFD.Number()] = true
return nil
}
xname := protoreflect.FullName(name)
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
if xt == nil && isMessageSet(m.Descriptor()) {
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
}
if xt == nil {
return p.errorf("unrecognized extension %q", name)
}
fd := xt.TypeDescriptor()
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
}
if err := p.checkForColon(fd); err != nil {
return err
}
v := m.Get(fd)
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
v = m.Mutable(fd)
}
v, err = p.unmarshalValue(v, fd)
if err != nil {
return err
}
m.Set(fd, v)
return p.consumeOptionalSeparator()
}
func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
tok := p.next()
if tok.err != nil {
return v, tok.err
}
if tok.value == "" {
return v, p.errorf("unexpected EOF")
}
switch {
case fd.IsList():
lv := v.List()
var err error
if tok.value == "[" {
// Repeated field with list notation, like [1,2,3].
for {
vv := lv.NewElement()
vv, err = p.unmarshalSingularValue(vv, fd)
if err != nil {
return v, err
}
lv.Append(vv)
tok := p.next()
if tok.err != nil {
return v, tok.err
}
if tok.value == "]" {
break
}
if tok.value != "," {
return v, p.errorf("Expected ']' or ',' found %q", tok.value)
}
}
return v, nil
}
// One value of the repeated field.
p.back()
vv := lv.NewElement()
vv, err = p.unmarshalSingularValue(vv, fd)
if err != nil {
return v, err
}
lv.Append(vv)
return v, nil
case fd.IsMap():
// The map entry should be this sequence of tokens:
// < key : KEY value : VALUE >
// However, implementations may omit key or value, and technically
// we should support them in any order.
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return v, p.errorf("expected '{' or '<', found %q", tok.value)
}
keyFD := fd.MapKey()
valFD := fd.MapValue()
mv := v.Map()
kv := keyFD.Default()
vv := mv.NewValue()
for {
tok := p.next()
if tok.err != nil {
return v, tok.err
}
if tok.value == terminator {
break
}
var err error
switch tok.value {
case "key":
if err := p.consumeToken(":"); err != nil {
return v, err
}
if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
return v, err
}
if err := p.consumeOptionalSeparator(); err != nil {
return v, err
}
case "value":
if err := p.checkForColon(valFD); err != nil {
return v, err
}
if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
return v, err
}
if err := p.consumeOptionalSeparator(); err != nil {
return v, err
}
default:
p.back()
return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
}
}
mv.Set(kv.MapKey(), vv)
return v, nil
default:
p.back()
return p.unmarshalSingularValue(v, fd)
}
}
func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
tok := p.next()
if tok.err != nil {
return v, tok.err
}
if tok.value == "" {
return v, p.errorf("unexpected EOF")
}
switch fd.Kind() {
case protoreflect.BoolKind:
switch tok.value {
case "true", "1", "t", "True":
return protoreflect.ValueOfBool(true), nil
case "false", "0", "f", "False":
return protoreflect.ValueOfBool(false), nil
}
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
return protoreflect.ValueOfInt32(int32(x)), nil
}
// The C++ parser accepts large positive hex numbers that use
// two's complement arithmetic to represent negative numbers.
// This feature is here for backwards compatibility with C++.
if strings.HasPrefix(tok.value, "0x") {
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
}
}
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
return protoreflect.ValueOfInt64(int64(x)), nil
}
// The C++ parser accepts large positive hex numbers that use
// two's complement arithmetic to represent negative numbers.
// This feature is here for backwards compatibility with C++.
if strings.HasPrefix(tok.value, "0x") {
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
}
}
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
return protoreflect.ValueOfUint32(uint32(x)), nil
}
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
return protoreflect.ValueOfUint64(uint64(x)), nil
}
case protoreflect.FloatKind:
// Ignore 'f' for compatibility with output generated by C++,
// but don't remove 'f' when the value is "-inf" or "inf".
v := tok.value
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
v = v[:len(v)-len("f")]
}
if x, err := strconv.ParseFloat(v, 32); err == nil {
return protoreflect.ValueOfFloat32(float32(x)), nil
}
case protoreflect.DoubleKind:
// Ignore 'f' for compatibility with output generated by C++,
// but don't remove 'f' when the value is "-inf" or "inf".
v := tok.value
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
v = v[:len(v)-len("f")]
}
if x, err := strconv.ParseFloat(v, 64); err == nil {
return protoreflect.ValueOfFloat64(float64(x)), nil
}
case protoreflect.StringKind:
if isQuote(tok.value[0]) {
return protoreflect.ValueOfString(tok.unquoted), nil
}
case protoreflect.BytesKind:
if isQuote(tok.value[0]) {
return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
}
case protoreflect.EnumKind:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
}
vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
if vd != nil {
return protoreflect.ValueOfEnum(vd.Number()), nil
}
case protoreflect.MessageKind, protoreflect.GroupKind:
var terminator string
switch tok.value {
case "{":
terminator = "}"
case "<":
terminator = ">"
default:
return v, p.errorf("expected '{' or '<', found %q", tok.value)
}
err := p.unmarshalMessage(v.Message(), terminator)
return v, err
default:
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
}
return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
}
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ":" {
if fd.Message() == nil {
return p.errorf("expected ':', found %q", tok.value)
}
p.back()
}
return nil
}
// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
// the following ']'. It returns the name or URL consumed.
func (p *textParser) consumeExtensionOrAnyName() (string, error) {
tok := p.next()
if tok.err != nil {
return "", tok.err
}
// If extension name or type url is quoted, it's a single token.
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
if err != nil {
return "", err
}
return name, p.consumeToken("]")
}
// Consume everything up to "]"
var parts []string
for tok.value != "]" {
parts = append(parts, tok.value)
tok = p.next()
if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
}
if p.done && tok.value != "]" {
return "", p.errorf("unclosed type_url or extension name")
}
}
return strings.Join(parts, ""), nil
}
// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in unmarshalMessage to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ";" && tok.value != "," {
p.back()
}
return nil
}
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
p.cur.err = pe
p.done = true
return pe
}
func (p *textParser) skipWhitespace() {
i := 0
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
if p.s[i] == '#' {
// comment; skip to end of line or input
for i < len(p.s) && p.s[i] != '\n' {
i++
}
if i == len(p.s) {
break
}
}
if p.s[i] == '\n' {
p.line++
}
i++
}
p.offset += i
p.s = p.s[i:len(p.s)]
if len(p.s) == 0 {
p.done = true
}
}
func (p *textParser) advance() {
// Skip whitespace
p.skipWhitespace()
if p.done {
return
}
// Start of non-whitespace
p.cur.err = nil
p.cur.offset, p.cur.line = p.offset, p.line
p.cur.unquoted = ""
switch p.s[0] {
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
// Single symbol
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
case '"', '\'':
// Quoted string
i := 1
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
if p.s[i] == '\\' && i+1 < len(p.s) {
// skip escaped char
i++
}
i++
}
if i >= len(p.s) || p.s[i] != p.s[0] {
p.errorf("unmatched quote")
return
}
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil {
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
return
}
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
p.cur.unquoted = unq
default:
i := 0
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
i++
}
if i == 0 {
p.errorf("unexpected byte %#x", p.s[0])
return
}
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
}
p.offset += len(p.cur.value)
}
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
// Advances the parser and returns the new current token.
func (p *textParser) next() *token {
if p.backed || p.done {
p.backed = false
return &p.cur
}
p.advance()
if p.done {
p.cur.value = ""
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
// Look for multiple quoted strings separated by whitespace,
// and concatenate them.
cat := p.cur
for {
p.skipWhitespace()
if p.done || !isQuote(p.s[0]) {
break
}
p.advance()
if p.cur.err != nil {
return &p.cur
}
cat.value += " " + p.cur.value
cat.unquoted += p.cur.unquoted
}
p.done = false // parser may have seen EOF, but we want to return cat
p.cur = cat
}
return &p.cur
}
func (p *textParser) consumeToken(s string) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != s {
p.back()
return p.errorf("expected %q, found %q", s, tok.value)
}
return nil
}
var errBadUTF8 = errors.New("proto: bad UTF-8")
func unquoteC(s string, quote rune) (string, error) {
// This is based on C++'s tokenizer.cc.
// Despite its name, this is *not* parsing C syntax.
// For instance, "\0" is an invalid quoted string.
// Avoid allocation in trivial cases.
simple := true
for _, r := range s {
if r == '\\' || r == quote {
simple = false
break
}
}
if simple {
return s, nil
}
buf := make([]byte, 0, 3*len(s)/2)
for len(s) > 0 {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", errBadUTF8
}
s = s[n:]
if r != '\\' {
if r < utf8.RuneSelf {
buf = append(buf, byte(r))
} else {
buf = append(buf, string(r)...)
}
continue
}
ch, tail, err := unescape(s)
if err != nil {
return "", err
}
buf = append(buf, ch...)
s = tail
}
return string(buf), nil
}
func unescape(s string) (ch string, tail string, err error) {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", "", errBadUTF8
}
s = s[n:]
switch r {
case 'a':
return "\a", s, nil
case 'b':
return "\b", s, nil
case 'f':
return "\f", s, nil
case 'n':
return "\n", s, nil
case 'r':
return "\r", s, nil
case 't':
return "\t", s, nil
case 'v':
return "\v", s, nil
case '?':
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
case '0', '1', '2', '3', '4', '5', '6', '7':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
ss := string(r) + s[:2]
s = s[2:]
i, err := strconv.ParseUint(ss, 8, 8)
if err != nil {
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
}
return string([]byte{byte(i)}), s, nil
case 'x', 'X', 'u', 'U':
var n int
switch r {
case 'x', 'X':
n = 2
case 'u':
n = 4
case 'U':
n = 8
}
if len(s) < n {
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
}
ss := s[:n]
s = s[n:]
i, err := strconv.ParseUint(ss, 16, 64)
if err != nil {
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
}
if r == 'x' || r == 'X' {
return string([]byte{byte(i)}), s, nil
}
if i > utf8.MaxRune {
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
}
return string(rune(i)), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
func isIdentOrNumberChar(c byte) bool {
switch {
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
return true
case '0' <= c && c <= '9':
return true
}
switch c {
case '-', '+', '.', '_':
return true
}
return false
}
func isWhitespace(c byte) bool {
switch c {
case ' ', '\t', '\n', '\r':
return true
}
return false
}
func isQuote(c byte) bool {
switch c {
case '"', '\'':
return true
}
return false
}

View File

@ -1,560 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"bytes"
"encoding"
"fmt"
"io"
"math"
"sort"
"strings"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const wrapTextMarshalV2 = false
// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
Compact bool // use compact text format (one line)
ExpandAny bool // expand google.protobuf.Any messages of known types
}
// Marshal writes the proto text format of m to w.
func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
b, err := tm.marshal(m)
if len(b) > 0 {
if _, err := w.Write(b); err != nil {
return err
}
}
return err
}
// Text returns a proto text formatted string of m.
func (tm *TextMarshaler) Text(m Message) string {
b, _ := tm.marshal(m)
return string(b)
}
func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return []byte("<nil>"), nil
}
if wrapTextMarshalV2 {
if m, ok := m.(encoding.TextMarshaler); ok {
return m.MarshalText()
}
opts := prototext.MarshalOptions{
AllowPartial: true,
EmitUnknown: true,
}
if !tm.Compact {
opts.Indent = " "
}
if !tm.ExpandAny {
opts.Resolver = (*protoregistry.Types)(nil)
}
return opts.Marshal(mr.Interface())
} else {
w := &textWriter{
compact: tm.Compact,
expandAny: tm.ExpandAny,
complete: true,
}
if m, ok := m.(encoding.TextMarshaler); ok {
b, err := m.MarshalText()
if err != nil {
return nil, err
}
w.Write(b)
return w.buf, nil
}
err := w.writeMessage(mr)
return w.buf, err
}
}
var (
defaultTextMarshaler = TextMarshaler{}
compactTextMarshaler = TextMarshaler{Compact: true}
)
// MarshalText writes the proto text format of m to w.
func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
// MarshalTextString returns a proto text formatted string of m.
func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
// CompactText writes the compact proto text format of m to w.
func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
// CompactTextString returns a compact proto text formatted string of m.
func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
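A small usage sketch for the marshaler type above (illustrative, not part of the original file): a caller that wants multi-line output with expanded Any messages configures its own TextMarshaler instead of using the package-level defaults.

func exampleExpandedText(w io.Writer, m Message) error {
	tm := &TextMarshaler{Compact: false, ExpandAny: true}
	return tm.Marshal(w, m)
}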
var (
newline = []byte("\n")
endBraceNewline = []byte("}\n")
posInf = []byte("inf")
negInf = []byte("-inf")
nan = []byte("nan")
)
// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
compact bool // same as TextMarshaler.Compact
expandAny bool // same as TextMarshaler.ExpandAny
complete bool // whether the current position is a complete line
indent int // indentation level; never negative
buf []byte
}
func (w *textWriter) Write(p []byte) (n int, _ error) {
newlines := bytes.Count(p, newline)
if newlines == 0 {
if !w.compact && w.complete {
w.writeIndent()
}
w.buf = append(w.buf, p...)
w.complete = false
return len(p), nil
}
frags := bytes.SplitN(p, newline, newlines+1)
if w.compact {
for i, frag := range frags {
if i > 0 {
w.buf = append(w.buf, ' ')
n++
}
w.buf = append(w.buf, frag...)
n += len(frag)
}
return n, nil
}
for i, frag := range frags {
if w.complete {
w.writeIndent()
}
w.buf = append(w.buf, frag...)
n += len(frag)
if i+1 < len(frags) {
w.buf = append(w.buf, '\n')
n++
}
}
w.complete = len(frags[len(frags)-1]) == 0
return n, nil
}
func (w *textWriter) WriteByte(c byte) error {
if w.compact && c == '\n' {
c = ' '
}
if !w.compact && w.complete {
w.writeIndent()
}
w.buf = append(w.buf, c)
w.complete = c == '\n'
return nil
}
func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
if !w.compact && w.complete {
w.writeIndent()
}
w.complete = false
if fd.Kind() != protoreflect.GroupKind {
w.buf = append(w.buf, fd.Name()...)
w.WriteByte(':')
} else {
// Use message type name for group field name.
w.buf = append(w.buf, fd.Message().Name()...)
}
if !w.compact {
w.WriteByte(' ')
}
}
func requiresQuotes(u string) bool {
// When the type URL contains any character outside [0-9A-Za-z._/], it must be quoted.
for _, ch := range u {
switch {
case ch == '.' || ch == '/' || ch == '_':
continue
case '0' <= ch && ch <= '9':
continue
case 'A' <= ch && ch <= 'Z':
continue
case 'a' <= ch && ch <= 'z':
continue
default:
return true
}
}
return false
}
// writeProto3Any writes an expanded google.protobuf.Any message.
//
// It returns (false, nil) if the value can't be unmarshaled (e.g. because
// required messages are not linked in).
//
// It returns (true, error) when the value was written in expanded format or
// an error was encountered.
func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
md := m.Descriptor()
fdURL := md.Fields().ByName("type_url")
fdVal := md.Fields().ByName("value")
url := m.Get(fdURL).String()
mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
if err != nil {
return false, nil
}
b := m.Get(fdVal).Bytes()
m2 := mt.New()
if err := proto.Unmarshal(b, m2.Interface()); err != nil {
return false, nil
}
w.Write([]byte("["))
if requiresQuotes(url) {
w.writeQuotedString(url)
} else {
w.Write([]byte(url))
}
if w.compact {
w.Write([]byte("]:<"))
} else {
w.Write([]byte("]: <\n"))
w.indent++
}
if err := w.writeMessage(m2); err != nil {
return true, err
}
if w.compact {
w.Write([]byte("> "))
} else {
w.indent--
w.Write([]byte(">\n"))
}
return true, nil
}
func (w *textWriter) writeMessage(m protoreflect.Message) error {
md := m.Descriptor()
if w.expandAny && md.FullName() == "google.protobuf.Any" {
if canExpand, err := w.writeProto3Any(m); canExpand {
return err
}
}
fds := md.Fields()
for i := 0; i < fds.Len(); {
fd := fds.Get(i)
if od := fd.ContainingOneof(); od != nil {
fd = m.WhichOneof(od)
i += od.Fields().Len()
} else {
i++
}
if fd == nil || !m.Has(fd) {
continue
}
switch {
case fd.IsList():
lv := m.Get(fd).List()
for j := 0; j < lv.Len(); j++ {
w.writeName(fd)
v := lv.Get(j)
if err := w.writeSingularValue(v, fd); err != nil {
return err
}
w.WriteByte('\n')
}
case fd.IsMap():
kfd := fd.MapKey()
vfd := fd.MapValue()
mv := m.Get(fd).Map()
type entry struct{ key, val protoreflect.Value }
var entries []entry
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
entries = append(entries, entry{k.Value(), v})
return true
})
sort.Slice(entries, func(i, j int) bool {
switch kfd.Kind() {
case protoreflect.BoolKind:
return !entries[i].key.Bool() && entries[j].key.Bool()
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
return entries[i].key.Int() < entries[j].key.Int()
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
return entries[i].key.Uint() < entries[j].key.Uint()
case protoreflect.StringKind:
return entries[i].key.String() < entries[j].key.String()
default:
panic("invalid kind")
}
})
for _, entry := range entries {
w.writeName(fd)
w.WriteByte('<')
if !w.compact {
w.WriteByte('\n')
}
w.indent++
w.writeName(kfd)
if err := w.writeSingularValue(entry.key, kfd); err != nil {
return err
}
w.WriteByte('\n')
w.writeName(vfd)
if err := w.writeSingularValue(entry.val, vfd); err != nil {
return err
}
w.WriteByte('\n')
w.indent--
w.WriteByte('>')
w.WriteByte('\n')
}
default:
w.writeName(fd)
if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
return err
}
w.WriteByte('\n')
}
}
if b := m.GetUnknown(); len(b) > 0 {
w.writeUnknownFields(b)
}
return w.writeExtensions(m)
}
func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
switch fd.Kind() {
case protoreflect.FloatKind, protoreflect.DoubleKind:
switch vf := v.Float(); {
case math.IsInf(vf, +1):
w.Write(posInf)
case math.IsInf(vf, -1):
w.Write(negInf)
case math.IsNaN(vf):
w.Write(nan)
default:
fmt.Fprint(w, v.Interface())
}
case protoreflect.StringKind:
// NOTE: This does not validate UTF-8 for historical reasons.
w.writeQuotedString(string(v.String()))
case protoreflect.BytesKind:
w.writeQuotedString(string(v.Bytes()))
case protoreflect.MessageKind, protoreflect.GroupKind:
var bra, ket byte = '<', '>'
if fd.Kind() == protoreflect.GroupKind {
bra, ket = '{', '}'
}
w.WriteByte(bra)
if !w.compact {
w.WriteByte('\n')
}
w.indent++
m := v.Message()
if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
b, err := m2.MarshalText()
if err != nil {
return err
}
w.Write(b)
} else {
w.writeMessage(m)
}
w.indent--
w.WriteByte(ket)
case protoreflect.EnumKind:
if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
fmt.Fprint(w, ev.Name())
} else {
fmt.Fprint(w, v.Enum())
}
default:
fmt.Fprint(w, v.Interface())
}
return nil
}
// writeQuotedString writes a quoted string in the protocol buffer text format.
func (w *textWriter) writeQuotedString(s string) {
w.WriteByte('"')
for i := 0; i < len(s); i++ {
switch c := s[i]; c {
case '\n':
w.buf = append(w.buf, `\n`...)
case '\r':
w.buf = append(w.buf, `\r`...)
case '\t':
w.buf = append(w.buf, `\t`...)
case '"':
w.buf = append(w.buf, `\"`...)
case '\\':
w.buf = append(w.buf, `\\`...)
default:
if isPrint := c >= 0x20 && c < 0x7f; isPrint {
w.buf = append(w.buf, c)
} else {
w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
}
}
}
w.WriteByte('"')
}
func (w *textWriter) writeUnknownFields(b []byte) {
if !w.compact {
fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
}
for len(b) > 0 {
num, wtyp, n := protowire.ConsumeTag(b)
if n < 0 {
return
}
b = b[n:]
if wtyp == protowire.EndGroupType {
w.indent--
w.Write(endBraceNewline)
continue
}
fmt.Fprint(w, num)
if wtyp != protowire.StartGroupType {
w.WriteByte(':')
}
if !w.compact || wtyp == protowire.StartGroupType {
w.WriteByte(' ')
}
switch wtyp {
case protowire.VarintType:
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return
}
b = b[n:]
fmt.Fprint(w, v)
case protowire.Fixed32Type:
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
return
}
b = b[n:]
fmt.Fprint(w, v)
case protowire.Fixed64Type:
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
return
}
b = b[n:]
fmt.Fprint(w, v)
case protowire.BytesType:
v, n := protowire.ConsumeBytes(b)
if n < 0 {
return
}
b = b[n:]
fmt.Fprintf(w, "%q", v)
case protowire.StartGroupType:
w.WriteByte('{')
w.indent++
default:
fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
}
w.WriteByte('\n')
}
}
// writeExtensions writes all the extensions in m.
func (w *textWriter) writeExtensions(m protoreflect.Message) error {
md := m.Descriptor()
if md.ExtensionRanges().Len() == 0 {
return nil
}
type ext struct {
desc protoreflect.FieldDescriptor
val protoreflect.Value
}
var exts []ext
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
if fd.IsExtension() {
exts = append(exts, ext{fd, v})
}
return true
})
sort.Slice(exts, func(i, j int) bool {
return exts[i].desc.Number() < exts[j].desc.Number()
})
for _, ext := range exts {
// For message set, use the name of the message as the extension name.
name := string(ext.desc.FullName())
if isMessageSet(ext.desc.ContainingMessage()) {
name = strings.TrimSuffix(name, ".message_set_extension")
}
if !ext.desc.IsList() {
if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
return err
}
} else {
lv := ext.val.List()
for i := 0; i < lv.Len(); i++ {
if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
return err
}
}
}
}
return nil
}
func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
fmt.Fprintf(w, "[%s]:", name)
if !w.compact {
w.WriteByte(' ')
}
if err := w.writeSingularValue(v, fd); err != nil {
return err
}
w.WriteByte('\n')
return nil
}
func (w *textWriter) writeIndent() {
if !w.complete {
return
}
for i := 0; i < w.indent*2; i++ {
w.buf = append(w.buf, ' ')
}
w.complete = false
}

View File

@ -1,78 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/runtime/protoiface"
)
// Size returns the size in bytes of the wire-format encoding of m.
func Size(m Message) int {
if m == nil {
return 0
}
mi := MessageV2(m)
return protoV2.Size(mi)
}
// Marshal returns the wire-format encoding of m.
func Marshal(m Message) ([]byte, error) {
b, err := marshalAppend(nil, m, false)
if b == nil {
b = zeroBytes
}
return b, err
}
var zeroBytes = make([]byte, 0, 0)
func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
if m == nil {
return nil, ErrNil
}
mi := MessageV2(m)
nbuf, err := protoV2.MarshalOptions{
Deterministic: deterministic,
AllowPartial: true,
}.MarshalAppend(buf, mi)
if err != nil {
return buf, err
}
if len(buf) == len(nbuf) {
if !mi.ProtoReflect().IsValid() {
return buf, ErrNil
}
}
return nbuf, checkRequiredNotSet(mi)
}
// Unmarshal parses a wire-format message in b and places the decoded results in m.
//
// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
// removed. Use UnmarshalMerge to preserve and append to existing data.
func Unmarshal(b []byte, m Message) error {
m.Reset()
return UnmarshalMerge(b, m)
}
// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
func UnmarshalMerge(b []byte, m Message) error {
mi := MessageV2(m)
out, err := protoV2.UnmarshalOptions{
AllowPartial: true,
Merge: true,
}.UnmarshalState(protoiface.UnmarshalInput{
Buf: b,
Message: mi.ProtoReflect(),
})
if err != nil {
return err
}
if out.Flags&protoiface.UnmarshalInitialized > 0 {
return nil
}
return checkRequiredNotSet(mi)
}
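As a usage sketch (illustrative, not part of the original file), the Marshal/Unmarshal pair above round-trips a message through the wire format; dst should be a freshly allocated message of the same type as src.

func exampleWireRoundTrip(src, dst Message) error {
	b, err := Marshal(src)
	if err != nil {
		return err
	}
	return Unmarshal(b, dst)
}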

View File

@ -1,34 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
// Bool stores v in a new bool value and returns a pointer to it.
func Bool(v bool) *bool { return &v }
// Int stores v in a new int32 value and returns a pointer to it.
//
// Deprecated: Use Int32 instead.
func Int(v int) *int32 { return Int32(int32(v)) }
// Int32 stores v in a new int32 value and returns a pointer to it.
func Int32(v int32) *int32 { return &v }
// Int64 stores v in a new int64 value and returns a pointer to it.
func Int64(v int64) *int64 { return &v }
// Uint32 stores v in a new uint32 value and returns a pointer to it.
func Uint32(v uint32) *uint32 { return &v }
// Uint64 stores v in a new uint64 value and returns a pointer to it.
func Uint64(v uint64) *uint64 { return &v }
// Float32 stores v in a new float32 value and returns a pointer to it.
func Float32(v float32) *float32 { return &v }
// Float64 stores v in a new float64 value and returns a pointer to it.
func Float64(v float64) *float64 { return &v }
// String stores v in a new string value and returns a pointer to it.
func String(v string) *string { return &v }
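These helpers exist because optional scalar fields of generated v1 structs are pointers. The sketch below is illustrative and not part of the original file; exampleConfig is a hypothetical stand-in for a generated message struct.

type exampleConfig struct {
	Name    *string
	Retries *int32
	Enabled *bool
}

func exampleWrapperUse() exampleConfig {
	return exampleConfig{
		Name:    String("demo"),
		Retries: Int32(3),
		Enabled: Bool(true),
	}
}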

View File

@ -1,27 +0,0 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

View File

@ -1,56 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
// As of Go 1.7 this package is available in the standard library under the
// name context. https://golang.org/pkg/context.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
//
// Programs that use Contexts should follow these rules to keep interfaces
// consistent across packages and enable static analysis tools to check context
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
// func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ...
// }
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The same Context may be passed to functions running in different goroutines;
// Contexts are safe for simultaneous use by multiple goroutines.
//
// See http://blog.golang.org/context for example code for a server that uses
// Contexts.
package context // import "golang.org/x/net/context"
// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
return background
}
// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine
// whether Contexts are propagated correctly in a program.
func TODO() Context {
return todo
}
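An illustrative sketch (not part of the original file) of where Background typically appears; run and log.Fatal stand for hypothetical application code, shown only to mirror the package documentation above.

// Typical top-level use from a caller's perspective:
//
//	func main() {
//		ctx := context.Background()
//		if err := run(ctx); err != nil { // run is a hypothetical entry point
//			log.Fatal(err)
//		}
//	}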

View File

@ -1,72 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.7
package context
import (
"context" // standard library's context, as of Go 1.7
"time"
)
var (
todo = context.TODO()
background = context.Background()
)
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = context.Canceled
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = context.DeadlineExceeded
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
ctx, f := context.WithCancel(parent)
return ctx, f
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
ctx, f := context.WithDeadline(parent, deadline)
return ctx, f
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return context.WithValue(parent, key, val)
}
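A minimal cancellation sketch (illustrative, not part of the original file): the caller owns the cancel function and should invoke it as soon as the work sharing the context is done, as the comments above require.

func exampleWithCancel(work func(Context)) {
	ctx, cancel := WithCancel(Background())
	defer cancel() // release resources as soon as work returns
	work(ctx)
}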

View File

@ -1,20 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.9
package context
import "context" // standard library's context, as of Go 1.7
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context = context.Context
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc = context.CancelFunc

View File

@ -1,300 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.7
package context
import (
"errors"
"fmt"
"sync"
"time"
)
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
return
}
func (*emptyCtx) Done() <-chan struct{} {
return nil
}
func (*emptyCtx) Err() error {
return nil
}
func (*emptyCtx) Value(key interface{}) interface{} {
return nil
}
func (e *emptyCtx) String() string {
switch e {
case background:
return "context.Background"
case todo:
return "context.TODO"
}
return "unknown empty Context"
}
var (
background = new(emptyCtx)
todo = new(emptyCtx)
)
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = errors.New("context canceled")
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = errors.New("context deadline exceeded")
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
c := newCancelCtx(parent)
propagateCancel(parent, c)
return c, func() { c.cancel(true, Canceled) }
}
// newCancelCtx returns an initialized cancelCtx.
func newCancelCtx(parent Context) *cancelCtx {
return &cancelCtx{
Context: parent,
done: make(chan struct{}),
}
}
// propagateCancel arranges for child to be canceled when parent is.
func propagateCancel(parent Context, child canceler) {
if parent.Done() == nil {
return // parent is never canceled
}
if p, ok := parentCancelCtx(parent); ok {
p.mu.Lock()
if p.err != nil {
// parent has already been canceled
child.cancel(false, p.err)
} else {
if p.children == nil {
p.children = make(map[canceler]bool)
}
p.children[child] = true
}
p.mu.Unlock()
} else {
go func() {
select {
case <-parent.Done():
child.cancel(false, parent.Err())
case <-child.Done():
}
}()
}
}
// parentCancelCtx follows a chain of parent references until it finds a
// *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
for {
switch c := parent.(type) {
case *cancelCtx:
return c, true
case *timerCtx:
return c.cancelCtx, true
case *valueCtx:
parent = c.Context
default:
return nil, false
}
}
}
// removeChild removes a context from its parent.
func removeChild(parent Context, child canceler) {
p, ok := parentCancelCtx(parent)
if !ok {
return
}
p.mu.Lock()
if p.children != nil {
delete(p.children, child)
}
p.mu.Unlock()
}
// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
cancel(removeFromParent bool, err error)
Done() <-chan struct{}
}
// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
Context
done chan struct{} // closed by the first cancel call.
mu sync.Mutex
children map[canceler]bool // set to nil by the first cancel call
err error // set to non-nil by the first cancel call
}
func (c *cancelCtx) Done() <-chan struct{} {
return c.done
}
func (c *cancelCtx) Err() error {
c.mu.Lock()
defer c.mu.Unlock()
return c.err
}
func (c *cancelCtx) String() string {
return fmt.Sprintf("%v.WithCancel", c.Context)
}
// cancel closes c.done, cancels each of c's children, and, if
// removeFromParent is true, removes c from its parent's children.
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
if err == nil {
panic("context: internal error: missing cancel error")
}
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
return // already canceled
}
c.err = err
close(c.done)
for child := range c.children {
// NOTE: acquiring the child's lock while holding parent's lock.
child.cancel(false, err)
}
c.children = nil
c.mu.Unlock()
if removeFromParent {
removeChild(c.Context, c)
}
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
// The current deadline is already sooner than the new one.
return WithCancel(parent)
}
c := &timerCtx{
cancelCtx: newCancelCtx(parent),
deadline: deadline,
}
propagateCancel(parent, c)
d := deadline.Sub(time.Now())
if d <= 0 {
c.cancel(true, DeadlineExceeded) // deadline has already passed
return c, func() { c.cancel(true, Canceled) }
}
c.mu.Lock()
defer c.mu.Unlock()
if c.err == nil {
c.timer = time.AfterFunc(d, func() {
c.cancel(true, DeadlineExceeded)
})
}
return c, func() { c.cancel(true, Canceled) }
}
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
*cancelCtx
timer *time.Timer // Under cancelCtx.mu.
deadline time.Time
}
func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
return c.deadline, true
}
func (c *timerCtx) String() string {
return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
}
func (c *timerCtx) cancel(removeFromParent bool, err error) {
c.cancelCtx.cancel(false, err)
if removeFromParent {
// Remove this timerCtx from its parent cancelCtx's children.
removeChild(c.cancelCtx.Context, c)
}
c.mu.Lock()
if c.timer != nil {
c.timer.Stop()
c.timer = nil
}
c.mu.Unlock()
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return &valueCtx{parent, key, val}
}
// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
Context
key, val interface{}
}
func (c *valueCtx) String() string {
return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
}
func (c *valueCtx) Value(key interface{}) interface{} {
if c.key == key {
return c.val
}
return c.Context.Value(key)
}

View File

@ -1,109 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.9
package context
import "time"
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
//
// WithCancel arranges for Done to be closed when cancel is called;
// WithDeadline arranges for Done to be closed when the deadline
// expires; WithTimeout arranges for Done to be closed when the timeout
// elapses.
//
// Done is provided for use in select statements:
//
// // Stream generates values with DoSomething and sends them to out
// // until DoSomething returns an error or ctx.Done is closed.
// func Stream(ctx context.Context, out chan<- Value) error {
// for {
// v, err := DoSomething(ctx)
// if err != nil {
// return err
// }
// select {
// case <-ctx.Done():
// return ctx.Err()
// case out <- v:
// }
// }
// }
//
// See http://blog.golang.org/pipelines for more examples of how to use
// a Done channel for cancelation.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key returns the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
//
// A key identifies a specific value in a Context. Functions that wish
// to store values in Context typically allocate a key in a global
// variable then use that key as the argument to context.WithValue and
// Context.Value. A key can be any type that supports equality;
// packages should define keys as an unexported type to avoid
// collisions.
//
// Packages that define a Context key should provide type-safe accessors
// for the values stored using that key:
//
// // Package user defines a User type that's stored in Contexts.
// package user
//
// import "golang.org/x/net/context"
//
// // User is the type of value stored in the Contexts.
// type User struct {...}
//
// // key is an unexported type for keys defined in this package.
// // This prevents collisions with keys defined in other packages.
// type key int
//
// // userKey is the key for user.User values in Contexts. It is
// // unexported; clients use user.NewContext and user.FromContext
// // instead of using this key directly.
// var userKey key = 0
//
// // NewContext returns a new Context that carries value u.
// func NewContext(ctx context.Context, u *User) context.Context {
// return context.WithValue(ctx, userKey, u)
// }
//
// // FromContext returns the User value stored in ctx, if any.
// func FromContext(ctx context.Context) (*User, bool) {
// u, ok := ctx.Value(userKey).(*User)
// return u, ok
// }
Value(key interface{}) interface{}
}
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()

View File

@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
-* Neither the name of Google Inc. nor the names of its
+* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

View File

@ -1,13 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build appengine
package internal
import "google.golang.org/appengine/urlfetch"
func init() {
appengineClientHook = urlfetch.Client
}

View File

@ -18,16 +18,11 @@ var HTTPClient ContextKey
// because nobody else can create a ContextKey, being unexported. // because nobody else can create a ContextKey, being unexported.
type ContextKey struct{} type ContextKey struct{}
var appengineClientHook func(context.Context) *http.Client
func ContextClient(ctx context.Context) *http.Client { func ContextClient(ctx context.Context) *http.Client {
if ctx != nil { if ctx != nil {
if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
return hc return hc
} }
} }
if appengineClientHook != nil {
return appengineClientHook(ctx)
}
return http.DefaultClient return http.DefaultClient
} }
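The hunk above removes the App Engine fallback but keeps the context lookup, so callers can still supply their own *http.Client through the exported oauth2.HTTPClient key, with http.DefaultClient as the fallback. A brief hedged sketch of that usage follows; the client ID, secret, and token URL are placeholders, not real values.

// Sketch only: routing the token exchange through a custom HTTP client.
package main

import (
	"context"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	// Custom client with a timeout; ContextClient will return this instead
	// of http.DefaultClient because of the context value set below.
	hc := &http.Client{Timeout: 10 * time.Second}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, hc)

	conf := &oauth2.Config{
		ClientID:     "client-id",     // placeholder
		ClientSecret: "client-secret", // placeholder
		Endpoint: oauth2.Endpoint{
			TokenURL: "https://example.com/token", // placeholder
		},
	}

	// Exchange and later refreshes use hc for their HTTP calls.
	_, _ = conf.Exchange(ctx, "authorization-code")
}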

View File

@ -393,7 +393,7 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
} }
} }
// ReuseTokenSource returns a TokenSource that acts in the same manner as the // ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the
// TokenSource returned by ReuseTokenSource, except the expiry buffer is // TokenSource returned by ReuseTokenSource, except the expiry buffer is
// configurable. The expiration time of a token is calculated as // configurable. The expiration time of a token is calculated as
// t.Expiry.Add(-earlyExpiry). // t.Expiry.Add(-earlyExpiry).
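A short, hedged sketch of the ReuseTokenSourceWithExpiry helper whose doc comment this hunk corrects; the 30-second buffer and static base token are arbitrary example values.

// Sketch only: a configurable expiry buffer so callers refresh tokens early.
package main

import (
	"time"

	"golang.org/x/oauth2"
)

func main() {
	base := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"})
	// Treat tokens as expired 30 seconds before their real deadline.
	ts := oauth2.ReuseTokenSourceWithExpiry(nil, base, 30*time.Second)
	_, _ = ts.Token()
}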

View File

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,678 +0,0 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
// +build !appengine
package internal
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/golang/protobuf/proto"
netcontext "golang.org/x/net/context"
basepb "google.golang.org/appengine/internal/base"
logpb "google.golang.org/appengine/internal/log"
remotepb "google.golang.org/appengine/internal/remote_api"
)
const (
apiPath = "/rpc_http"
defaultTicketSuffix = "/default.20150612t184001.0"
)
var (
// Incoming headers.
ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
devRequestIdHeader = http.CanonicalHeaderKey("X-Appengine-Dev-Request-Id")
// Outgoing headers.
apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
apiEndpointHeaderValue = []string{"app-engine-apis"}
apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
apiContentType = http.CanonicalHeaderKey("Content-Type")
apiContentTypeValue = []string{"application/octet-stream"}
logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
apiHTTPClient = &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: limitDial,
MaxIdleConns: 1000,
MaxIdleConnsPerHost: 10000,
IdleConnTimeout: 90 * time.Second,
},
}
defaultTicketOnce sync.Once
defaultTicket string
backgroundContextOnce sync.Once
backgroundContext netcontext.Context
)
func apiURL() *url.URL {
host, port := "appengine.googleapis.internal", "10001"
if h := os.Getenv("API_HOST"); h != "" {
host = h
}
if p := os.Getenv("API_PORT"); p != "" {
port = p
}
return &url.URL{
Scheme: "http",
Host: host + ":" + port,
Path: apiPath,
}
}
func handleHTTP(w http.ResponseWriter, r *http.Request) {
c := &context{
req: r,
outHeader: w.Header(),
apiURL: apiURL(),
}
r = r.WithContext(withContext(r.Context(), c))
c.req = r
stopFlushing := make(chan int)
// Patch up RemoteAddr so it looks reasonable.
if addr := r.Header.Get(userIPHeader); addr != "" {
r.RemoteAddr = addr
} else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
r.RemoteAddr = addr
} else {
// Should not normally reach here, but pick a sensible default anyway.
r.RemoteAddr = "127.0.0.1"
}
// The address in the headers will most likely be of these forms:
// 123.123.123.123
// 2001:db8::1
// net/http.Request.RemoteAddr is specified to be in "IP:port" form.
if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
// Assume the remote address is only a host; add a default port.
r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
}
// Start goroutine responsible for flushing app logs.
// This is done after adding c to ctx.m (and stopped before removing it)
// because flushing logs requires making an API call.
go c.logFlusher(stopFlushing)
executeRequestSafely(c, r)
c.outHeader = nil // make sure header changes aren't respected any more
stopFlushing <- 1 // any logging beyond this point will be dropped
// Flush any pending logs asynchronously.
c.pendingLogs.Lock()
flushes := c.pendingLogs.flushes
if len(c.pendingLogs.lines) > 0 {
flushes++
}
c.pendingLogs.Unlock()
flushed := make(chan struct{})
go func() {
defer close(flushed)
// Force a log flush, because with very short requests we
// may not ever flush logs.
c.flushLog(true)
}()
w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
// Avoid nil Write call if c.Write is never called.
if c.outCode != 0 {
w.WriteHeader(c.outCode)
}
if c.outBody != nil {
w.Write(c.outBody)
}
// Wait for the last flush to complete before returning,
// otherwise the security ticket will not be valid.
<-flushed
}
func executeRequestSafely(c *context, r *http.Request) {
defer func() {
if x := recover(); x != nil {
logf(c, 4, "%s", renderPanic(x)) // 4 == critical
c.outCode = 500
}
}()
http.DefaultServeMux.ServeHTTP(c, r)
}
func renderPanic(x interface{}) string {
buf := make([]byte, 16<<10) // 16 KB should be plenty
buf = buf[:runtime.Stack(buf, false)]
// Remove the first few stack frames:
// this func
// the recover closure in the caller
// That will root the stack trace at the site of the panic.
const (
skipStart = "internal.renderPanic"
skipFrames = 2
)
start := bytes.Index(buf, []byte(skipStart))
p := start
for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
if p < 0 {
break
}
}
if p >= 0 {
// buf[start:p+1] is the block to remove.
// Copy buf[p+1:] over buf[start:] and shrink buf.
copy(buf[start:], buf[p+1:])
buf = buf[:len(buf)-(p+1-start)]
}
// Add panic heading.
head := fmt.Sprintf("panic: %v\n\n", x)
if len(head) > len(buf) {
// Extremely unlikely to happen.
return head
}
copy(buf[len(head):], buf)
copy(buf, head)
return string(buf)
}
// context represents the context of an in-flight HTTP request.
// It implements the appengine.Context and http.ResponseWriter interfaces.
type context struct {
req *http.Request
outCode int
outHeader http.Header
outBody []byte
pendingLogs struct {
sync.Mutex
lines []*logpb.UserAppLogLine
flushes int
}
apiURL *url.URL
}
var contextKey = "holds a *context"
// jointContext joins two contexts in a superficial way.
// It takes values and timeouts from a base context, and only values from another context.
type jointContext struct {
base netcontext.Context
valuesOnly netcontext.Context
}
func (c jointContext) Deadline() (time.Time, bool) {
return c.base.Deadline()
}
func (c jointContext) Done() <-chan struct{} {
return c.base.Done()
}
func (c jointContext) Err() error {
return c.base.Err()
}
func (c jointContext) Value(key interface{}) interface{} {
if val := c.base.Value(key); val != nil {
return val
}
return c.valuesOnly.Value(key)
}
// fromContext returns the App Engine context or nil if ctx is not
// derived from an App Engine context.
func fromContext(ctx netcontext.Context) *context {
c, _ := ctx.Value(&contextKey).(*context)
return c
}
func withContext(parent netcontext.Context, c *context) netcontext.Context {
ctx := netcontext.WithValue(parent, &contextKey, c)
if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
ctx = withNamespace(ctx, ns)
}
return ctx
}
func toContext(c *context) netcontext.Context {
return withContext(netcontext.Background(), c)
}
func IncomingHeaders(ctx netcontext.Context) http.Header {
if c := fromContext(ctx); c != nil {
return c.req.Header
}
return nil
}
func ReqContext(req *http.Request) netcontext.Context {
return req.Context()
}
func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
return jointContext{
base: parent,
valuesOnly: req.Context(),
}
}
// DefaultTicket returns a ticket used for background context or dev_appserver.
func DefaultTicket() string {
defaultTicketOnce.Do(func() {
if IsDevAppServer() {
defaultTicket = "testapp" + defaultTicketSuffix
return
}
appID := partitionlessAppID()
escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
majVersion := VersionID(nil)
if i := strings.Index(majVersion, "."); i > 0 {
majVersion = majVersion[:i]
}
defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
})
return defaultTicket
}
func BackgroundContext() netcontext.Context {
backgroundContextOnce.Do(func() {
// Compute background security ticket.
ticket := DefaultTicket()
c := &context{
req: &http.Request{
Header: http.Header{
ticketHeader: []string{ticket},
},
},
apiURL: apiURL(),
}
backgroundContext = toContext(c)
// TODO(dsymonds): Wire up the shutdown handler to do a final flush.
go c.logFlusher(make(chan int))
})
return backgroundContext
}
// RegisterTestRequest registers the HTTP request req for testing, such that
// any API calls are sent to the provided URL. It returns a closure to delete
// the registration.
// It should only be used by aetest package.
func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
c := &context{
req: req,
apiURL: apiURL,
}
ctx := withContext(decorate(req.Context()), c)
req = req.WithContext(ctx)
c.req = req
return req, func() {}
}
var errTimeout = &CallError{
Detail: "Deadline exceeded",
Code: int32(remotepb.RpcError_CANCELLED),
Timeout: true,
}
func (c *context) Header() http.Header { return c.outHeader }
// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
// codes do not permit a response body (nor response entity headers such as
// Content-Length, Content-Type, etc).
func bodyAllowedForStatus(status int) bool {
switch {
case status >= 100 && status <= 199:
return false
case status == 204:
return false
case status == 304:
return false
}
return true
}
func (c *context) Write(b []byte) (int, error) {
if c.outCode == 0 {
c.WriteHeader(http.StatusOK)
}
if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
return 0, http.ErrBodyNotAllowed
}
c.outBody = append(c.outBody, b...)
return len(b), nil
}
func (c *context) WriteHeader(code int) {
if c.outCode != 0 {
logf(c, 3, "WriteHeader called multiple times on request.") // error level
return
}
c.outCode = code
}
func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
hreq := &http.Request{
Method: "POST",
URL: c.apiURL,
Header: http.Header{
apiEndpointHeader: apiEndpointHeaderValue,
apiMethodHeader: apiMethodHeaderValue,
apiContentType: apiContentTypeValue,
apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
},
Body: ioutil.NopCloser(bytes.NewReader(body)),
ContentLength: int64(len(body)),
Host: c.apiURL.Host,
}
if info := c.req.Header.Get(dapperHeader); info != "" {
hreq.Header.Set(dapperHeader, info)
}
if info := c.req.Header.Get(traceHeader); info != "" {
hreq.Header.Set(traceHeader, info)
}
tr := apiHTTPClient.Transport.(*http.Transport)
var timedOut int32 // atomic; set to 1 if timed out
t := time.AfterFunc(timeout, func() {
atomic.StoreInt32(&timedOut, 1)
tr.CancelRequest(hreq)
})
defer t.Stop()
defer func() {
// Check if timeout was exceeded.
if atomic.LoadInt32(&timedOut) != 0 {
err = errTimeout
}
}()
hresp, err := apiHTTPClient.Do(hreq)
if err != nil {
return nil, &CallError{
Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
Code: int32(remotepb.RpcError_UNKNOWN),
}
}
defer hresp.Body.Close()
hrespBody, err := ioutil.ReadAll(hresp.Body)
if hresp.StatusCode != 200 {
return nil, &CallError{
Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
Code: int32(remotepb.RpcError_UNKNOWN),
}
}
if err != nil {
return nil, &CallError{
Detail: fmt.Sprintf("service bridge response bad: %v", err),
Code: int32(remotepb.RpcError_UNKNOWN),
}
}
return hrespBody, nil
}
func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
if ns := NamespaceFromContext(ctx); ns != "" {
if fn, ok := NamespaceMods[service]; ok {
fn(in, ns)
}
}
if f, ctx, ok := callOverrideFromContext(ctx); ok {
return f(ctx, service, method, in, out)
}
// Handle already-done contexts quickly.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
c := fromContext(ctx)
if c == nil {
// Give a good error message rather than a panic lower down.
return errNotAppEngineContext
}
// Apply transaction modifications if we're in a transaction.
if t := transactionFromContext(ctx); t != nil {
if t.finished {
return errors.New("transaction context has expired")
}
applyTransaction(in, &t.transaction)
}
// Default RPC timeout is 60s.
timeout := 60 * time.Second
if deadline, ok := ctx.Deadline(); ok {
timeout = deadline.Sub(time.Now())
}
data, err := proto.Marshal(in)
if err != nil {
return err
}
ticket := c.req.Header.Get(ticketHeader)
// Use a test ticket under test environment.
if ticket == "" {
if appid := ctx.Value(&appIDOverrideKey); appid != nil {
ticket = appid.(string) + defaultTicketSuffix
}
}
// Fall back to using the background ticket when the request ticket is not available in Flex or dev_appserver.
if ticket == "" {
ticket = DefaultTicket()
}
if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" {
ticket = dri
}
req := &remotepb.Request{
ServiceName: &service,
Method: &method,
Request: data,
RequestId: &ticket,
}
hreqBody, err := proto.Marshal(req)
if err != nil {
return err
}
hrespBody, err := c.post(hreqBody, timeout)
if err != nil {
return err
}
res := &remotepb.Response{}
if err := proto.Unmarshal(hrespBody, res); err != nil {
return err
}
if res.RpcError != nil {
ce := &CallError{
Detail: res.RpcError.GetDetail(),
Code: *res.RpcError.Code,
}
switch remotepb.RpcError_ErrorCode(ce.Code) {
case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
ce.Timeout = true
}
return ce
}
if res.ApplicationError != nil {
return &APIError{
Service: *req.ServiceName,
Detail: res.ApplicationError.GetDetail(),
Code: *res.ApplicationError.Code,
}
}
if res.Exception != nil || res.JavaException != nil {
// This shouldn't happen, but let's be defensive.
return &CallError{
Detail: "service bridge returned exception",
Code: int32(remotepb.RpcError_UNKNOWN),
}
}
return proto.Unmarshal(res.Response, out)
}
func (c *context) Request() *http.Request {
return c.req
}
func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
// Truncate long log lines.
// TODO(dsymonds): Check if this is still necessary.
const lim = 8 << 10
if len(*ll.Message) > lim {
suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
}
c.pendingLogs.Lock()
c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
c.pendingLogs.Unlock()
}
var logLevelName = map[int64]string{
0: "DEBUG",
1: "INFO",
2: "WARNING",
3: "ERROR",
4: "CRITICAL",
}
func logf(c *context, level int64, format string, args ...interface{}) {
if c == nil {
panic("not an App Engine context")
}
s := fmt.Sprintf(format, args...)
s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
c.addLogLine(&logpb.UserAppLogLine{
TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
Level: &level,
Message: &s,
})
// Only duplicate log to stderr if not running on App Engine second generation
if !IsSecondGen() {
log.Print(logLevelName[level] + ": " + s)
}
}
// flushLog attempts to flush any pending logs to the appserver.
// It should not be called concurrently.
func (c *context) flushLog(force bool) (flushed bool) {
c.pendingLogs.Lock()
// Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
n, rem := 0, 30<<20
for ; n < len(c.pendingLogs.lines); n++ {
ll := c.pendingLogs.lines[n]
// Each log line will require about 3 bytes of overhead.
nb := proto.Size(ll) + 3
if nb > rem {
break
}
rem -= nb
}
lines := c.pendingLogs.lines[:n]
c.pendingLogs.lines = c.pendingLogs.lines[n:]
c.pendingLogs.Unlock()
if len(lines) == 0 && !force {
// Nothing to flush.
return false
}
rescueLogs := false
defer func() {
if rescueLogs {
c.pendingLogs.Lock()
c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
c.pendingLogs.Unlock()
}
}()
buf, err := proto.Marshal(&logpb.UserAppLogGroup{
LogLine: lines,
})
if err != nil {
log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
rescueLogs = true
return false
}
req := &logpb.FlushRequest{
Logs: buf,
}
res := &basepb.VoidProto{}
c.pendingLogs.Lock()
c.pendingLogs.flushes++
c.pendingLogs.Unlock()
if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
log.Printf("internal.flushLog: Flush RPC: %v", err)
rescueLogs = true
return false
}
return true
}
const (
// Log flushing parameters.
flushInterval = 1 * time.Second
forceFlushInterval = 60 * time.Second
)
func (c *context) logFlusher(stop <-chan int) {
lastFlush := time.Now()
tick := time.NewTicker(flushInterval)
for {
select {
case <-stop:
// Request finished.
tick.Stop()
return
case <-tick.C:
force := time.Now().Sub(lastFlush) > forceFlushInterval
if c.flushLog(force) {
lastFlush = time.Now()
}
}
}
}
func ContextForTesting(req *http.Request) netcontext.Context {
return toContext(&context{req: req})
}

View File

@ -1,169 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
// +build appengine
package internal
import (
"errors"
"fmt"
"net/http"
"time"
"appengine"
"appengine_internal"
basepb "appengine_internal/base"
"github.com/golang/protobuf/proto"
netcontext "golang.org/x/net/context"
)
var contextKey = "holds an appengine.Context"
// fromContext returns the App Engine context or nil if ctx is not
// derived from an App Engine context.
func fromContext(ctx netcontext.Context) appengine.Context {
c, _ := ctx.Value(&contextKey).(appengine.Context)
return c
}
// This is only for classic App Engine adapters.
func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) {
c := fromContext(ctx)
if c == nil {
return nil, errNotAppEngineContext
}
return c, nil
}
func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
ctx := netcontext.WithValue(parent, &contextKey, c)
s := &basepb.StringProto{}
c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
if ns := s.GetValue(); ns != "" {
ctx = NamespacedContext(ctx, ns)
}
return ctx
}
func IncomingHeaders(ctx netcontext.Context) http.Header {
if c := fromContext(ctx); c != nil {
if req, ok := c.Request().(*http.Request); ok {
return req.Header
}
}
return nil
}
func ReqContext(req *http.Request) netcontext.Context {
return WithContext(netcontext.Background(), req)
}
func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
c := appengine.NewContext(req)
return withContext(parent, c)
}
type testingContext struct {
appengine.Context
req *http.Request
}
func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
if service == "__go__" && method == "GetNamespace" {
return nil
}
return fmt.Errorf("testingContext: unsupported Call")
}
func (t *testingContext) Request() interface{} { return t.req }
func ContextForTesting(req *http.Request) netcontext.Context {
return withContext(netcontext.Background(), &testingContext{req: req})
}
func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
if ns := NamespaceFromContext(ctx); ns != "" {
if fn, ok := NamespaceMods[service]; ok {
fn(in, ns)
}
}
if f, ctx, ok := callOverrideFromContext(ctx); ok {
return f(ctx, service, method, in, out)
}
// Handle already-done contexts quickly.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
c := fromContext(ctx)
if c == nil {
// Give a good error message rather than a panic lower down.
return errNotAppEngineContext
}
// Apply transaction modifications if we're in a transaction.
if t := transactionFromContext(ctx); t != nil {
if t.finished {
return errors.New("transaction context has expired")
}
applyTransaction(in, &t.transaction)
}
var opts *appengine_internal.CallOptions
if d, ok := ctx.Deadline(); ok {
opts = &appengine_internal.CallOptions{
Timeout: d.Sub(time.Now()),
}
}
err := c.Call(service, method, in, out, opts)
switch v := err.(type) {
case *appengine_internal.APIError:
return &APIError{
Service: v.Service,
Detail: v.Detail,
Code: v.Code,
}
case *appengine_internal.CallError:
return &CallError{
Detail: v.Detail,
Code: v.Code,
Timeout: v.Timeout,
}
}
return err
}
func handleHTTP(w http.ResponseWriter, r *http.Request) {
panic("handleHTTP called; this should be impossible")
}
func logf(c appengine.Context, level int64, format string, args ...interface{}) {
var fn func(format string, args ...interface{})
switch level {
case 0:
fn = c.Debugf
case 1:
fn = c.Infof
case 2:
fn = c.Warningf
case 3:
fn = c.Errorf
case 4:
fn = c.Criticalf
default:
// This shouldn't happen.
fn = c.Criticalf
}
fn(format, args...)
}

View File

@ -1,123 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package internal
import (
"errors"
"os"
"github.com/golang/protobuf/proto"
netcontext "golang.org/x/net/context"
)
var errNotAppEngineContext = errors.New("not an App Engine context")
type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
var callOverrideKey = "holds []CallOverrideFunc"
func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
// We avoid appending to any existing call override
// so we don't risk overwriting a popped stack below.
var cofs []CallOverrideFunc
if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
cofs = append(cofs, uf...)
}
cofs = append(cofs, f)
return netcontext.WithValue(ctx, &callOverrideKey, cofs)
}
func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
if len(cofs) == 0 {
return nil, nil, false
}
// We found a list of overrides; grab the last, and reconstitute a
// context that will hide it.
f := cofs[len(cofs)-1]
ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
return f, ctx, true
}
type logOverrideFunc func(level int64, format string, args ...interface{})
var logOverrideKey = "holds a logOverrideFunc"
func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
return netcontext.WithValue(ctx, &logOverrideKey, f)
}
var appIDOverrideKey = "holds a string, being the full app ID"
func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
}
var namespaceKey = "holds the namespace string"
func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
return netcontext.WithValue(ctx, &namespaceKey, ns)
}
func NamespaceFromContext(ctx netcontext.Context) string {
// If there's no namespace, return the empty string.
ns, _ := ctx.Value(&namespaceKey).(string)
return ns
}
// FullyQualifiedAppID returns the fully-qualified application ID.
// This may contain a partition prefix (e.g. "s~" for High Replication apps),
// or a domain prefix (e.g. "example.com:").
func FullyQualifiedAppID(ctx netcontext.Context) string {
if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
return id
}
return fullyQualifiedAppID(ctx)
}
func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
f(level, format, args...)
return
}
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
logf(c, level, format, args...)
}
// NamespacedContext wraps a Context to support namespaces.
func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
return withNamespace(ctx, namespace)
}
// SetTestEnv sets the env variables for testing background ticket in Flex.
func SetTestEnv() func() {
var environ = []struct {
key, value string
}{
{"GAE_LONG_APP_ID", "my-app-id"},
{"GAE_MINOR_VERSION", "067924799508853122"},
{"GAE_MODULE_INSTANCE", "0"},
{"GAE_MODULE_NAME", "default"},
{"GAE_MODULE_VERSION", "20150612t184001"},
}
for _, v := range environ {
old := os.Getenv(v.key)
os.Setenv(v.key, v.value)
v.value = old
}
return func() { // Restore old environment after the test completes.
for _, v := range environ {
if v.value == "" {
os.Unsetenv(v.key)
continue
}
os.Setenv(v.key, v.value)
}
}
}

View File

@ -1,28 +0,0 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package internal
import (
"strings"
)
func parseFullAppID(appid string) (partition, domain, displayID string) {
if i := strings.Index(appid, "~"); i != -1 {
partition, appid = appid[:i], appid[i+1:]
}
if i := strings.Index(appid, ":"); i != -1 {
domain, appid = appid[:i], appid[i+1:]
}
return partition, domain, appid
}
// appID returns "appid" or "domain.com:appid".
func appID(fullAppID string) string {
_, dom, dis := parseFullAppID(fullAppID)
if dom != "" {
return dom + ":" + dis
}
return dis
}
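As a hedged illustration of the removed helpers above, the following standalone sketch mirrors parseFullAppID's behaviour on a fully-qualified ID; it re-implements the logic for demonstration only and is not part of the removed package.

// Sketch only: splitting "partition~domain:display" into its parts.
package main

import (
	"fmt"
	"strings"
)

func parse(appid string) (partition, domain, display string) {
	if i := strings.Index(appid, "~"); i != -1 {
		partition, appid = appid[:i], appid[i+1:]
	}
	if i := strings.Index(appid, ":"); i != -1 {
		domain, appid = appid[:i], appid[i+1:]
	}
	return partition, domain, appid
}

func main() {
	p, d, id := parse("s~example.com:myapp")
	// Prints: s example.com myapp — and appID would report "example.com:myapp".
	fmt.Println(p, d, id)
}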

View File

@ -1,308 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google.golang.org/appengine/internal/base/api_base.proto
package base
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type StringProto struct {
Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *StringProto) Reset() { *m = StringProto{} }
func (m *StringProto) String() string { return proto.CompactTextString(m) }
func (*StringProto) ProtoMessage() {}
func (*StringProto) Descriptor() ([]byte, []int) {
return fileDescriptor_api_base_9d49f8792e0c1140, []int{0}
}
func (m *StringProto) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StringProto.Unmarshal(m, b)
}
func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_StringProto.Marshal(b, m, deterministic)
}
func (dst *StringProto) XXX_Merge(src proto.Message) {
xxx_messageInfo_StringProto.Merge(dst, src)
}
func (m *StringProto) XXX_Size() int {
return xxx_messageInfo_StringProto.Size(m)
}
func (m *StringProto) XXX_DiscardUnknown() {
xxx_messageInfo_StringProto.DiscardUnknown(m)
}
var xxx_messageInfo_StringProto proto.InternalMessageInfo
func (m *StringProto) GetValue() string {
if m != nil && m.Value != nil {
return *m.Value
}
return ""
}
type Integer32Proto struct {
Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
func (*Integer32Proto) ProtoMessage() {}
func (*Integer32Proto) Descriptor() ([]byte, []int) {
return fileDescriptor_api_base_9d49f8792e0c1140, []int{1}
}
func (m *Integer32Proto) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Integer32Proto.Unmarshal(m, b)
}
func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic)
}
func (dst *Integer32Proto) XXX_Merge(src proto.Message) {
xxx_messageInfo_Integer32Proto.Merge(dst, src)
}
func (m *Integer32Proto) XXX_Size() int {
return xxx_messageInfo_Integer32Proto.Size(m)
}
func (m *Integer32Proto) XXX_DiscardUnknown() {
xxx_messageInfo_Integer32Proto.DiscardUnknown(m)
}
var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo
func (m *Integer32Proto) GetValue() int32 {
if m != nil && m.Value != nil {
return *m.Value
}
return 0
}
type Integer64Proto struct {
Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
func (*Integer64Proto) ProtoMessage() {}
func (*Integer64Proto) Descriptor() ([]byte, []int) {
return fileDescriptor_api_base_9d49f8792e0c1140, []int{2}
}
func (m *Integer64Proto) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Integer64Proto.Unmarshal(m, b)
}
func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic)
}
func (dst *Integer64Proto) XXX_Merge(src proto.Message) {
xxx_messageInfo_Integer64Proto.Merge(dst, src)
}
func (m *Integer64Proto) XXX_Size() int {
return xxx_messageInfo_Integer64Proto.Size(m)
}
func (m *Integer64Proto) XXX_DiscardUnknown() {
xxx_messageInfo_Integer64Proto.DiscardUnknown(m)
}
var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo
func (m *Integer64Proto) GetValue() int64 {
if m != nil && m.Value != nil {
return *m.Value
}
return 0
}
type BoolProto struct {
Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BoolProto) Reset() { *m = BoolProto{} }
func (m *BoolProto) String() string { return proto.CompactTextString(m) }
func (*BoolProto) ProtoMessage() {}
func (*BoolProto) Descriptor() ([]byte, []int) {
return fileDescriptor_api_base_9d49f8792e0c1140, []int{3}
}
func (m *BoolProto) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BoolProto.Unmarshal(m, b)
}
func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic)
}
func (dst *BoolProto) XXX_Merge(src proto.Message) {
xxx_messageInfo_BoolProto.Merge(dst, src)
}
func (m *BoolProto) XXX_Size() int {
return xxx_messageInfo_BoolProto.Size(m)
}
func (m *BoolProto) XXX_DiscardUnknown() {
xxx_messageInfo_BoolProto.DiscardUnknown(m)
}
var xxx_messageInfo_BoolProto proto.InternalMessageInfo
func (m *BoolProto) GetValue() bool {
if m != nil && m.Value != nil {
return *m.Value
}
return false
}
type DoubleProto struct {
Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DoubleProto) Reset() { *m = DoubleProto{} }
func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
func (*DoubleProto) ProtoMessage() {}
func (*DoubleProto) Descriptor() ([]byte, []int) {
return fileDescriptor_api_base_9d49f8792e0c1140, []int{4}
}
func (m *DoubleProto) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DoubleProto.Unmarshal(m, b)
}
func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic)
}
func (dst *DoubleProto) XXX_Merge(src proto.Message) {
xxx_messageInfo_DoubleProto.Merge(dst, src)
}
func (m *DoubleProto) XXX_Size() int {
return xxx_messageInfo_DoubleProto.Size(m)
}
func (m *DoubleProto) XXX_DiscardUnknown() {
xxx_messageInfo_DoubleProto.DiscardUnknown(m)
}
var xxx_messageInfo_DoubleProto proto.InternalMessageInfo
func (m *DoubleProto) GetValue() float64 {
if m != nil && m.Value != nil {
return *m.Value
}
return 0
}
type BytesProto struct {
Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BytesProto) Reset() { *m = BytesProto{} }
func (m *BytesProto) String() string { return proto.CompactTextString(m) }
func (*BytesProto) ProtoMessage() {}
func (*BytesProto) Descriptor() ([]byte, []int) {
return fileDescriptor_api_base_9d49f8792e0c1140, []int{5}
}
func (m *BytesProto) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BytesProto.Unmarshal(m, b)
}
func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic)
}
func (dst *BytesProto) XXX_Merge(src proto.Message) {
xxx_messageInfo_BytesProto.Merge(dst, src)
}
func (m *BytesProto) XXX_Size() int {
return xxx_messageInfo_BytesProto.Size(m)
}
func (m *BytesProto) XXX_DiscardUnknown() {
xxx_messageInfo_BytesProto.DiscardUnknown(m)
}
var xxx_messageInfo_BytesProto proto.InternalMessageInfo
func (m *BytesProto) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
type VoidProto struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *VoidProto) Reset() { *m = VoidProto{} }
func (m *VoidProto) String() string { return proto.CompactTextString(m) }
func (*VoidProto) ProtoMessage() {}
func (*VoidProto) Descriptor() ([]byte, []int) {
return fileDescriptor_api_base_9d49f8792e0c1140, []int{6}
}
func (m *VoidProto) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_VoidProto.Unmarshal(m, b)
}
func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic)
}
func (dst *VoidProto) XXX_Merge(src proto.Message) {
xxx_messageInfo_VoidProto.Merge(dst, src)
}
func (m *VoidProto) XXX_Size() int {
return xxx_messageInfo_VoidProto.Size(m)
}
func (m *VoidProto) XXX_DiscardUnknown() {
xxx_messageInfo_VoidProto.DiscardUnknown(m)
}
var xxx_messageInfo_VoidProto proto.InternalMessageInfo
func init() {
proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto")
proto.RegisterType((*Integer32Proto)(nil), "appengine.base.Integer32Proto")
proto.RegisterType((*Integer64Proto)(nil), "appengine.base.Integer64Proto")
proto.RegisterType((*BoolProto)(nil), "appengine.base.BoolProto")
proto.RegisterType((*DoubleProto)(nil), "appengine.base.DoubleProto")
proto.RegisterType((*BytesProto)(nil), "appengine.base.BytesProto")
proto.RegisterType((*VoidProto)(nil), "appengine.base.VoidProto")
}
func init() {
proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140)
}
var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{
// 199 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30,
0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40,
0x45, 0x9c, 0x83, 0x8b, 0x9b, 0x28, 0x38, 0xb8, 0x48, 0x8a, 0xc7, 0x11, 0x08, 0xb9, 0x90, 0xa6,
0x82, 0xdf, 0x5e, 0xda, 0xd2, 0xfa, 0xc2, 0x9b, 0xed, 0xfe, 0xfc, 0xe0, 0xe1, 0x81, 0x27, 0x62,
0x26, 0x8b, 0x82, 0xd8, 0x6a, 0x47, 0x82, 0x03, 0x49, 0xed, 0x3d, 0x3a, 0x32, 0x0e, 0xa5, 0x71,
0x11, 0x83, 0xd3, 0x56, 0x0e, 0x7a, 0x44, 0xa9, 0xbd, 0xf9, 0x9a, 0x07, 0xe1, 0x03, 0x47, 0xbe,
0x68, 0x76, 0x27, 0xe6, 0x6b, 0xd7, 0x43, 0xfd, 0x1e, 0x83, 0x71, 0xf4, 0xba, 0xbc, 0x2f, 0xa1,
0xf8, 0xd1, 0x76, 0xc2, 0x36, 0xbb, 0xca, 0x6f, 0xab, 0xb7, 0x75, 0xe9, 0x6e, 0xa0, 0x79, 0x71,
0x11, 0x09, 0xc3, 0xfd, 0x5d, 0xc2, 0x15, 0xc7, 0xee, 0xf1, 0x21, 0xe1, 0x4e, 0x36, 0x77, 0x0d,
0x95, 0x62, 0xb6, 0x09, 0x52, 0x6e, 0xa4, 0x87, 0xfa, 0x99, 0xa7, 0xc1, 0x62, 0x02, 0x65, 0xff,
0x79, 0xa0, 0x7e, 0x23, 0x8e, 0xab, 0x69, 0x0f, 0xcd, 0xb9, 0xca, 0xcb, 0xdd, 0xd5, 0x50, 0x7d,
0xb0, 0xf9, 0x5e, 0x98, 0x3a, 0xfb, 0x3c, 0x9d, 0x9b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xba,
0x37, 0x25, 0xea, 0x44, 0x01, 0x00, 0x00,
}

View File

@ -1,33 +0,0 @@
// Built-in base types for API calls. Primarily useful as return types.
syntax = "proto2";
option go_package = "base";
package appengine.base;
message StringProto {
required string value = 1;
}
message Integer32Proto {
required int32 value = 1;
}
message Integer64Proto {
required int64 value = 1;
}
message BoolProto {
required bool value = 1;
}
message DoubleProto {
required double value = 1;
}
message BytesProto {
required bytes value = 1 [ctype=CORD];
}
message VoidProto {
}

File diff suppressed because it is too large

View File

@ -1,551 +0,0 @@
syntax = "proto2";
option go_package = "datastore";
package appengine;
message Action{}
message PropertyValue {
optional int64 int64Value = 1;
optional bool booleanValue = 2;
optional string stringValue = 3;
optional double doubleValue = 4;
optional group PointValue = 5 {
required double x = 6;
required double y = 7;
}
optional group UserValue = 8 {
required string email = 9;
required string auth_domain = 10;
optional string nickname = 11;
optional string federated_identity = 21;
optional string federated_provider = 22;
}
optional group ReferenceValue = 12 {
required string app = 13;
optional string name_space = 20;
repeated group PathElement = 14 {
required string type = 15;
optional int64 id = 16;
optional string name = 17;
}
}
}
message Property {
enum Meaning {
NO_MEANING = 0;
BLOB = 14;
TEXT = 15;
BYTESTRING = 16;
ATOM_CATEGORY = 1;
ATOM_LINK = 2;
ATOM_TITLE = 3;
ATOM_CONTENT = 4;
ATOM_SUMMARY = 5;
ATOM_AUTHOR = 6;
GD_WHEN = 7;
GD_EMAIL = 8;
GEORSS_POINT = 9;
GD_IM = 10;
GD_PHONENUMBER = 11;
GD_POSTALADDRESS = 12;
GD_RATING = 13;
BLOBKEY = 17;
ENTITY_PROTO = 19;
INDEX_VALUE = 18;
};
optional Meaning meaning = 1 [default = NO_MEANING];
optional string meaning_uri = 2;
required string name = 3;
required PropertyValue value = 5;
required bool multiple = 4;
optional bool searchable = 6 [default=false];
enum FtsTokenizationOption {
HTML = 1;
ATOM = 2;
}
optional FtsTokenizationOption fts_tokenization_option = 8;
optional string locale = 9 [default = "en"];
}
message Path {
repeated group Element = 1 {
required string type = 2;
optional int64 id = 3;
optional string name = 4;
}
}
message Reference {
required string app = 13;
optional string name_space = 20;
required Path path = 14;
}
message User {
required string email = 1;
required string auth_domain = 2;
optional string nickname = 3;
optional string federated_identity = 6;
optional string federated_provider = 7;
}
message EntityProto {
required Reference key = 13;
required Path entity_group = 16;
optional User owner = 17;
enum Kind {
GD_CONTACT = 1;
GD_EVENT = 2;
GD_MESSAGE = 3;
}
optional Kind kind = 4;
optional string kind_uri = 5;
repeated Property property = 14;
repeated Property raw_property = 15;
optional int32 rank = 18;
}
message CompositeProperty {
required int64 index_id = 1;
repeated string value = 2;
}
message Index {
required string entity_type = 1;
required bool ancestor = 5;
repeated group Property = 2 {
required string name = 3;
enum Direction {
ASCENDING = 1;
DESCENDING = 2;
}
optional Direction direction = 4 [default = ASCENDING];
}
}
message CompositeIndex {
required string app_id = 1;
required int64 id = 2;
required Index definition = 3;
enum State {
WRITE_ONLY = 1;
READ_WRITE = 2;
DELETED = 3;
ERROR = 4;
}
required State state = 4;
optional bool only_use_if_required = 6 [default = false];
}
message IndexPostfix {
message IndexValue {
required string property_name = 1;
required PropertyValue value = 2;
}
repeated IndexValue index_value = 1;
optional Reference key = 2;
optional bool before = 3 [default=true];
}
message IndexPosition {
optional string key = 1;
optional bool before = 2 [default=true];
}
message Snapshot {
enum Status {
INACTIVE = 0;
ACTIVE = 1;
}
required int64 ts = 1;
}
message InternalHeader {
optional string qos = 1;
}
message Transaction {
optional InternalHeader header = 4;
required fixed64 handle = 1;
required string app = 2;
optional bool mark_changes = 3 [default = false];
}
message Query {
optional InternalHeader header = 39;
required string app = 1;
optional string name_space = 29;
optional string kind = 3;
optional Reference ancestor = 17;
repeated group Filter = 4 {
enum Operator {
LESS_THAN = 1;
LESS_THAN_OR_EQUAL = 2;
GREATER_THAN = 3;
GREATER_THAN_OR_EQUAL = 4;
EQUAL = 5;
IN = 6;
EXISTS = 7;
}
required Operator op = 6;
repeated Property property = 14;
}
optional string search_query = 8;
repeated group Order = 9 {
enum Direction {
ASCENDING = 1;
DESCENDING = 2;
}
required string property = 10;
optional Direction direction = 11 [default = ASCENDING];
}
enum Hint {
ORDER_FIRST = 1;
ANCESTOR_FIRST = 2;
FILTER_FIRST = 3;
}
optional Hint hint = 18;
optional int32 count = 23;
optional int32 offset = 12 [default = 0];
optional int32 limit = 16;
optional CompiledCursor compiled_cursor = 30;
optional CompiledCursor end_compiled_cursor = 31;
repeated CompositeIndex composite_index = 19;
optional bool require_perfect_plan = 20 [default = false];
optional bool keys_only = 21 [default = false];
optional Transaction transaction = 22;
optional bool compile = 25 [default = false];
optional int64 failover_ms = 26;
optional bool strong = 32;
repeated string property_name = 33;
repeated string group_by_property_name = 34;
optional bool distinct = 24;
optional int64 min_safe_time_seconds = 35;
repeated string safe_replica_name = 36;
optional bool persist_offset = 37 [default=false];
}
message CompiledQuery {
required group PrimaryScan = 1 {
optional string index_name = 2;
optional string start_key = 3;
optional bool start_inclusive = 4;
optional string end_key = 5;
optional bool end_inclusive = 6;
repeated string start_postfix_value = 22;
repeated string end_postfix_value = 23;
optional int64 end_unapplied_log_timestamp_us = 19;
}
repeated group MergeJoinScan = 7 {
required string index_name = 8;
repeated string prefix_value = 9;
optional bool value_prefix = 20 [default=false];
}
optional Index index_def = 21;
optional int32 offset = 10 [default = 0];
optional int32 limit = 11;
required bool keys_only = 12;
repeated string property_name = 24;
optional int32 distinct_infix_size = 25;
optional group EntityFilter = 13 {
optional bool distinct = 14 [default=false];
optional string kind = 17;
optional Reference ancestor = 18;
}
}
message CompiledCursor {
optional group Position = 2 {
optional string start_key = 27;
repeated group IndexValue = 29 {
optional string property = 30;
required PropertyValue value = 31;
}
optional Reference key = 32;
optional bool start_inclusive = 28 [default=true];
}
}
message Cursor {
required fixed64 cursor = 1;
optional string app = 2;
}
message Error {
enum ErrorCode {
BAD_REQUEST = 1;
CONCURRENT_TRANSACTION = 2;
INTERNAL_ERROR = 3;
NEED_INDEX = 4;
TIMEOUT = 5;
PERMISSION_DENIED = 6;
BIGTABLE_ERROR = 7;
COMMITTED_BUT_STILL_APPLYING = 8;
CAPABILITY_DISABLED = 9;
TRY_ALTERNATE_BACKEND = 10;
SAFE_TIME_TOO_OLD = 11;
}
}
message Cost {
optional int32 index_writes = 1;
optional int32 index_write_bytes = 2;
optional int32 entity_writes = 3;
optional int32 entity_write_bytes = 4;
optional group CommitCost = 5 {
optional int32 requested_entity_puts = 6;
optional int32 requested_entity_deletes = 7;
};
optional int32 approximate_storage_delta = 8;
optional int32 id_sequence_updates = 9;
}
message GetRequest {
optional InternalHeader header = 6;
repeated Reference key = 1;
optional Transaction transaction = 2;
optional int64 failover_ms = 3;
optional bool strong = 4;
optional bool allow_deferred = 5 [default=false];
}
message GetResponse {
repeated group Entity = 1 {
optional EntityProto entity = 2;
optional Reference key = 4;
optional int64 version = 3;
}
repeated Reference deferred = 5;
optional bool in_order = 6 [default=true];
}
message PutRequest {
optional InternalHeader header = 11;
repeated EntityProto entity = 1;
optional Transaction transaction = 2;
repeated CompositeIndex composite_index = 3;
optional bool trusted = 4 [default = false];
optional bool force = 7 [default = false];
optional bool mark_changes = 8 [default = false];
repeated Snapshot snapshot = 9;
enum AutoIdPolicy {
CURRENT = 0;
SEQUENTIAL = 1;
}
optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
}
message PutResponse {
repeated Reference key = 1;
optional Cost cost = 2;
repeated int64 version = 3;
}
message TouchRequest {
optional InternalHeader header = 10;
repeated Reference key = 1;
repeated CompositeIndex composite_index = 2;
optional bool force = 3 [default = false];
repeated Snapshot snapshot = 9;
}
message TouchResponse {
optional Cost cost = 1;
}
message DeleteRequest {
optional InternalHeader header = 10;
repeated Reference key = 6;
optional Transaction transaction = 5;
optional bool trusted = 4 [default = false];
optional bool force = 7 [default = false];
optional bool mark_changes = 8 [default = false];
repeated Snapshot snapshot = 9;
}
message DeleteResponse {
optional Cost cost = 1;
repeated int64 version = 3;
}
message NextRequest {
optional InternalHeader header = 5;
required Cursor cursor = 1;
optional int32 count = 2;
optional int32 offset = 4 [default = 0];
optional bool compile = 3 [default = false];
}
message QueryResult {
optional Cursor cursor = 1;
repeated EntityProto result = 2;
optional int32 skipped_results = 7;
required bool more_results = 3;
optional bool keys_only = 4;
optional bool index_only = 9;
optional bool small_ops = 10;
optional CompiledQuery compiled_query = 5;
optional CompiledCursor compiled_cursor = 6;
repeated CompositeIndex index = 8;
repeated int64 version = 11;
}
message AllocateIdsRequest {
optional InternalHeader header = 4;
optional Reference model_key = 1;
optional int64 size = 2;
optional int64 max = 3;
repeated Reference reserve = 5;
}
message AllocateIdsResponse {
required int64 start = 1;
required int64 end = 2;
optional Cost cost = 3;
}
message CompositeIndices {
repeated CompositeIndex index = 1;
}
message AddActionsRequest {
optional InternalHeader header = 3;
required Transaction transaction = 1;
repeated Action action = 2;
}
message AddActionsResponse {
}
message BeginTransactionRequest {
optional InternalHeader header = 3;
required string app = 1;
optional bool allow_multiple_eg = 2 [default = false];
optional string database_id = 4;
enum TransactionMode {
UNKNOWN = 0;
READ_ONLY = 1;
READ_WRITE = 2;
}
optional TransactionMode mode = 5 [default = UNKNOWN];
optional Transaction previous_transaction = 7;
}
message CommitResponse {
optional Cost cost = 1;
repeated group Version = 3 {
required Reference root_entity_key = 4;
required int64 version = 5;
}
}
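The Go bindings generated from the messages above are proto2-style (pointer-valued fields, Enum() helpers). As a rough sketch only — assuming the generated datastore package removed by this commit were still importable — a BeginTransactionRequest could be populated like this, which is essentially what transaction.go further down in this diff does:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/appengine/internal/datastore"
)

func main() {
	// Sketch only: "example-app" is a placeholder app ID; field and enum
	// names come from the proto2 bindings generated for the messages above.
	req := &pb.BeginTransactionRequest{
		App:             proto.String("example-app"),
		AllowMultipleEg: proto.Bool(true), // cross-entity-group transaction
		Mode:            pb.BeginTransactionRequest_READ_WRITE.Enum(),
	}
	fmt.Println(proto.CompactTextString(req))
}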

View File

@ -1,55 +0,0 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package internal
import (
"os"
netcontext "golang.org/x/net/context"
)
var (
// This is set to true in identity_classic.go, which is behind the appengine build tag.
// The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not
// the second generation runtimes (>= Go 1.11), so this indicates whether we're on a
// first-gen runtime. See IsSecondGen below for the second-gen check.
appengineStandard bool
// This is set to true in identity_flex.go, which is behind the appenginevm build tag.
appengineFlex bool
)
// AppID is the implementation of the wrapper function of the same name in
// ../identity.go. See that file for commentary.
func AppID(c netcontext.Context) string {
return appID(FullyQualifiedAppID(c))
}
// IsStandard is the implementation of the wrapper function of the same name in
// ../appengine.go. See that file for commentary.
func IsStandard() bool {
// appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not
// second-gen (>= Go 1.11).
return appengineStandard || IsSecondGen()
}
// IsSecondGen is the implementation of the wrapper function of the same name in
// ../appengine.go. See that file for commentary.
func IsSecondGen() bool {
// Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime.
return os.Getenv("GAE_ENV") == "standard"
}
// IsFlex is the implementation of the wrapper function of the same name in
// ../appengine.go. See that file for commentary.
func IsFlex() bool {
return appengineFlex
}
// IsAppEngine is the implementation of the wrapper function of the same name in
// ../appengine.go. See that file for commentary.
func IsAppEngine() bool {
return IsStandard() || IsFlex()
}
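The second-generation check above reduces to a single environment variable; a minimal, dependency-free sketch of the same test (GAE_ENV is set to "standard" on second-gen standard runtimes, per the comment above):

package main

import (
	"fmt"
	"os"
)

// isSecondGen mirrors the check above: second-generation App Engine
// standard runtimes set GAE_ENV=standard.
func isSecondGen() bool {
	return os.Getenv("GAE_ENV") == "standard"
}

func main() {
	fmt.Println("second-gen standard runtime:", isSecondGen())
}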

View File

@ -1,61 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
// +build appengine
package internal
import (
"appengine"
netcontext "golang.org/x/net/context"
)
func init() {
appengineStandard = true
}
func DefaultVersionHostname(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return appengine.DefaultVersionHostname(c)
}
func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
func ServerSoftware() string { return appengine.ServerSoftware() }
func InstanceID() string { return appengine.InstanceID() }
func IsDevAppServer() bool { return appengine.IsDevAppServer() }
func RequestID(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return appengine.RequestID(c)
}
func ModuleName(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return appengine.ModuleName(c)
}
func VersionID(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return appengine.VersionID(c)
}
func fullyQualifiedAppID(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return c.FullyQualifiedAppID()
}

View File

@ -1,11 +0,0 @@
// Copyright 2018 Google LLC. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
// +build appenginevm
package internal
func init() {
appengineFlex = true
}

View File

@ -1,134 +0,0 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
// +build !appengine
package internal
import (
"log"
"net/http"
"os"
"strings"
netcontext "golang.org/x/net/context"
)
// These functions are implementations of the wrapper functions
// in ../appengine/identity.go. See that file for commentary.
const (
hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
hRequestLogId = "X-AppEngine-Request-Log-Id"
hDatacenter = "X-AppEngine-Datacenter"
)
func ctxHeaders(ctx netcontext.Context) http.Header {
c := fromContext(ctx)
if c == nil {
return nil
}
return c.Request().Header
}
func DefaultVersionHostname(ctx netcontext.Context) string {
return ctxHeaders(ctx).Get(hDefaultVersionHostname)
}
func RequestID(ctx netcontext.Context) string {
return ctxHeaders(ctx).Get(hRequestLogId)
}
func Datacenter(ctx netcontext.Context) string {
if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" {
return dc
}
// If the header isn't set, read zone from the metadata service.
// It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE]
zone, err := getMetadata("instance/zone")
if err != nil {
log.Printf("Datacenter: %v", err)
return ""
}
parts := strings.Split(string(zone), "/")
if len(parts) == 0 {
return ""
}
return parts[len(parts)-1]
}
func ServerSoftware() string {
// TODO(dsymonds): Remove fallback when we've verified this.
if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
return s
}
if s := os.Getenv("GAE_ENV"); s != "" {
return s
}
return "Google App Engine/1.x.x"
}
// TODO(dsymonds): Remove the metadata fetches.
func ModuleName(_ netcontext.Context) string {
if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
return s
}
if s := os.Getenv("GAE_SERVICE"); s != "" {
return s
}
return string(mustGetMetadata("instance/attributes/gae_backend_name"))
}
func VersionID(_ netcontext.Context) string {
if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
return s1 + "." + s2
}
if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" {
return s1 + "." + s2
}
return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
}
func InstanceID() string {
if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
return s
}
if s := os.Getenv("GAE_INSTANCE"); s != "" {
return s
}
return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
}
func partitionlessAppID() string {
// gae_project has everything except the partition prefix.
if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" {
return appID
}
if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" {
return project
}
return string(mustGetMetadata("instance/attributes/gae_project"))
}
func fullyQualifiedAppID(_ netcontext.Context) string {
if s := os.Getenv("GAE_APPLICATION"); s != "" {
return s
}
appID := partitionlessAppID()
part := os.Getenv("GAE_PARTITION")
if part == "" {
part = string(mustGetMetadata("instance/attributes/gae_partition"))
}
if part != "" {
appID = part + "~" + appID
}
return appID
}
func IsDevAppServer() bool {
return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
}
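Most lookups in this file follow the same pattern: try the legacy environment variable, then the current one, then fall back to the metadata server. A hedged sketch of just the environment-variable part of that chain (the metadata fallback is omitted; the variable names are the ones used above):

package main

import (
	"fmt"
	"os"
)

// firstNonEmptyEnv returns the value of the first environment variable in
// keys that is set, mirroring the lookup order used by ModuleName, VersionID
// and InstanceID above.
func firstNonEmptyEnv(keys ...string) string {
	for _, k := range keys {
		if v := os.Getenv(k); v != "" {
			return v
		}
	}
	return ""
}

func main() {
	fmt.Println("module:", firstNonEmptyEnv("GAE_MODULE_NAME", "GAE_SERVICE"))
	fmt.Println("instance:", firstNonEmptyEnv("GAE_MODULE_INSTANCE", "GAE_INSTANCE"))
}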

View File

@ -1,110 +0,0 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
// Package internal provides support for package appengine.
//
// Programs should not use this package directly. Its API is not stable.
// Use packages appengine and appengine/* instead.
package internal
import (
"fmt"
"github.com/golang/protobuf/proto"
remotepb "google.golang.org/appengine/internal/remote_api"
)
// errorCodeMaps is a map of service name to the error code map for the service.
var errorCodeMaps = make(map[string]map[int32]string)
// RegisterErrorCodeMap is called from API implementations to register their
// error code map. This should only be called from init functions.
func RegisterErrorCodeMap(service string, m map[int32]string) {
errorCodeMaps[service] = m
}
type timeoutCodeKey struct {
service string
code int32
}
// timeoutCodes is the set of service+code pairs that represent timeouts.
var timeoutCodes = make(map[timeoutCodeKey]bool)
func RegisterTimeoutErrorCode(service string, code int32) {
timeoutCodes[timeoutCodeKey{service, code}] = true
}
// APIError is the type returned by appengine.Context's Call method
// when an API call fails in an API-specific way. This may be, for instance,
// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
type APIError struct {
Service string
Detail string
Code int32 // API-specific error code
}
func (e *APIError) Error() string {
if e.Code == 0 {
if e.Detail == "" {
return "APIError <empty>"
}
return e.Detail
}
s := fmt.Sprintf("API error %d", e.Code)
if m, ok := errorCodeMaps[e.Service]; ok {
s += " (" + e.Service + ": " + m[e.Code] + ")"
} else {
// Shouldn't happen, but provide a bit more detail if it does.
s = e.Service + " " + s
}
if e.Detail != "" {
s += ": " + e.Detail
}
return s
}
func (e *APIError) IsTimeout() bool {
return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
}
// CallError is the type returned by appengine.Context's Call method when an
// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
type CallError struct {
Detail string
Code int32
// TODO: Remove this if we get a distinguishable error code.
Timeout bool
}
func (e *CallError) Error() string {
var msg string
switch remotepb.RpcError_ErrorCode(e.Code) {
case remotepb.RpcError_UNKNOWN:
return e.Detail
case remotepb.RpcError_OVER_QUOTA:
msg = "Over quota"
case remotepb.RpcError_CAPABILITY_DISABLED:
msg = "Capability disabled"
case remotepb.RpcError_CANCELLED:
msg = "Canceled"
default:
msg = fmt.Sprintf("Call error %d", e.Code)
}
s := msg + ": " + e.Detail
if e.Timeout {
s += " (timeout)"
}
return s
}
func (e *CallError) IsTimeout() bool {
return e.Timeout
}
// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
// The function should be prepared to be called on the same message more than once; it should only modify the
// RPC request the first time.
var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
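For reference, APIError.Error degrades gracefully when no error-code map has been registered for the service: it falls back to the "<service> API error <code>: <detail>" form. A small sketch with hypothetical values:

package main

import (
	"fmt"

	"google.golang.org/appengine/internal"
)

func main() {
	// Hypothetical values; with no error-code map registered for the
	// service, Error() uses the "<service> API error <code>: <detail>" form.
	err := &internal.APIError{Service: "datastore_v3", Code: 5, Detail: "operation took too long"}
	fmt.Println(err.Error()) // datastore_v3 API error 5: operation took too long
	fmt.Println("timeout:", err.IsTimeout()) // false unless RegisterTimeoutErrorCode was called
}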

File diff suppressed because it is too large

View File

@ -1,150 +0,0 @@
syntax = "proto2";
option go_package = "log";
package appengine;
message LogServiceError {
enum ErrorCode {
OK = 0;
INVALID_REQUEST = 1;
STORAGE_ERROR = 2;
}
}
message UserAppLogLine {
required int64 timestamp_usec = 1;
required int64 level = 2;
required string message = 3;
}
message UserAppLogGroup {
repeated UserAppLogLine log_line = 2;
}
message FlushRequest {
optional bytes logs = 1;
}
message SetStatusRequest {
required string status = 1;
}
message LogOffset {
optional bytes request_id = 1;
}
message LogLine {
required int64 time = 1;
required int32 level = 2;
required string log_message = 3;
}
message RequestLog {
required string app_id = 1;
optional string module_id = 37 [default="default"];
required string version_id = 2;
required bytes request_id = 3;
optional LogOffset offset = 35;
required string ip = 4;
optional string nickname = 5;
required int64 start_time = 6;
required int64 end_time = 7;
required int64 latency = 8;
required int64 mcycles = 9;
required string method = 10;
required string resource = 11;
required string http_version = 12;
required int32 status = 13;
required int64 response_size = 14;
optional string referrer = 15;
optional string user_agent = 16;
required string url_map_entry = 17;
required string combined = 18;
optional int64 api_mcycles = 19;
optional string host = 20;
optional double cost = 21;
optional string task_queue_name = 22;
optional string task_name = 23;
optional bool was_loading_request = 24;
optional int64 pending_time = 25;
optional int32 replica_index = 26 [default = -1];
optional bool finished = 27 [default = true];
optional bytes clone_key = 28;
repeated LogLine line = 29;
optional bool lines_incomplete = 36;
optional bytes app_engine_release = 38;
optional int32 exit_reason = 30;
optional bool was_throttled_for_time = 31;
optional bool was_throttled_for_requests = 32;
optional int64 throttled_time = 33;
optional bytes server_name = 34;
}
message LogModuleVersion {
optional string module_id = 1 [default="default"];
optional string version_id = 2;
}
message LogReadRequest {
required string app_id = 1;
repeated string version_id = 2;
repeated LogModuleVersion module_version = 19;
optional int64 start_time = 3;
optional int64 end_time = 4;
optional LogOffset offset = 5;
repeated bytes request_id = 6;
optional int32 minimum_log_level = 7;
optional bool include_incomplete = 8;
optional int64 count = 9;
optional string combined_log_regex = 14;
optional string host_regex = 15;
optional int32 replica_index = 16;
optional bool include_app_logs = 10;
optional int32 app_logs_per_request = 17;
optional bool include_host = 11;
optional bool include_all = 12;
optional bool cache_iterator = 13;
optional int32 num_shards = 18;
}
message LogReadResponse {
repeated RequestLog log = 1;
optional LogOffset offset = 2;
optional int64 last_end_time = 3;
}
message LogUsageRecord {
optional string version_id = 1;
optional int32 start_time = 2;
optional int32 end_time = 3;
optional int64 count = 4;
optional int64 total_size = 5;
optional int32 records = 6;
}
message LogUsageRequest {
required string app_id = 1;
repeated string version_id = 2;
optional int32 start_time = 3;
optional int32 end_time = 4;
optional uint32 resolution_hours = 5 [default = 1];
optional bool combine_versions = 6;
optional int32 usage_version = 7;
optional bool versions_only = 8;
}
message LogUsageResponse {
repeated LogUsageRecord usage = 1;
optional LogUsageRecord summary = 2;
}

View File

@ -1,16 +0,0 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
// +build appengine
package internal
import (
"appengine_internal"
)
func Main() {
MainPath = ""
appengine_internal.Main()
}

View File

@ -1,7 +0,0 @@
package internal
// MainPath stores the file path of the main package. On App Engine Standard
// using Go version 1.9 and below, this will be unset. On App Engine Flex and
// App Engine Standard second-gen (Go 1.11 and above), this will be the
// filepath to package main.
var MainPath string

View File

@ -1,69 +0,0 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
// +build !appengine
package internal
import (
"io"
"log"
"net/http"
"net/url"
"os"
"path/filepath"
"runtime"
)
func Main() {
MainPath = filepath.Dir(findMainPath())
installHealthChecker(http.DefaultServeMux)
port := "8080"
if s := os.Getenv("PORT"); s != "" {
port = s
}
host := ""
if IsDevAppServer() {
host = "127.0.0.1"
}
if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil {
log.Fatalf("http.ListenAndServe: %v", err)
}
}
// Find the path to package main by looking at the root Caller.
func findMainPath() string {
pc := make([]uintptr, 100)
n := runtime.Callers(2, pc)
frames := runtime.CallersFrames(pc[:n])
for {
frame, more := frames.Next()
// Tests won't have package main, instead they have testing.tRunner
if frame.Function == "main.main" || frame.Function == "testing.tRunner" {
return frame.File
}
if !more {
break
}
}
return ""
}
func installHealthChecker(mux *http.ServeMux) {
// If no health check handler has been installed by this point, add a trivial one.
const healthPath = "/_ah/health"
hreq := &http.Request{
Method: "GET",
URL: &url.URL{
Path: healthPath,
},
}
if _, pat := mux.Handler(hreq); pat != healthPath {
mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, "ok")
})
}
}
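installHealthChecker only adds the trivial "ok" handler when nothing is already registered for /_ah/health, so an application can override it by registering its own handler on the default mux before Main runs. A minimal sketch (appengine.Main would be called where the comment indicates):

package main

import (
	"io"
	"net/http"
)

func main() {
	// Because this handler is registered before Main runs, installHealthChecker
	// above finds a non-default pattern for /_ah/health and leaves it alone.
	http.HandleFunc("/_ah/health", func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "healthy")
	})
	// google.golang.org/appengine.Main() would be called here in a real app.
}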

View File

@ -1,60 +0,0 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package internal
// This file has code for accessing metadata.
//
// References:
// https://cloud.google.com/compute/docs/metadata
import (
"fmt"
"io/ioutil"
"net/http"
"net/url"
)
const (
metadataHost = "metadata"
metadataPath = "/computeMetadata/v1/"
)
var (
metadataRequestHeaders = http.Header{
"Metadata-Flavor": []string{"Google"},
}
)
// TODO(dsymonds): Do we need to support default values, like Python?
func mustGetMetadata(key string) []byte {
b, err := getMetadata(key)
if err != nil {
panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err))
}
return b
}
func getMetadata(key string) ([]byte, error) {
// TODO(dsymonds): May need to use url.Parse to support keys with query args.
req := &http.Request{
Method: "GET",
URL: &url.URL{
Scheme: "http",
Host: metadataHost,
Path: metadataPath + key,
},
Header: metadataRequestHeaders,
Host: metadataHost,
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
}
return ioutil.ReadAll(resp.Body)
}
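The same metadata endpoint can be queried without this plumbing; a standalone equivalent of getMetadata, which only succeeds inside Google infrastructure where the "metadata" host resolves:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// getMetadata fetches a key from the internal metadata server, matching the
// helper above.
func getMetadata(key string) ([]byte, error) {
	req, err := http.NewRequest("GET", "http://metadata/computeMetadata/v1/"+key, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Metadata-Flavor", "Google")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
	}
	return ioutil.ReadAll(resp.Body)
}

func main() {
	zone, err := getMetadata("instance/zone")
	fmt.Println(string(zone), err)
}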

View File

@ -1,56 +0,0 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package internal
// This file implements a network dialer that limits the number of concurrent connections.
// It is only used for API calls.
import (
"log"
"net"
"runtime"
"sync"
"time"
)
var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
func limitRelease() {
// non-blocking
select {
case <-limitSem:
default:
// This should not normally happen.
log.Print("appengine: unbalanced limitSem release!")
}
}
func limitDial(network, addr string) (net.Conn, error) {
limitSem <- 1
// Dial with a timeout in case the API host is MIA.
// The connection should normally be very fast.
conn, err := net.DialTimeout(network, addr, 10*time.Second)
if err != nil {
limitRelease()
return nil, err
}
lc := &limitConn{Conn: conn}
runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
return lc, nil
}
type limitConn struct {
close sync.Once
net.Conn
}
func (lc *limitConn) Close() error {
defer lc.close.Do(func() {
limitRelease()
runtime.SetFinalizer(lc, nil)
})
return lc.Conn.Close()
}
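A dialer with this signature plugs directly into an http.Transport. A self-contained sketch of the same semaphore idea, simplified from the code above (the finalizer is omitted):

package main

import (
	"net"
	"net/http"
	"sync"
	"time"
)

// sem caps the number of concurrent outbound connections, like limitSem above.
var sem = make(chan struct{}, 100)

func limitedDial(network, addr string) (net.Conn, error) {
	sem <- struct{}{}
	conn, err := net.DialTimeout(network, addr, 10*time.Second)
	if err != nil {
		<-sem
		return nil, err
	}
	return &limitedConn{Conn: conn}, nil
}

// limitedConn releases its semaphore slot exactly once on Close.
type limitedConn struct {
	once sync.Once
	net.Conn
}

func (c *limitedConn) Close() error {
	defer c.once.Do(func() { <-sem })
	return c.Conn.Close()
}

func main() {
	client := &http.Client{Transport: &http.Transport{Dial: limitedDial}}
	_ = client // use client.Get / client.Do as usual
}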

View File

@ -1,40 +0,0 @@
#!/bin/bash -e
#
# This script rebuilds the generated code for the protocol buffers.
# To run this you will need protoc and goprotobuf installed;
# see https://github.com/golang/protobuf for instructions.
PKG=google.golang.org/appengine
function die() {
echo 1>&2 $*
exit 1
}
# Sanity check that the right tools are accessible.
for tool in go protoc protoc-gen-go; do
q=$(which $tool) || die "didn't find $tool"
echo 1>&2 "$tool: $q"
done
echo -n 1>&2 "finding package dir... "
pkgdir=$(go list -f '{{.Dir}}' $PKG)
echo 1>&2 $pkgdir
base=$(echo $pkgdir | sed "s,/$PKG\$,,")
echo 1>&2 "base: $base"
cd $base
# Run protoc once per package.
for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
echo 1>&2 "* $dir"
protoc --go_out=. $dir/*.proto
done
for f in $(find $PKG/internal -name '*.pb.go'); do
# Remove proto.RegisterEnum calls.
# These cause duplicate registration panics when these packages
# are used on classic App Engine. proto.RegisterEnum only affects
# parsing the text format; we don't care about that.
# https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
sed -i '/proto.RegisterEnum/d' $f
done

View File

@ -1,361 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
package remote_api
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type RpcError_ErrorCode int32
const (
RpcError_UNKNOWN RpcError_ErrorCode = 0
RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1
RpcError_PARSE_ERROR RpcError_ErrorCode = 2
RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3
RpcError_OVER_QUOTA RpcError_ErrorCode = 4
RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5
RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7
RpcError_BAD_REQUEST RpcError_ErrorCode = 8
RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9
RpcError_CANCELLED RpcError_ErrorCode = 10
RpcError_REPLAY_ERROR RpcError_ErrorCode = 11
RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12
)
var RpcError_ErrorCode_name = map[int32]string{
0: "UNKNOWN",
1: "CALL_NOT_FOUND",
2: "PARSE_ERROR",
3: "SECURITY_VIOLATION",
4: "OVER_QUOTA",
5: "REQUEST_TOO_LARGE",
6: "CAPABILITY_DISABLED",
7: "FEATURE_DISABLED",
8: "BAD_REQUEST",
9: "RESPONSE_TOO_LARGE",
10: "CANCELLED",
11: "REPLAY_ERROR",
12: "DEADLINE_EXCEEDED",
}
var RpcError_ErrorCode_value = map[string]int32{
"UNKNOWN": 0,
"CALL_NOT_FOUND": 1,
"PARSE_ERROR": 2,
"SECURITY_VIOLATION": 3,
"OVER_QUOTA": 4,
"REQUEST_TOO_LARGE": 5,
"CAPABILITY_DISABLED": 6,
"FEATURE_DISABLED": 7,
"BAD_REQUEST": 8,
"RESPONSE_TOO_LARGE": 9,
"CANCELLED": 10,
"REPLAY_ERROR": 11,
"DEADLINE_EXCEEDED": 12,
}
func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
p := new(RpcError_ErrorCode)
*p = x
return p
}
func (x RpcError_ErrorCode) String() string {
return proto.EnumName(RpcError_ErrorCode_name, int32(x))
}
func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
if err != nil {
return err
}
*x = RpcError_ErrorCode(value)
return nil
}
func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0}
}
type Request struct {
ServiceName *string `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"`
Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
RequestId *string `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Request) Reset() { *m = Request{} }
func (m *Request) String() string { return proto.CompactTextString(m) }
func (*Request) ProtoMessage() {}
func (*Request) Descriptor() ([]byte, []int) {
return fileDescriptor_remote_api_1978114ec33a273d, []int{0}
}
func (m *Request) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Request.Unmarshal(m, b)
}
func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Request.Marshal(b, m, deterministic)
}
func (dst *Request) XXX_Merge(src proto.Message) {
xxx_messageInfo_Request.Merge(dst, src)
}
func (m *Request) XXX_Size() int {
return xxx_messageInfo_Request.Size(m)
}
func (m *Request) XXX_DiscardUnknown() {
xxx_messageInfo_Request.DiscardUnknown(m)
}
var xxx_messageInfo_Request proto.InternalMessageInfo
func (m *Request) GetServiceName() string {
if m != nil && m.ServiceName != nil {
return *m.ServiceName
}
return ""
}
func (m *Request) GetMethod() string {
if m != nil && m.Method != nil {
return *m.Method
}
return ""
}
func (m *Request) GetRequest() []byte {
if m != nil {
return m.Request
}
return nil
}
func (m *Request) GetRequestId() string {
if m != nil && m.RequestId != nil {
return *m.RequestId
}
return ""
}
type ApplicationError struct {
Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ApplicationError) Reset() { *m = ApplicationError{} }
func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
func (*ApplicationError) ProtoMessage() {}
func (*ApplicationError) Descriptor() ([]byte, []int) {
return fileDescriptor_remote_api_1978114ec33a273d, []int{1}
}
func (m *ApplicationError) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ApplicationError.Unmarshal(m, b)
}
func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic)
}
func (dst *ApplicationError) XXX_Merge(src proto.Message) {
xxx_messageInfo_ApplicationError.Merge(dst, src)
}
func (m *ApplicationError) XXX_Size() int {
return xxx_messageInfo_ApplicationError.Size(m)
}
func (m *ApplicationError) XXX_DiscardUnknown() {
xxx_messageInfo_ApplicationError.DiscardUnknown(m)
}
var xxx_messageInfo_ApplicationError proto.InternalMessageInfo
func (m *ApplicationError) GetCode() int32 {
if m != nil && m.Code != nil {
return *m.Code
}
return 0
}
func (m *ApplicationError) GetDetail() string {
if m != nil && m.Detail != nil {
return *m.Detail
}
return ""
}
type RpcError struct {
Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RpcError) Reset() { *m = RpcError{} }
func (m *RpcError) String() string { return proto.CompactTextString(m) }
func (*RpcError) ProtoMessage() {}
func (*RpcError) Descriptor() ([]byte, []int) {
return fileDescriptor_remote_api_1978114ec33a273d, []int{2}
}
func (m *RpcError) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RpcError.Unmarshal(m, b)
}
func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RpcError.Marshal(b, m, deterministic)
}
func (dst *RpcError) XXX_Merge(src proto.Message) {
xxx_messageInfo_RpcError.Merge(dst, src)
}
func (m *RpcError) XXX_Size() int {
return xxx_messageInfo_RpcError.Size(m)
}
func (m *RpcError) XXX_DiscardUnknown() {
xxx_messageInfo_RpcError.DiscardUnknown(m)
}
var xxx_messageInfo_RpcError proto.InternalMessageInfo
func (m *RpcError) GetCode() int32 {
if m != nil && m.Code != nil {
return *m.Code
}
return 0
}
func (m *RpcError) GetDetail() string {
if m != nil && m.Detail != nil {
return *m.Detail
}
return ""
}
type Response struct {
Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" json:"application_error,omitempty"`
JavaException []byte `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"`
RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Response) Reset() { *m = Response{} }
func (m *Response) String() string { return proto.CompactTextString(m) }
func (*Response) ProtoMessage() {}
func (*Response) Descriptor() ([]byte, []int) {
return fileDescriptor_remote_api_1978114ec33a273d, []int{3}
}
func (m *Response) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Response.Unmarshal(m, b)
}
func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Response.Marshal(b, m, deterministic)
}
func (dst *Response) XXX_Merge(src proto.Message) {
xxx_messageInfo_Response.Merge(dst, src)
}
func (m *Response) XXX_Size() int {
return xxx_messageInfo_Response.Size(m)
}
func (m *Response) XXX_DiscardUnknown() {
xxx_messageInfo_Response.DiscardUnknown(m)
}
var xxx_messageInfo_Response proto.InternalMessageInfo
func (m *Response) GetResponse() []byte {
if m != nil {
return m.Response
}
return nil
}
func (m *Response) GetException() []byte {
if m != nil {
return m.Exception
}
return nil
}
func (m *Response) GetApplicationError() *ApplicationError {
if m != nil {
return m.ApplicationError
}
return nil
}
func (m *Response) GetJavaException() []byte {
if m != nil {
return m.JavaException
}
return nil
}
func (m *Response) GetRpcError() *RpcError {
if m != nil {
return m.RpcError
}
return nil
}
func init() {
proto.RegisterType((*Request)(nil), "remote_api.Request")
proto.RegisterType((*ApplicationError)(nil), "remote_api.ApplicationError")
proto.RegisterType((*RpcError)(nil), "remote_api.RpcError")
proto.RegisterType((*Response)(nil), "remote_api.Response")
}
func init() {
proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d)
}
var fileDescriptor_remote_api_1978114ec33a273d = []byte{
// 531 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40,
0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 0x42,
0xca, 0x53, 0x2a, 0x38, 0x00, 0x62, 0x63, 0x6f, 0x91, 0x85, 0x65, 0xa7, 0x6b, 0xbb, 0x50, 0x5e,
0x56, 0x2b, 0x67, 0x65, 0x8c, 0x12, 0xaf, 0xd9, 0x98, 0x8a, 0x17, 0x6e, 0xc0, 0xb5, 0x38, 0x0c,
0xb7, 0x40, 0x36, 0x6e, 0x63, 0xf5, 0x89, 0xb7, 0x7f, 0x7e, 0x7b, 0xe6, 0x1b, 0xcd, 0xcc, 0xc2,
0xbb, 0x5c, 0xa9, 0x7c, 0x23, 0x17, 0xb9, 0xda, 0x88, 0x32, 0x5f, 0x28, 0x9d, 0x5f, 0x88, 0xaa,
0x92, 0x65, 0x5e, 0x94, 0xf2, 0xa2, 0x28, 0x6b, 0xa9, 0x4b, 0xb1, 0xb9, 0xd0, 0x72, 0xab, 0x6a,
0xc9, 0x45, 0x55, 0xf4, 0xe4, 0xa2, 0xd2, 0xaa, 0x56, 0x18, 0xf6, 0xce, 0xec, 0x27, 0x8c, 0x98,
0xfc, 0xf6, 0x5d, 0xee, 0x6a, 0xfc, 0x12, 0xec, 0x9d, 0xd4, 0xb7, 0x45, 0x26, 0x79, 0x29, 0xb6,
0xd2, 0x31, 0xa7, 0xe6, 0xdc, 0x62, 0x93, 0xce, 0x0b, 0xc5, 0x56, 0xe2, 0x33, 0x38, 0xdc, 0xca,
0xfa, 0x8b, 0x5a, 0x3b, 0x07, 0xed, 0xc7, 0x2e, 0xc2, 0x0e, 0x8c, 0xf4, 0xbf, 0x2a, 0xce, 0x60,
0x6a, 0xce, 0x6d, 0x76, 0x17, 0xe2, 0x17, 0x00, 0x9d, 0xe4, 0xc5, 0xda, 0x19, 0x4e, 0x8d, 0xb9,
0xc5, 0xac, 0xce, 0xf1, 0xd7, 0xb3, 0xb7, 0x80, 0x48, 0x55, 0x6d, 0x8a, 0x4c, 0xd4, 0x85, 0x2a,
0xa9, 0xd6, 0x4a, 0x63, 0x0c, 0x83, 0x4c, 0xad, 0xa5, 0x63, 0x4c, 0xcd, 0xf9, 0x90, 0xb5, 0xba,
0x01, 0xaf, 0x65, 0x2d, 0x8a, 0x4d, 0xd7, 0x55, 0x17, 0xcd, 0x7e, 0x9b, 0x30, 0x66, 0x55, 0xf6,
0x7f, 0x89, 0x46, 0x2f, 0xf1, 0x97, 0x09, 0x56, 0x9b, 0xe5, 0x36, 0x7f, 0x4d, 0x60, 0x94, 0x86,
0x1f, 0xc2, 0xe8, 0x63, 0x88, 0x1e, 0x61, 0x0c, 0xc7, 0x2e, 0x09, 0x02, 0x1e, 0x46, 0x09, 0xbf,
0x8c, 0xd2, 0xd0, 0x43, 0x06, 0x7e, 0x0c, 0x93, 0x15, 0x61, 0x31, 0xe5, 0x94, 0xb1, 0x88, 0x21,
0x13, 0x9f, 0x01, 0x8e, 0xa9, 0x9b, 0x32, 0x3f, 0xb9, 0xe1, 0xd7, 0x7e, 0x14, 0x90, 0xc4, 0x8f,
0x42, 0x74, 0x80, 0x8f, 0x01, 0xa2, 0x6b, 0xca, 0xf8, 0x55, 0x1a, 0x25, 0x04, 0x0d, 0xf0, 0x53,
0x38, 0x61, 0xf4, 0x2a, 0xa5, 0x71, 0xc2, 0x93, 0x28, 0xe2, 0x01, 0x61, 0xef, 0x29, 0x1a, 0xe2,
0x67, 0xf0, 0xc4, 0x25, 0x2b, 0xb2, 0xf4, 0x83, 0xa6, 0x80, 0xe7, 0xc7, 0x64, 0x19, 0x50, 0x0f,
0x1d, 0xe2, 0x53, 0x40, 0x97, 0x94, 0x24, 0x29, 0xa3, 0x7b, 0x77, 0xd4, 0xe0, 0x97, 0xc4, 0xe3,
0x5d, 0x25, 0x34, 0x6e, 0xf0, 0x8c, 0xc6, 0xab, 0x28, 0x8c, 0x69, 0xaf, 0xae, 0x85, 0x8f, 0xc0,
0x72, 0x49, 0xe8, 0xd2, 0xa0, 0xc9, 0x03, 0x8c, 0xc0, 0x66, 0x74, 0x15, 0x90, 0x9b, 0xae, 0xef,
0x49, 0xd3, 0x8f, 0x47, 0x89, 0x17, 0xf8, 0x21, 0xe5, 0xf4, 0x93, 0x4b, 0xa9, 0x47, 0x3d, 0x64,
0xcf, 0xfe, 0x18, 0x30, 0x66, 0x72, 0x57, 0xa9, 0x72, 0x27, 0xf1, 0x73, 0x18, 0xeb, 0x4e, 0x3b,
0xc6, 0xd4, 0x98, 0xdb, 0xec, 0x3e, 0xc6, 0xe7, 0x60, 0xc9, 0x1f, 0x99, 0xac, 0x9a, 0x75, 0xb5,
0x23, 0xb5, 0xd9, 0xde, 0xc0, 0x3e, 0x9c, 0x88, 0xfd, 0x3a, 0xb9, 0x6c, 0x06, 0xec, 0x1c, 0x4c,
0x8d, 0xf9, 0xe4, 0xcd, 0xf9, 0xa2, 0x77, 0x87, 0x0f, 0x77, 0xce, 0x90, 0x78, 0x78, 0x05, 0xaf,
0xe0, 0xf8, 0xab, 0xb8, 0x15, 0x7c, 0x4f, 0x1b, 0xb4, 0xb4, 0xa3, 0xc6, 0xa5, 0xf7, 0xc4, 0xd7,
0x60, 0xe9, 0x2a, 0xeb, 0x48, 0xc3, 0x96, 0x74, 0xda, 0x27, 0xdd, 0x1d, 0x07, 0x1b, 0xeb, 0x4e,
0x2d, 0xed, 0xcf, 0xbd, 0x07, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0xd1, 0x0f, 0x22, 0x4f,
0x03, 0x00, 0x00,
}

View File

@ -1,44 +0,0 @@
syntax = "proto2";
option go_package = "remote_api";
package remote_api;
message Request {
required string service_name = 2;
required string method = 3;
required bytes request = 4;
optional string request_id = 5;
}
message ApplicationError {
required int32 code = 1;
required string detail = 2;
}
message RpcError {
enum ErrorCode {
UNKNOWN = 0;
CALL_NOT_FOUND = 1;
PARSE_ERROR = 2;
SECURITY_VIOLATION = 3;
OVER_QUOTA = 4;
REQUEST_TOO_LARGE = 5;
CAPABILITY_DISABLED = 6;
FEATURE_DISABLED = 7;
BAD_REQUEST = 8;
RESPONSE_TOO_LARGE = 9;
CANCELLED = 10;
REPLAY_ERROR = 11;
DEADLINE_EXCEEDED = 12;
}
required int32 code = 1;
optional string detail = 2;
}
message Response {
optional bytes response = 1;
optional bytes exception = 2;
optional ApplicationError application_error = 3;
optional bytes java_exception = 4;
optional RpcError rpc_error = 5;
}
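The envelope defined here wraps an already-serialized service request. A hedged sketch using the generated types above, with placeholder service, method and payload values:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	remotepb "google.golang.org/appengine/internal/remote_api"
)

func main() {
	// Placeholder payload standing in for an already-marshaled service message.
	payload := []byte("serialized service request")
	req := &remotepb.Request{
		ServiceName: proto.String("datastore_v3"),
		Method:      proto.String("Get"),
		Request:     payload,
		RequestId:   proto.String("example-request-id"),
	}
	b, err := proto.Marshal(req)
	fmt.Println(len(b), err)
}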

View File

@ -1,115 +0,0 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package internal
// This file implements hooks for applying datastore transactions.
import (
"errors"
"reflect"
"github.com/golang/protobuf/proto"
netcontext "golang.org/x/net/context"
basepb "google.golang.org/appengine/internal/base"
pb "google.golang.org/appengine/internal/datastore"
)
var transactionSetters = make(map[reflect.Type]reflect.Value)
// RegisterTransactionSetter registers a function that sets transaction information
// in a protocol buffer message. f should be a function with two arguments,
// the first being a protocol buffer type, and the second being *datastore.Transaction.
func RegisterTransactionSetter(f interface{}) {
v := reflect.ValueOf(f)
transactionSetters[v.Type().In(0)] = v
}
// applyTransaction applies the transaction t to message pb
// by using the relevant setter passed to RegisterTransactionSetter.
func applyTransaction(pb proto.Message, t *pb.Transaction) {
v := reflect.ValueOf(pb)
if f, ok := transactionSetters[v.Type()]; ok {
f.Call([]reflect.Value{v, reflect.ValueOf(t)})
}
}
var transactionKey = "used for *Transaction"
func transactionFromContext(ctx netcontext.Context) *transaction {
t, _ := ctx.Value(&transactionKey).(*transaction)
return t
}
func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
return netcontext.WithValue(ctx, &transactionKey, t)
}
type transaction struct {
transaction pb.Transaction
finished bool
}
var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) {
if transactionFromContext(c) != nil {
return nil, errors.New("nested transactions are not supported")
}
// Begin the transaction.
t := &transaction{}
req := &pb.BeginTransactionRequest{
App: proto.String(FullyQualifiedAppID(c)),
}
if xg {
req.AllowMultipleEg = proto.Bool(true)
}
if previousTransaction != nil {
req.PreviousTransaction = previousTransaction
}
if readOnly {
req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum()
} else {
req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum()
}
if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
return nil, err
}
// Call f, rolling back the transaction if f returns a non-nil error, or panics.
// The panic is not recovered.
defer func() {
if t.finished {
return
}
t.finished = true
// Ignore the error return value, since we are already returning a non-nil
// error (or we're panicking).
Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
}()
if err := f(withTransaction(c, t)); err != nil {
return &t.transaction, err
}
t.finished = true
// Commit the transaction.
res := &pb.CommitResponse{}
err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
if ae, ok := err.(*APIError); ok {
/* TODO: restore this conditional
if appengine.IsDevAppServer() {
*/
// The Python Dev AppServer raises an ApplicationError with error code 2 (which is
// Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
return &t.transaction, ErrConcurrentTransaction
}
if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
return &t.transaction, ErrConcurrentTransaction
}
}
return &t.transaction, err
}
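RunTransactionOnce reports optimistic-concurrency failures as ErrConcurrentTransaction, and callers are expected to retry. A rough sketch of such a retry loop (the higher-level datastore package does something similar; the attempt limit here is arbitrary):

package main

import (
	"log"

	netcontext "golang.org/x/net/context"
	"google.golang.org/appengine/internal"
)

// runWithRetries retries f when the commit loses an optimistic-concurrency
// race, which RunTransactionOnce above reports as ErrConcurrentTransaction.
func runWithRetries(ctx netcontext.Context, f func(netcontext.Context) error) error {
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		if _, err = internal.RunTransactionOnce(ctx, f, false, false, nil); err != internal.ErrConcurrentTransaction {
			return err
		}
		log.Printf("transaction attempt %d lost a concurrency race; retrying", attempt+1)
	}
	return err
}

func main() {
	// A real caller would pass an App Engine request context and a closure
	// performing datastore operations; this file only shows the retry shape.
	_ = runWithRetries
}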

View File

@ -1,527 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
package urlfetch
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type URLFetchServiceError_ErrorCode int32
const (
URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0
URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1
URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2
URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3
URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4
URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5
URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6
URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7
URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8
URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10
URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11
URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12
)
var URLFetchServiceError_ErrorCode_name = map[int32]string{
0: "OK",
1: "INVALID_URL",
2: "FETCH_ERROR",
3: "UNSPECIFIED_ERROR",
4: "RESPONSE_TOO_LARGE",
5: "DEADLINE_EXCEEDED",
6: "SSL_CERTIFICATE_ERROR",
7: "DNS_ERROR",
8: "CLOSED",
9: "INTERNAL_TRANSIENT_ERROR",
10: "TOO_MANY_REDIRECTS",
11: "MALFORMED_REPLY",
12: "CONNECTION_ERROR",
}
var URLFetchServiceError_ErrorCode_value = map[string]int32{
"OK": 0,
"INVALID_URL": 1,
"FETCH_ERROR": 2,
"UNSPECIFIED_ERROR": 3,
"RESPONSE_TOO_LARGE": 4,
"DEADLINE_EXCEEDED": 5,
"SSL_CERTIFICATE_ERROR": 6,
"DNS_ERROR": 7,
"CLOSED": 8,
"INTERNAL_TRANSIENT_ERROR": 9,
"TOO_MANY_REDIRECTS": 10,
"MALFORMED_REPLY": 11,
"CONNECTION_ERROR": 12,
}
func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
p := new(URLFetchServiceError_ErrorCode)
*p = x
return p
}
func (x URLFetchServiceError_ErrorCode) String() string {
return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
}
func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
if err != nil {
return err
}
*x = URLFetchServiceError_ErrorCode(value)
return nil
}
func (URLFetchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0, 0}
}
type URLFetchRequest_RequestMethod int32
const (
URLFetchRequest_GET URLFetchRequest_RequestMethod = 1
URLFetchRequest_POST URLFetchRequest_RequestMethod = 2
URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3
URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4
URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6
)
var URLFetchRequest_RequestMethod_name = map[int32]string{
1: "GET",
2: "POST",
3: "HEAD",
4: "PUT",
5: "DELETE",
6: "PATCH",
}
var URLFetchRequest_RequestMethod_value = map[string]int32{
"GET": 1,
"POST": 2,
"HEAD": 3,
"PUT": 4,
"DELETE": 5,
"PATCH": 6,
}
func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
p := new(URLFetchRequest_RequestMethod)
*p = x
return p
}
func (x URLFetchRequest_RequestMethod) String() string {
return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
}
func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
if err != nil {
return err
}
*x = URLFetchRequest_RequestMethod(value)
return nil
}
func (URLFetchRequest_RequestMethod) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
}
type URLFetchServiceError struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} }
func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
func (*URLFetchServiceError) ProtoMessage() {}
func (*URLFetchServiceError) Descriptor() ([]byte, []int) {
return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0}
}
func (m *URLFetchServiceError) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_URLFetchServiceError.Unmarshal(m, b)
}
func (m *URLFetchServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_URLFetchServiceError.Marshal(b, m, deterministic)
}
func (dst *URLFetchServiceError) XXX_Merge(src proto.Message) {
xxx_messageInfo_URLFetchServiceError.Merge(dst, src)
}
func (m *URLFetchServiceError) XXX_Size() int {
return xxx_messageInfo_URLFetchServiceError.Size(m)
}
func (m *URLFetchServiceError) XXX_DiscardUnknown() {
xxx_messageInfo_URLFetchServiceError.DiscardUnknown(m)
}
var xxx_messageInfo_URLFetchServiceError proto.InternalMessageInfo
type URLFetchRequest struct {
Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"`
Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"`
FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} }
func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
func (*URLFetchRequest) ProtoMessage() {}
func (*URLFetchRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1}
}
func (m *URLFetchRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_URLFetchRequest.Unmarshal(m, b)
}
func (m *URLFetchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_URLFetchRequest.Marshal(b, m, deterministic)
}
func (dst *URLFetchRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_URLFetchRequest.Merge(dst, src)
}
func (m *URLFetchRequest) XXX_Size() int {
return xxx_messageInfo_URLFetchRequest.Size(m)
}
func (m *URLFetchRequest) XXX_DiscardUnknown() {
xxx_messageInfo_URLFetchRequest.DiscardUnknown(m)
}
var xxx_messageInfo_URLFetchRequest proto.InternalMessageInfo
const Default_URLFetchRequest_FollowRedirects bool = true
const Default_URLFetchRequest_MustValidateServerCertificate bool = true
func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
if m != nil && m.Method != nil {
return *m.Method
}
return URLFetchRequest_GET
}
func (m *URLFetchRequest) GetUrl() string {
if m != nil && m.Url != nil {
return *m.Url
}
return ""
}
func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
if m != nil {
return m.Header
}
return nil
}
func (m *URLFetchRequest) GetPayload() []byte {
if m != nil {
return m.Payload
}
return nil
}
func (m *URLFetchRequest) GetFollowRedirects() bool {
if m != nil && m.FollowRedirects != nil {
return *m.FollowRedirects
}
return Default_URLFetchRequest_FollowRedirects
}
func (m *URLFetchRequest) GetDeadline() float64 {
if m != nil && m.Deadline != nil {
return *m.Deadline
}
return 0
}
func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
if m != nil && m.MustValidateServerCertificate != nil {
return *m.MustValidateServerCertificate
}
return Default_URLFetchRequest_MustValidateServerCertificate
}
type URLFetchRequest_Header struct {
Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} }
func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
func (*URLFetchRequest_Header) ProtoMessage() {}
func (*URLFetchRequest_Header) Descriptor() ([]byte, []int) {
return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
}
func (m *URLFetchRequest_Header) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_URLFetchRequest_Header.Unmarshal(m, b)
}
func (m *URLFetchRequest_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_URLFetchRequest_Header.Marshal(b, m, deterministic)
}
func (dst *URLFetchRequest_Header) XXX_Merge(src proto.Message) {
xxx_messageInfo_URLFetchRequest_Header.Merge(dst, src)
}
func (m *URLFetchRequest_Header) XXX_Size() int {
return xxx_messageInfo_URLFetchRequest_Header.Size(m)
}
func (m *URLFetchRequest_Header) XXX_DiscardUnknown() {
xxx_messageInfo_URLFetchRequest_Header.DiscardUnknown(m)
}
var xxx_messageInfo_URLFetchRequest_Header proto.InternalMessageInfo
func (m *URLFetchRequest_Header) GetKey() string {
if m != nil && m.Key != nil {
return *m.Key
}
return ""
}
func (m *URLFetchRequest_Header) GetValue() string {
if m != nil && m.Value != nil {
return *m.Value
}
return ""
}
type URLFetchResponse struct {
Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"`
StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"`
Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"`
ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"`
ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"`
FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"`
ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} }
func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
func (*URLFetchResponse) ProtoMessage() {}
func (*URLFetchResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2}
}
func (m *URLFetchResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_URLFetchResponse.Unmarshal(m, b)
}
func (m *URLFetchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_URLFetchResponse.Marshal(b, m, deterministic)
}
func (dst *URLFetchResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_URLFetchResponse.Merge(dst, src)
}
func (m *URLFetchResponse) XXX_Size() int {
return xxx_messageInfo_URLFetchResponse.Size(m)
}
func (m *URLFetchResponse) XXX_DiscardUnknown() {
xxx_messageInfo_URLFetchResponse.DiscardUnknown(m)
}
var xxx_messageInfo_URLFetchResponse proto.InternalMessageInfo
const Default_URLFetchResponse_ContentWasTruncated bool = false
const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
const Default_URLFetchResponse_ApiBytesSent int64 = 0
const Default_URLFetchResponse_ApiBytesReceived int64 = 0
func (m *URLFetchResponse) GetContent() []byte {
if m != nil {
return m.Content
}
return nil
}
func (m *URLFetchResponse) GetStatusCode() int32 {
if m != nil && m.StatusCode != nil {
return *m.StatusCode
}
return 0
}
func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
if m != nil {
return m.Header
}
return nil
}
func (m *URLFetchResponse) GetContentWasTruncated() bool {
if m != nil && m.ContentWasTruncated != nil {
return *m.ContentWasTruncated
}
return Default_URLFetchResponse_ContentWasTruncated
}
func (m *URLFetchResponse) GetExternalBytesSent() int64 {
if m != nil && m.ExternalBytesSent != nil {
return *m.ExternalBytesSent
}
return 0
}
func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
if m != nil && m.ExternalBytesReceived != nil {
return *m.ExternalBytesReceived
}
return 0
}
func (m *URLFetchResponse) GetFinalUrl() string {
if m != nil && m.FinalUrl != nil {
return *m.FinalUrl
}
return ""
}
func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
if m != nil && m.ApiCpuMilliseconds != nil {
return *m.ApiCpuMilliseconds
}
return Default_URLFetchResponse_ApiCpuMilliseconds
}
func (m *URLFetchResponse) GetApiBytesSent() int64 {
if m != nil && m.ApiBytesSent != nil {
return *m.ApiBytesSent
}
return Default_URLFetchResponse_ApiBytesSent
}
func (m *URLFetchResponse) GetApiBytesReceived() int64 {
if m != nil && m.ApiBytesReceived != nil {
return *m.ApiBytesReceived
}
return Default_URLFetchResponse_ApiBytesReceived
}
type URLFetchResponse_Header struct {
Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} }
func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
func (*URLFetchResponse_Header) ProtoMessage() {}
func (*URLFetchResponse_Header) Descriptor() ([]byte, []int) {
return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2, 0}
}
func (m *URLFetchResponse_Header) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_URLFetchResponse_Header.Unmarshal(m, b)
}
func (m *URLFetchResponse_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_URLFetchResponse_Header.Marshal(b, m, deterministic)
}
func (dst *URLFetchResponse_Header) XXX_Merge(src proto.Message) {
xxx_messageInfo_URLFetchResponse_Header.Merge(dst, src)
}
func (m *URLFetchResponse_Header) XXX_Size() int {
return xxx_messageInfo_URLFetchResponse_Header.Size(m)
}
func (m *URLFetchResponse_Header) XXX_DiscardUnknown() {
xxx_messageInfo_URLFetchResponse_Header.DiscardUnknown(m)
}
var xxx_messageInfo_URLFetchResponse_Header proto.InternalMessageInfo
func (m *URLFetchResponse_Header) GetKey() string {
if m != nil && m.Key != nil {
return *m.Key
}
return ""
}
func (m *URLFetchResponse_Header) GetValue() string {
if m != nil && m.Value != nil {
return *m.Value
}
return ""
}
func init() {
proto.RegisterType((*URLFetchServiceError)(nil), "appengine.URLFetchServiceError")
proto.RegisterType((*URLFetchRequest)(nil), "appengine.URLFetchRequest")
proto.RegisterType((*URLFetchRequest_Header)(nil), "appengine.URLFetchRequest.Header")
proto.RegisterType((*URLFetchResponse)(nil), "appengine.URLFetchResponse")
proto.RegisterType((*URLFetchResponse_Header)(nil), "appengine.URLFetchResponse.Header")
}
func init() {
proto.RegisterFile("google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto", fileDescriptor_urlfetch_service_b245a7065f33bced)
}
var fileDescriptor_urlfetch_service_b245a7065f33bced = []byte{
// 770 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xe3, 0x54,
0x10, 0xc6, 0x76, 0x7e, 0xa7, 0x5d, 0x7a, 0x76, 0xb6, 0x45, 0x66, 0xb5, 0xa0, 0x10, 0x09, 0x29,
0x17, 0x90, 0x2e, 0x2b, 0x24, 0x44, 0xaf, 0x70, 0xed, 0x93, 0xad, 0xa9, 0x63, 0x47, 0xc7, 0x4e,
0x61, 0xb9, 0xb1, 0xac, 0x78, 0x9a, 0x5a, 0xb2, 0xec, 0x60, 0x9f, 0x2c, 0xf4, 0x35, 0x78, 0x0d,
0xde, 0x87, 0xa7, 0xe1, 0x02, 0x9d, 0xc4, 0xc9, 0x6e, 0xbb, 0xd1, 0x4a, 0x5c, 0x65, 0xe6, 0x9b,
0xef, 0xcc, 0x99, 0x7c, 0xdf, 0xf8, 0x80, 0xb3, 0x2c, 0xcb, 0x65, 0x4e, 0xe3, 0x65, 0x99, 0x27,
0xc5, 0x72, 0x5c, 0x56, 0xcb, 0xf3, 0x64, 0xb5, 0xa2, 0x62, 0x99, 0x15, 0x74, 0x9e, 0x15, 0x92,
0xaa, 0x22, 0xc9, 0xcf, 0xd7, 0x55, 0x7e, 0x4b, 0x72, 0x71, 0xb7, 0x0f, 0xe2, 0x9a, 0xaa, 0xb7,
0xd9, 0x82, 0xc6, 0xab, 0xaa, 0x94, 0x25, 0xf6, 0xf7, 0x67, 0x86, 0x7f, 0xeb, 0x70, 0x3a, 0x17,
0xde, 0x44, 0xb1, 0xc2, 0x2d, 0x89, 0x57, 0x55, 0x59, 0x0d, 0xff, 0xd2, 0xa1, 0xbf, 0x89, 0xec,
0x32, 0x25, 0xec, 0x80, 0x1e, 0x5c, 0xb3, 0x4f, 0xf0, 0x04, 0x8e, 0x5c, 0xff, 0xc6, 0xf2, 0x5c,
0x27, 0x9e, 0x0b, 0x8f, 0x69, 0x0a, 0x98, 0xf0, 0xc8, 0xbe, 0x8a, 0xb9, 0x10, 0x81, 0x60, 0x3a,
0x9e, 0xc1, 0xd3, 0xb9, 0x1f, 0xce, 0xb8, 0xed, 0x4e, 0x5c, 0xee, 0x34, 0xb0, 0x81, 0x9f, 0x01,
0x0a, 0x1e, 0xce, 0x02, 0x3f, 0xe4, 0x71, 0x14, 0x04, 0xb1, 0x67, 0x89, 0xd7, 0x9c, 0xb5, 0x14,
0xdd, 0xe1, 0x96, 0xe3, 0xb9, 0x3e, 0x8f, 0xf9, 0xaf, 0x36, 0xe7, 0x0e, 0x77, 0x58, 0x1b, 0x3f,
0x87, 0xb3, 0x30, 0xf4, 0x62, 0x9b, 0x8b, 0xc8, 0x9d, 0xb8, 0xb6, 0x15, 0xf1, 0xa6, 0x53, 0x07,
0x9f, 0x40, 0xdf, 0xf1, 0xc3, 0x26, 0xed, 0x22, 0x40, 0xc7, 0xf6, 0x82, 0x90, 0x3b, 0xac, 0x87,
0x2f, 0xc0, 0x74, 0xfd, 0x88, 0x0b, 0xdf, 0xf2, 0xe2, 0x48, 0x58, 0x7e, 0xe8, 0x72, 0x3f, 0x6a,
0x98, 0x7d, 0x35, 0x82, 0xba, 0x79, 0x6a, 0xf9, 0x6f, 0x62, 0xc1, 0x1d, 0x57, 0x70, 0x3b, 0x0a,
0x19, 0xe0, 0x33, 0x38, 0x99, 0x5a, 0xde, 0x24, 0x10, 0x53, 0xee, 0xc4, 0x82, 0xcf, 0xbc, 0x37,
0xec, 0x08, 0x4f, 0x81, 0xd9, 0x81, 0xef, 0x73, 0x3b, 0x72, 0x03, 0xbf, 0x69, 0x71, 0x3c, 0xfc,
0xc7, 0x80, 0x93, 0x9d, 0x5a, 0x82, 0x7e, 0x5f, 0x53, 0x2d, 0xf1, 0x27, 0xe8, 0x4c, 0x49, 0xde,
0x95, 0xa9, 0xa9, 0x0d, 0xf4, 0xd1, 0xa7, 0xaf, 0x46, 0xe3, 0xbd, 0xba, 0xe3, 0x47, 0xdc, 0x71,
0xf3, 0xbb, 0xe5, 0x8b, 0xe6, 0x1c, 0x32, 0x30, 0xe6, 0x55, 0x6e, 0xea, 0x03, 0x7d, 0xd4, 0x17,
0x2a, 0xc4, 0x1f, 0xa1, 0x73, 0x47, 0x49, 0x4a, 0x95, 0x69, 0x0c, 0x8c, 0x11, 0xbc, 0xfa, 0xea,
0x23, 0x3d, 0xaf, 0x36, 0x44, 0xd1, 0x1c, 0xc0, 0x17, 0xd0, 0x9d, 0x25, 0xf7, 0x79, 0x99, 0xa4,
0x66, 0x67, 0xa0, 0x8d, 0x8e, 0x2f, 0xf5, 0x9e, 0x26, 0x76, 0x10, 0x8e, 0xe1, 0x64, 0x52, 0xe6,
0x79, 0xf9, 0x87, 0xa0, 0x34, 0xab, 0x68, 0x21, 0x6b, 0xb3, 0x3b, 0xd0, 0x46, 0xbd, 0x8b, 0x96,
0xac, 0xd6, 0x24, 0x1e, 0x17, 0xf1, 0x39, 0xf4, 0x1c, 0x4a, 0xd2, 0x3c, 0x2b, 0xc8, 0xec, 0x0d,
0xb4, 0x91, 0x26, 0xf6, 0x39, 0xfe, 0x0c, 0x5f, 0x4c, 0xd7, 0xb5, 0xbc, 0x49, 0xf2, 0x2c, 0x4d,
0x24, 0xa9, 0xed, 0xa1, 0xca, 0xa6, 0x4a, 0x66, 0xb7, 0xd9, 0x22, 0x91, 0x64, 0xf6, 0xdf, 0xeb,
0xfc, 0x71, 0xea, 0xf3, 0x97, 0xd0, 0xd9, 0xfe, 0x0f, 0x25, 0xc6, 0x35, 0xdd, 0x9b, 0xad, 0xad,
0x18, 0xd7, 0x74, 0x8f, 0xa7, 0xd0, 0xbe, 0x49, 0xf2, 0x35, 0x99, 0xed, 0x0d, 0xb6, 0x4d, 0x86,
0x1e, 0x3c, 0x79, 0xa0, 0x26, 0x76, 0xc1, 0x78, 0xcd, 0x23, 0xa6, 0x61, 0x0f, 0x5a, 0xb3, 0x20,
0x8c, 0x98, 0xae, 0xa2, 0x2b, 0x6e, 0x39, 0xcc, 0x50, 0xc5, 0xd9, 0x3c, 0x62, 0x2d, 0xb5, 0x2e,
0x0e, 0xf7, 0x78, 0xc4, 0x59, 0x1b, 0xfb, 0xd0, 0x9e, 0x59, 0x91, 0x7d, 0xc5, 0x3a, 0xc3, 0x7f,
0x0d, 0x60, 0xef, 0x84, 0xad, 0x57, 0x65, 0x51, 0x13, 0x9a, 0xd0, 0xb5, 0xcb, 0x42, 0x52, 0x21,
0x4d, 0x4d, 0x49, 0x29, 0x76, 0x29, 0x7e, 0x09, 0x10, 0xca, 0x44, 0xae, 0x6b, 0xf5, 0x71, 0x6c,
0x8c, 0x6b, 0x8b, 0xf7, 0x10, 0xbc, 0x78, 0xe4, 0xdf, 0xf0, 0xa0, 0x7f, 0xdb, 0x6b, 0x1e, 0x1b,
0xf8, 0x03, 0x3c, 0x6b, 0xae, 0xf9, 0x25, 0xa9, 0xa3, 0x6a, 0x5d, 0x28, 0x81, 0xb6, 0x66, 0xf6,
0x2e, 0xda, 0xb7, 0x49, 0x5e, 0x93, 0x38, 0xc4, 0xc0, 0x6f, 0xe0, 0x29, 0xff, 0x73, 0xfb, 0x02,
0x5c, 0xde, 0x4b, 0xaa, 0x43, 0x35, 0xb8, 0x72, 0xd7, 0x10, 0x1f, 0x16, 0xf0, 0x7b, 0x38, 0x7b,
0x00, 0x0a, 0x5a, 0x50, 0xf6, 0x96, 0xd2, 0x8d, 0xcd, 0x86, 0x38, 0x5c, 0x54, 0xfb, 0x30, 0xc9,
0x8a, 0x24, 0x57, 0xfb, 0xaa, 0xec, 0xed, 0x8b, 0x7d, 0x8e, 0xdf, 0x01, 0x5a, 0xab, 0xcc, 0x5e,
0xad, 0xa7, 0x59, 0x9e, 0x67, 0x35, 0x2d, 0xca, 0x22, 0xad, 0x4d, 0x50, 0xed, 0x2e, 0xb4, 0x97,
0xe2, 0x40, 0x11, 0xbf, 0x86, 0x63, 0x6b, 0x95, 0xbd, 0x9b, 0xf6, 0x68, 0x47, 0x7e, 0x00, 0xe3,
0xb7, 0xc0, 0x76, 0xf9, 0x7e, 0xcc, 0xe3, 0x1d, 0xf5, 0x83, 0xd2, 0xff, 0x5f, 0xa6, 0x4b, 0xf8,
0xad, 0xb7, 0x7b, 0x2a, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x9f, 0x6d, 0x24, 0x63, 0x05,
0x00, 0x00,
}

View File

@ -1,64 +0,0 @@
syntax = "proto2";
option go_package = "urlfetch";
package appengine;
message URLFetchServiceError {
enum ErrorCode {
OK = 0;
INVALID_URL = 1;
FETCH_ERROR = 2;
UNSPECIFIED_ERROR = 3;
RESPONSE_TOO_LARGE = 4;
DEADLINE_EXCEEDED = 5;
SSL_CERTIFICATE_ERROR = 6;
DNS_ERROR = 7;
CLOSED = 8;
INTERNAL_TRANSIENT_ERROR = 9;
TOO_MANY_REDIRECTS = 10;
MALFORMED_REPLY = 11;
CONNECTION_ERROR = 12;
}
}
message URLFetchRequest {
enum RequestMethod {
GET = 1;
POST = 2;
HEAD = 3;
PUT = 4;
DELETE = 5;
PATCH = 6;
}
required RequestMethod Method = 1;
required string Url = 2;
repeated group Header = 3 {
required string Key = 4;
required string Value = 5;
}
optional bytes Payload = 6 [ctype=CORD];
optional bool FollowRedirects = 7 [default=true];
optional double Deadline = 8;
optional bool MustValidateServerCertificate = 9 [default=true];
}
message URLFetchResponse {
optional bytes Content = 1;
required int32 StatusCode = 2;
repeated group Header = 3 {
required string Key = 4;
required string Value = 5;
}
optional bool ContentWasTruncated = 6 [default=false];
optional int64 ExternalBytesSent = 7;
optional int64 ExternalBytesReceived = 8;
optional string FinalUrl = 9;
optional int64 ApiCpuMilliseconds = 10 [default=0];
optional int64 ApiBytesSent = 11 [default=0];
optional int64 ApiBytesReceived = 12 [default=0];
}

View File

@ -1,210 +0,0 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
// Package urlfetch provides an http.RoundTripper implementation
// for fetching URLs via App Engine's urlfetch service.
package urlfetch // import "google.golang.org/appengine/urlfetch"
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/appengine/internal"
pb "google.golang.org/appengine/internal/urlfetch"
)
// Transport is an implementation of http.RoundTripper for
// App Engine. Users should generally create an http.Client using
// this transport and use the Client rather than using this transport
// directly.
type Transport struct {
Context context.Context
// Controls whether the application checks the validity of SSL certificates
// over HTTPS connections. A value of false (the default) instructs the
// application to send a request to the server only if the certificate is
// valid and signed by a trusted certificate authority (CA), and also
// includes a hostname that matches the certificate. A value of true
// instructs the application to perform no certificate validation.
AllowInvalidServerCertificate bool
}
// Verify statically that *Transport implements http.RoundTripper.
var _ http.RoundTripper = (*Transport)(nil)
// Client returns an *http.Client using a default urlfetch Transport. This
// client will have the default deadline of 5 seconds, and will check the
// validity of SSL certificates.
//
// Any deadline of the provided context will be used for requests through this client;
// if the context does not have a deadline then a 5-second default is used.
func Client(ctx context.Context) *http.Client {
return &http.Client{
Transport: &Transport{
Context: ctx,
},
}
}
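The Client helper above is the usual entry point for this package. Below is a minimal sketch (not part of the original file) of how a handler might use it; it assumes the appengine.NewContext and appengine.Main helpers from google.golang.org/appengine and a purely illustrative URL.

package main

import (
    "fmt"
    "net/http"

    "google.golang.org/appengine"
    "google.golang.org/appengine/urlfetch"
)

func handler(w http.ResponseWriter, r *http.Request) {
    ctx := appengine.NewContext(r) // request-scoped App Engine context
    client := urlfetch.Client(ctx) // *http.Client backed by the urlfetch Transport
    resp, err := client.Get("https://example.com/")
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    defer resp.Body.Close()
    fmt.Fprintln(w, "fetched:", resp.Status)
}

func main() {
    http.HandleFunc("/fetch", handler)
    appengine.Main() // hand control to the App Engine runtime
}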
type bodyReader struct {
content []byte
truncated bool
closed bool
}
// ErrTruncatedBody is the error returned after the final Read() from a
// response's Body if the body has been truncated by App Engine's proxy.
var ErrTruncatedBody = errors.New("urlfetch: truncated body")
func statusCodeToText(code int) string {
if t := http.StatusText(code); t != "" {
return t
}
return strconv.Itoa(code)
}
func (br *bodyReader) Read(p []byte) (n int, err error) {
if br.closed {
if br.truncated {
return 0, ErrTruncatedBody
}
return 0, io.EOF
}
n = copy(p, br.content)
if n > 0 {
br.content = br.content[n:]
return
}
if br.truncated {
br.closed = true
return 0, ErrTruncatedBody
}
return 0, io.EOF
}
func (br *bodyReader) Close() error {
br.closed = true
br.content = nil
return nil
}
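Because the proxy may truncate large responses, callers that drain the body should distinguish a truncated read from a normal EOF. A small sketch of that pattern; readAll is a hypothetical helper, not part of this package.

package example

import (
    "errors"
    "io"
    "log"
    "net/http"

    "google.golang.org/appengine/urlfetch"
)

// readAll drains an urlfetch response body and reports truncation explicitly.
// ErrTruncatedBody is returned by the final Read when the proxy cut the
// response short, so it is worth separating from other read errors.
func readAll(resp *http.Response) ([]byte, error) {
    defer resp.Body.Close()
    body, err := io.ReadAll(resp.Body)
    if errors.Is(err, urlfetch.ErrTruncatedBody) {
        log.Printf("response truncated after %d bytes", len(body))
    }
    return body, err
}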
// A map of the URL Fetch-accepted methods that take a request body.
var methodAcceptsRequestBody = map[string]bool{
"POST": true,
"PUT": true,
"PATCH": true,
}
// urlString returns a valid string given a URL. This function is necessary because
// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
// See http://code.google.com/p/go/issues/detail?id=4860.
func urlString(u *url.URL) string {
if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
return u.String()
}
aux := *u
aux.Opaque = "//" + aux.Host + aux.Opaque
return aux.String()
}
// RoundTrip issues a single HTTP request and returns its response. Per the
// http.RoundTripper interface, RoundTrip only returns an error if there
// was an unsupported request or the URL Fetch proxy fails.
// Note that HTTP response codes such as 5xx, 403, 404, etc are not
// errors as far as the transport is concerned and will be returned
// with err set to nil.
func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
if !ok {
return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
}
method := pb.URLFetchRequest_RequestMethod(methNum)
freq := &pb.URLFetchRequest{
Method: &method,
Url: proto.String(urlString(req.URL)),
FollowRedirects: proto.Bool(false), // http.Client's responsibility
MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
}
if deadline, ok := t.Context.Deadline(); ok {
freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
}
for k, vals := range req.Header {
for _, val := range vals {
freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
Key: proto.String(k),
Value: proto.String(val),
})
}
}
if methodAcceptsRequestBody[req.Method] && req.Body != nil {
// Avoid a []byte copy if req.Body has a Bytes method.
switch b := req.Body.(type) {
case interface {
Bytes() []byte
}:
freq.Payload = b.Bytes()
default:
freq.Payload, err = ioutil.ReadAll(req.Body)
if err != nil {
return nil, err
}
}
}
fres := &pb.URLFetchResponse{}
if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
return nil, err
}
res = &http.Response{}
res.StatusCode = int(*fres.StatusCode)
res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
res.Header = make(http.Header)
res.Request = req
// Faked:
res.ProtoMajor = 1
res.ProtoMinor = 1
res.Proto = "HTTP/1.1"
res.Close = true
for _, h := range fres.Header {
hkey := http.CanonicalHeaderKey(*h.Key)
hval := *h.Value
if hkey == "Content-Length" {
// Will get filled in below for all but HEAD requests.
if req.Method == "HEAD" {
res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
}
continue
}
res.Header.Add(hkey, hval)
}
if req.Method != "HEAD" {
res.ContentLength = int64(len(fres.Content))
}
truncated := fres.GetContentWasTruncated()
res.Body = &bodyReader{content: fres.Content, truncated: truncated}
return
}
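As the RoundTrip comment notes, HTTP-level failures (403, 404, 5xx, …) come back with err == nil, so status handling stays with the caller. A hedged sketch of that caller-side check; fetchOK is a hypothetical helper.

package example

import (
    "fmt"
    "net/http"
)

// fetchOK performs a GET with an urlfetch-backed client and treats any
// non-2xx status as an application-level error; RoundTrip itself only
// fails for unsupported requests or URL Fetch proxy errors.
func fetchOK(client *http.Client, url string) (*http.Response, error) {
    resp, err := client.Get(url)
    if err != nil {
        return nil, err // transport / proxy failure
    }
    if resp.StatusCode < 200 || resp.StatusCode >= 300 {
        resp.Body.Close()
        return nil, fmt.Errorf("unexpected status %s", resp.Status)
    }
    return resp, nil
}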
func init() {
internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
}

View File

@ -1,27 +0,0 @@
Copyright (c) 2018 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

View File

@ -1,772 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package prototext
import (
"fmt"
"unicode/utf8"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/encoding/text"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/internal/set"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
// Unmarshal reads the given []byte into the given [proto.Message].
// The provided message must be mutable (e.g., a non-nil pointer to a message).
func Unmarshal(b []byte, m proto.Message) error {
return UnmarshalOptions{}.Unmarshal(b, m)
}
// UnmarshalOptions is a configurable textproto format unmarshaler.
type UnmarshalOptions struct {
pragma.NoUnkeyedLiterals
// AllowPartial accepts input for messages that will result in missing
// required fields. If AllowPartial is false (the default), Unmarshal will
// return error if there are any missing required fields.
AllowPartial bool
// DiscardUnknown specifies whether to ignore unknown fields when parsing.
// An unknown field is any field whose field name or field number does not
// resolve to any known or extension field in the message.
// By default, unmarshal rejects unknown fields as an error.
DiscardUnknown bool
// Resolver is used for looking up types when unmarshaling
// google.protobuf.Any messages or extension fields.
// If nil, this defaults to using protoregistry.GlobalTypes.
Resolver interface {
protoregistry.MessageTypeResolver
protoregistry.ExtensionTypeResolver
}
}
// Unmarshal reads the given []byte and populates the given [proto.Message]
// using options in the UnmarshalOptions object.
// The provided message must be mutable (e.g., a non-nil pointer to a message).
func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
return o.unmarshal(b, m)
}
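A minimal sketch of the two public entry points above, using the well-known durationpb.Duration type purely as an illustrative target message; the expected outputs in the comments are informal.

package main

import (
    "fmt"

    "google.golang.org/protobuf/encoding/prototext"
    "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
    // Plain Unmarshal with default options.
    d := &durationpb.Duration{}
    if err := prototext.Unmarshal([]byte(`seconds: 90 nanos: 500`), d); err != nil {
        panic(err)
    }
    fmt.Println(d.AsDuration()) // 1m30.0000005s

    // UnmarshalOptions: tolerate unknown fields and missing required fields.
    opts := prototext.UnmarshalOptions{DiscardUnknown: true, AllowPartial: true}
    d2 := &durationpb.Duration{}
    if err := opts.Unmarshal([]byte(`seconds: 5 no_such_field: 1`), d2); err != nil {
        panic(err)
    }
    fmt.Println(d2.AsDuration()) // 5s
}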
// unmarshal is a centralized function that all unmarshal operations go through.
// For profiling purposes, avoid changing the name of this function or
// introducing other code paths for unmarshal that do not go through this.
func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error {
proto.Reset(m)
if o.Resolver == nil {
o.Resolver = protoregistry.GlobalTypes
}
dec := decoder{text.NewDecoder(b), o}
if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil {
return err
}
if o.AllowPartial {
return nil
}
return proto.CheckInitialized(m)
}
type decoder struct {
*text.Decoder
opts UnmarshalOptions
}
// newError returns an error object with position info.
func (d decoder) newError(pos int, f string, x ...interface{}) error {
line, column := d.Position(pos)
head := fmt.Sprintf("(line %d:%d): ", line, column)
return errors.New(head+f, x...)
}
// unexpectedTokenError returns a syntax error for the given unexpected token.
func (d decoder) unexpectedTokenError(tok text.Token) error {
return d.syntaxError(tok.Pos(), "unexpected token: %s", tok.RawString())
}
// syntaxError returns a syntax error for given position.
func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
line, column := d.Position(pos)
head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
return errors.New(head+f, x...)
}
// unmarshalMessage unmarshals into the given protoreflect.Message.
func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) error {
messageDesc := m.Descriptor()
if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
return errors.New("no support for proto1 MessageSets")
}
if messageDesc.FullName() == genid.Any_message_fullname {
return d.unmarshalAny(m, checkDelims)
}
if checkDelims {
tok, err := d.Read()
if err != nil {
return err
}
if tok.Kind() != text.MessageOpen {
return d.unexpectedTokenError(tok)
}
}
var seenNums set.Ints
var seenOneofs set.Ints
fieldDescs := messageDesc.Fields()
for {
// Read field name.
tok, err := d.Read()
if err != nil {
return err
}
switch typ := tok.Kind(); typ {
case text.Name:
// Continue below.
case text.EOF:
if checkDelims {
return text.ErrUnexpectedEOF
}
return nil
default:
if checkDelims && typ == text.MessageClose {
return nil
}
return d.unexpectedTokenError(tok)
}
// Resolve the field descriptor.
var name protoreflect.Name
var fd protoreflect.FieldDescriptor
var xt protoreflect.ExtensionType
var xtErr error
var isFieldNumberName bool
switch tok.NameKind() {
case text.IdentName:
name = protoreflect.Name(tok.IdentName())
fd = fieldDescs.ByTextName(string(name))
case text.TypeName:
// Handle extensions only. This code path is not for Any.
xt, xtErr = d.opts.Resolver.FindExtensionByName(protoreflect.FullName(tok.TypeName()))
case text.FieldNumber:
isFieldNumberName = true
num := protoreflect.FieldNumber(tok.FieldNumber())
if !num.IsValid() {
return d.newError(tok.Pos(), "invalid field number: %d", num)
}
fd = fieldDescs.ByNumber(num)
if fd == nil {
xt, xtErr = d.opts.Resolver.FindExtensionByNumber(messageDesc.FullName(), num)
}
}
if xt != nil {
fd = xt.TypeDescriptor()
if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() {
return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName())
}
} else if xtErr != nil && xtErr != protoregistry.NotFound {
return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr)
}
if flags.ProtoLegacy {
if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
fd = nil // reset since the weak reference is not linked in
}
}
// Handle unknown fields.
if fd == nil {
if d.opts.DiscardUnknown || messageDesc.ReservedNames().Has(name) {
d.skipValue()
continue
}
return d.newError(tok.Pos(), "unknown field: %v", tok.RawString())
}
// Handle fields identified by field number.
if isFieldNumberName {
// TODO: Add an option to permit parsing field numbers.
//
// This requires careful thought as the MarshalOptions.EmitUnknown
// option allows formatting unknown fields as the field number and the
// best-effort textual representation of the field value. In that case,
// it may not be possible to unmarshal the value from a parser that does
// have information about the unknown field.
return d.newError(tok.Pos(), "cannot specify field by number: %v", tok.RawString())
}
switch {
case fd.IsList():
kind := fd.Kind()
if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() {
return d.syntaxError(tok.Pos(), "missing field separator :")
}
list := m.Mutable(fd).List()
if err := d.unmarshalList(fd, list); err != nil {
return err
}
case fd.IsMap():
mmap := m.Mutable(fd).Map()
if err := d.unmarshalMap(fd, mmap); err != nil {
return err
}
default:
kind := fd.Kind()
if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() {
return d.syntaxError(tok.Pos(), "missing field separator :")
}
// If field is a oneof, check if it has already been set.
if od := fd.ContainingOneof(); od != nil {
idx := uint64(od.Index())
if seenOneofs.Has(idx) {
return d.newError(tok.Pos(), "error parsing %q, oneof %v is already set", tok.RawString(), od.FullName())
}
seenOneofs.Set(idx)
}
num := uint64(fd.Number())
if seenNums.Has(num) {
return d.newError(tok.Pos(), "non-repeated field %q is repeated", tok.RawString())
}
if err := d.unmarshalSingular(fd, m); err != nil {
return err
}
seenNums.Set(num)
}
}
return nil
}
// unmarshalSingular unmarshals a non-repeated field value specified by the
// given FieldDescriptor.
func (d decoder) unmarshalSingular(fd protoreflect.FieldDescriptor, m protoreflect.Message) error {
var val protoreflect.Value
var err error
switch fd.Kind() {
case protoreflect.MessageKind, protoreflect.GroupKind:
val = m.NewField(fd)
err = d.unmarshalMessage(val.Message(), true)
default:
val, err = d.unmarshalScalar(fd)
}
if err == nil {
m.Set(fd, val)
}
return err
}
// unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the
// given FieldDescriptor.
func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
tok, err := d.Read()
if err != nil {
return protoreflect.Value{}, err
}
if tok.Kind() != text.Scalar {
return protoreflect.Value{}, d.unexpectedTokenError(tok)
}
kind := fd.Kind()
switch kind {
case protoreflect.BoolKind:
if b, ok := tok.Bool(); ok {
return protoreflect.ValueOfBool(b), nil
}
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
if n, ok := tok.Int32(); ok {
return protoreflect.ValueOfInt32(n), nil
}
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
if n, ok := tok.Int64(); ok {
return protoreflect.ValueOfInt64(n), nil
}
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
if n, ok := tok.Uint32(); ok {
return protoreflect.ValueOfUint32(n), nil
}
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
if n, ok := tok.Uint64(); ok {
return protoreflect.ValueOfUint64(n), nil
}
case protoreflect.FloatKind:
if n, ok := tok.Float32(); ok {
return protoreflect.ValueOfFloat32(n), nil
}
case protoreflect.DoubleKind:
if n, ok := tok.Float64(); ok {
return protoreflect.ValueOfFloat64(n), nil
}
case protoreflect.StringKind:
if s, ok := tok.String(); ok {
if strs.EnforceUTF8(fd) && !utf8.ValidString(s) {
return protoreflect.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8")
}
return protoreflect.ValueOfString(s), nil
}
case protoreflect.BytesKind:
if b, ok := tok.String(); ok {
return protoreflect.ValueOfBytes([]byte(b)), nil
}
case protoreflect.EnumKind:
if lit, ok := tok.Enum(); ok {
// Lookup EnumNumber based on name.
if enumVal := fd.Enum().Values().ByName(protoreflect.Name(lit)); enumVal != nil {
return protoreflect.ValueOfEnum(enumVal.Number()), nil
}
}
if num, ok := tok.Int32(); ok {
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(num)), nil
}
default:
panic(fmt.Sprintf("invalid scalar kind %v", kind))
}
return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
}
// unmarshalList unmarshals into given protoreflect.List. A list value can
// either be in [] syntax or simply just a single scalar/message value.
func (d decoder) unmarshalList(fd protoreflect.FieldDescriptor, list protoreflect.List) error {
tok, err := d.Peek()
if err != nil {
return err
}
switch fd.Kind() {
case protoreflect.MessageKind, protoreflect.GroupKind:
switch tok.Kind() {
case text.ListOpen:
d.Read()
for {
tok, err := d.Peek()
if err != nil {
return err
}
switch tok.Kind() {
case text.ListClose:
d.Read()
return nil
case text.MessageOpen:
pval := list.NewElement()
if err := d.unmarshalMessage(pval.Message(), true); err != nil {
return err
}
list.Append(pval)
default:
return d.unexpectedTokenError(tok)
}
}
case text.MessageOpen:
pval := list.NewElement()
if err := d.unmarshalMessage(pval.Message(), true); err != nil {
return err
}
list.Append(pval)
return nil
}
default:
switch tok.Kind() {
case text.ListOpen:
d.Read()
for {
tok, err := d.Peek()
if err != nil {
return err
}
switch tok.Kind() {
case text.ListClose:
d.Read()
return nil
case text.Scalar:
pval, err := d.unmarshalScalar(fd)
if err != nil {
return err
}
list.Append(pval)
default:
return d.unexpectedTokenError(tok)
}
}
case text.Scalar:
pval, err := d.unmarshalScalar(fd)
if err != nil {
return err
}
list.Append(pval)
return nil
}
}
return d.unexpectedTokenError(tok)
}
// unmarshalMap unmarshals into given protoreflect.Map. A map value is a
// textproto message containing {key: <kvalue>, value: <mvalue>}.
func (d decoder) unmarshalMap(fd protoreflect.FieldDescriptor, mmap protoreflect.Map) error {
// Determine ahead whether map entry is a scalar type or a message type in
// order to call the appropriate unmarshalMapValue func inside
// unmarshalMapEntry.
var unmarshalMapValue func() (protoreflect.Value, error)
switch fd.MapValue().Kind() {
case protoreflect.MessageKind, protoreflect.GroupKind:
unmarshalMapValue = func() (protoreflect.Value, error) {
pval := mmap.NewValue()
if err := d.unmarshalMessage(pval.Message(), true); err != nil {
return protoreflect.Value{}, err
}
return pval, nil
}
default:
unmarshalMapValue = func() (protoreflect.Value, error) {
return d.unmarshalScalar(fd.MapValue())
}
}
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
case text.MessageOpen:
return d.unmarshalMapEntry(fd, mmap, unmarshalMapValue)
case text.ListOpen:
for {
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
case text.ListClose:
return nil
case text.MessageOpen:
if err := d.unmarshalMapEntry(fd, mmap, unmarshalMapValue); err != nil {
return err
}
default:
return d.unexpectedTokenError(tok)
}
}
default:
return d.unexpectedTokenError(tok)
}
}
// unmarshalMapEntry unmarshals a single map entry, a textproto message
// containing {key: <kvalue>, value: <mvalue>}, and sets it on the given protoreflect.Map.
func (d decoder) unmarshalMapEntry(fd protoreflect.FieldDescriptor, mmap protoreflect.Map, unmarshalMapValue func() (protoreflect.Value, error)) error {
var key protoreflect.MapKey
var pval protoreflect.Value
Loop:
for {
// Read field name.
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
case text.Name:
if tok.NameKind() != text.IdentName {
if !d.opts.DiscardUnknown {
return d.newError(tok.Pos(), "unknown map entry field %q", tok.RawString())
}
d.skipValue()
continue Loop
}
// Continue below.
case text.MessageClose:
break Loop
default:
return d.unexpectedTokenError(tok)
}
switch name := protoreflect.Name(tok.IdentName()); name {
case genid.MapEntry_Key_field_name:
if !tok.HasSeparator() {
return d.syntaxError(tok.Pos(), "missing field separator :")
}
if key.IsValid() {
return d.newError(tok.Pos(), "map entry %q cannot be repeated", name)
}
val, err := d.unmarshalScalar(fd.MapKey())
if err != nil {
return err
}
key = val.MapKey()
case genid.MapEntry_Value_field_name:
if kind := fd.MapValue().Kind(); (kind != protoreflect.MessageKind) && (kind != protoreflect.GroupKind) {
if !tok.HasSeparator() {
return d.syntaxError(tok.Pos(), "missing field separator :")
}
}
if pval.IsValid() {
return d.newError(tok.Pos(), "map entry %q cannot be repeated", name)
}
pval, err = unmarshalMapValue()
if err != nil {
return err
}
default:
if !d.opts.DiscardUnknown {
return d.newError(tok.Pos(), "unknown map entry field %q", name)
}
d.skipValue()
}
}
if !key.IsValid() {
key = fd.MapKey().Default().MapKey()
}
if !pval.IsValid() {
switch fd.MapValue().Kind() {
case protoreflect.MessageKind, protoreflect.GroupKind:
// If value field is not set for message/group types, construct an
// empty one as default.
pval = mmap.NewValue()
default:
pval = fd.MapValue().Default()
}
}
mmap.Set(key, pval)
return nil
}
// unmarshalAny unmarshals an Any textproto. It can either be in expanded form
// or non-expanded form.
func (d decoder) unmarshalAny(m protoreflect.Message, checkDelims bool) error {
var typeURL string
var bValue []byte
var seenTypeUrl bool
var seenValue bool
var isExpanded bool
if checkDelims {
tok, err := d.Read()
if err != nil {
return err
}
if tok.Kind() != text.MessageOpen {
return d.unexpectedTokenError(tok)
}
}
Loop:
for {
// Read field name. Can only have 3 possible field names, i.e. type_url,
// value and type URL name inside [].
tok, err := d.Read()
if err != nil {
return err
}
if typ := tok.Kind(); typ != text.Name {
if checkDelims {
if typ == text.MessageClose {
break Loop
}
} else if typ == text.EOF {
break Loop
}
return d.unexpectedTokenError(tok)
}
switch tok.NameKind() {
case text.IdentName:
// Both type_url and value fields require field separator :.
if !tok.HasSeparator() {
return d.syntaxError(tok.Pos(), "missing field separator :")
}
switch name := protoreflect.Name(tok.IdentName()); name {
case genid.Any_TypeUrl_field_name:
if seenTypeUrl {
return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname)
}
if isExpanded {
return d.newError(tok.Pos(), "conflict with [%s] field", typeURL)
}
tok, err := d.Read()
if err != nil {
return err
}
var ok bool
typeURL, ok = tok.String()
if !ok {
return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_TypeUrl_field_fullname, tok.RawString())
}
seenTypeUrl = true
case genid.Any_Value_field_name:
if seenValue {
return d.newError(tok.Pos(), "duplicate %v field", genid.Any_Value_field_fullname)
}
if isExpanded {
return d.newError(tok.Pos(), "conflict with [%s] field", typeURL)
}
tok, err := d.Read()
if err != nil {
return err
}
s, ok := tok.String()
if !ok {
return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_Value_field_fullname, tok.RawString())
}
bValue = []byte(s)
seenValue = true
default:
if !d.opts.DiscardUnknown {
return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname)
}
}
case text.TypeName:
if isExpanded {
return d.newError(tok.Pos(), "cannot have more than one type")
}
if seenTypeUrl {
return d.newError(tok.Pos(), "conflict with type_url field")
}
typeURL = tok.TypeName()
var err error
bValue, err = d.unmarshalExpandedAny(typeURL, tok.Pos())
if err != nil {
return err
}
isExpanded = true
default:
if !d.opts.DiscardUnknown {
return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname)
}
}
}
fds := m.Descriptor().Fields()
if len(typeURL) > 0 {
m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), protoreflect.ValueOfString(typeURL))
}
if len(bValue) > 0 {
m.Set(fds.ByNumber(genid.Any_Value_field_number), protoreflect.ValueOfBytes(bValue))
}
return nil
}
func (d decoder) unmarshalExpandedAny(typeURL string, pos int) ([]byte, error) {
mt, err := d.opts.Resolver.FindMessageByURL(typeURL)
if err != nil {
return nil, d.newError(pos, "unable to resolve message [%v]: %v", typeURL, err)
}
// Create new message for the embedded message type and unmarshal the value
// field into it.
m := mt.New()
if err := d.unmarshalMessage(m, true); err != nil {
return nil, err
}
// Serialize the embedded message and return the resulting bytes.
b, err := proto.MarshalOptions{
AllowPartial: true, // Never check required fields inside an Any.
Deterministic: true,
}.Marshal(m.Interface())
if err != nil {
return nil, d.newError(pos, "error in marshaling message into Any.value: %v", err)
}
return b, nil
}
// skipValue makes the decoder parse a field value in order to advance the read
// to the next field. It relies on Read returning an error if the types are not
// in valid sequence.
func (d decoder) skipValue() error {
tok, err := d.Read()
if err != nil {
return err
}
// Only need to continue reading for messages and lists.
switch tok.Kind() {
case text.MessageOpen:
return d.skipMessageValue()
case text.ListOpen:
for {
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
case text.ListClose:
return nil
case text.MessageOpen:
if err := d.skipMessageValue(); err != nil {
return err
}
default:
// Skip items. This will not validate whether skipped values are
// of the same type or not, same behavior as C++
// TextFormat::Parser::AllowUnknownField(true) version 3.8.0.
}
}
}
return nil
}
// skipMessageValue makes the decoder parse and skip over all fields in a
// message. It assumes that the previous read type is MessageOpen.
func (d decoder) skipMessageValue() error {
for {
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
case text.MessageClose:
return nil
case text.Name:
if err := d.skipValue(); err != nil {
return err
}
}
}
}

View File

@ -1,7 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package prototext marshals and unmarshals protocol buffer messages as the
// textproto format.
package prototext

View File

@ -1,376 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package prototext
import (
"fmt"
"strconv"
"unicode/utf8"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/encoding/text"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/order"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const defaultIndent = " "
// Format formats the message as a multiline string.
// This function is only intended for human consumption and ignores errors.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
func Format(m proto.Message) string {
return MarshalOptions{Multiline: true}.Format(m)
}
// Marshal writes the given [proto.Message] in textproto format using default
// options. Do not depend on the output being stable. It may change over time
// across different versions of the program.
func Marshal(m proto.Message) ([]byte, error) {
return MarshalOptions{}.Marshal(m)
}
// MarshalOptions is a configurable text format marshaler.
type MarshalOptions struct {
pragma.NoUnkeyedLiterals
// Multiline specifies whether the marshaler should format the output in
// indented-form with every textual element on a new line.
// If Indent is an empty string, then an arbitrary indent is chosen.
Multiline bool
// Indent specifies the set of indentation characters to use in a multiline
// formatted output such that every entry is preceded by Indent and
// terminated by a newline. If non-empty, then Multiline is treated as true.
// Indent can only be composed of space or tab characters.
Indent string
// EmitASCII specifies whether to format strings and bytes as ASCII only
// as opposed to using UTF-8 encoding when possible.
EmitASCII bool
// allowInvalidUTF8 specifies whether to permit the encoding of strings
// with invalid UTF-8. This is unexported as it is intended to only
// be specified by the Format method.
allowInvalidUTF8 bool
// AllowPartial allows messages that have missing required fields to marshal
// without returning an error. If AllowPartial is false (the default),
// Marshal will return error if there are any missing required fields.
AllowPartial bool
// EmitUnknown specifies whether to emit unknown fields in the output.
// If specified, the unmarshaler may be unable to parse the output.
// The default is to exclude unknown fields.
EmitUnknown bool
// Resolver is used for looking up types when expanding google.protobuf.Any
// messages. If nil, this defaults to using protoregistry.GlobalTypes.
Resolver interface {
protoregistry.ExtensionTypeResolver
protoregistry.MessageTypeResolver
}
}
// Format formats the message as a string.
// This method is only intended for human consumption and ignores errors.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
func (o MarshalOptions) Format(m proto.Message) string {
if m == nil || !m.ProtoReflect().IsValid() {
return "<nil>" // invalid syntax, but okay since this is for debugging
}
o.allowInvalidUTF8 = true
o.AllowPartial = true
o.EmitUnknown = true
b, _ := o.Marshal(m)
return string(b)
}
// Marshal writes the given [proto.Message] in textproto format using options in
// MarshalOptions object. Do not depend on the output being stable. It may
// change over time across different versions of the program.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
return o.marshal(nil, m)
}
// MarshalAppend appends the textproto format encoding of m to b,
// returning the result.
func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) {
return o.marshal(b, m)
}
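A small sketch of the marshaling entry points above, again using durationpb.Duration purely as an illustrative message; as the doc comments say, the exact output formatting is not stable, so the comments below are indicative only.

package main

import (
    "fmt"
    "time"

    "google.golang.org/protobuf/encoding/prototext"
    "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
    d := durationpb.New(90 * time.Second)

    // Compact output with default options.
    b, err := prototext.Marshal(d)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s\n", b) // e.g. "seconds:90"

    // Multiline, human-oriented output; Format ignores errors.
    fmt.Print(prototext.Format(d))

    // Explicit options: multiline with a two-space indent.
    opts := prototext.MarshalOptions{Multiline: true, Indent: "  "}
    b2, _ := opts.Marshal(d)
    fmt.Printf("%s", b2)
}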
// marshal is a centralized function that all marshal operations go through.
// For profiling purposes, avoid changing the name of this function or
// introducing other code paths for marshal that do not go through this.
func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) {
var delims = [2]byte{'{', '}'}
if o.Multiline && o.Indent == "" {
o.Indent = defaultIndent
}
if o.Resolver == nil {
o.Resolver = protoregistry.GlobalTypes
}
internalEnc, err := text.NewEncoder(b, o.Indent, delims, o.EmitASCII)
if err != nil {
return nil, err
}
// Treat nil message interface as an empty message,
// in which case there is nothing to output.
if m == nil {
return b, nil
}
enc := encoder{internalEnc, o}
err = enc.marshalMessage(m.ProtoReflect(), false)
if err != nil {
return nil, err
}
out := enc.Bytes()
if len(o.Indent) > 0 && len(out) > 0 {
out = append(out, '\n')
}
if o.AllowPartial {
return out, nil
}
return out, proto.CheckInitialized(m)
}
type encoder struct {
*text.Encoder
opts MarshalOptions
}
// marshalMessage marshals the given protoreflect.Message.
func (e encoder) marshalMessage(m protoreflect.Message, inclDelims bool) error {
messageDesc := m.Descriptor()
if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
return errors.New("no support for proto1 MessageSets")
}
if inclDelims {
e.StartMessage()
defer e.EndMessage()
}
// Handle Any expansion.
if messageDesc.FullName() == genid.Any_message_fullname {
if e.marshalAny(m) {
return nil
}
// If unable to expand, continue on to marshal Any as a regular message.
}
// Marshal fields.
var err error
order.RangeFields(m, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
if err = e.marshalField(fd.TextName(), v, fd); err != nil {
return false
}
return true
})
if err != nil {
return err
}
// Marshal unknown fields.
if e.opts.EmitUnknown {
e.marshalUnknown(m.GetUnknown())
}
return nil
}
// marshalField marshals the given field with protoreflect.Value.
func (e encoder) marshalField(name string, val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
switch {
case fd.IsList():
return e.marshalList(name, val.List(), fd)
case fd.IsMap():
return e.marshalMap(name, val.Map(), fd)
default:
e.WriteName(name)
return e.marshalSingular(val, fd)
}
}
// marshalSingular marshals the given non-repeated field value. This includes
// all scalar types, enums, messages, and groups.
func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
kind := fd.Kind()
switch kind {
case protoreflect.BoolKind:
e.WriteBool(val.Bool())
case protoreflect.StringKind:
s := val.String()
if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) {
return errors.InvalidUTF8(string(fd.FullName()))
}
e.WriteString(s)
case protoreflect.Int32Kind, protoreflect.Int64Kind,
protoreflect.Sint32Kind, protoreflect.Sint64Kind,
protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind:
e.WriteInt(val.Int())
case protoreflect.Uint32Kind, protoreflect.Uint64Kind,
protoreflect.Fixed32Kind, protoreflect.Fixed64Kind:
e.WriteUint(val.Uint())
case protoreflect.FloatKind:
// Encoder.WriteFloat handles the special numbers NaN and infinities.
e.WriteFloat(val.Float(), 32)
case protoreflect.DoubleKind:
// Encoder.WriteFloat handles the special numbers NaN and infinities.
e.WriteFloat(val.Float(), 64)
case protoreflect.BytesKind:
e.WriteString(string(val.Bytes()))
case protoreflect.EnumKind:
num := val.Enum()
if desc := fd.Enum().Values().ByNumber(num); desc != nil {
e.WriteLiteral(string(desc.Name()))
} else {
// Use numeric value if there is no enum description.
e.WriteInt(int64(num))
}
case protoreflect.MessageKind, protoreflect.GroupKind:
return e.marshalMessage(val.Message(), true)
default:
panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind))
}
return nil
}
// marshalList marshals the given protoreflect.List as multiple name-value fields.
func (e encoder) marshalList(name string, list protoreflect.List, fd protoreflect.FieldDescriptor) error {
size := list.Len()
for i := 0; i < size; i++ {
e.WriteName(name)
if err := e.marshalSingular(list.Get(i), fd); err != nil {
return err
}
}
return nil
}
// marshalMap marshals the given protoreflect.Map as multiple name-value fields.
func (e encoder) marshalMap(name string, mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
var err error
order.RangeEntries(mmap, order.GenericKeyOrder, func(key protoreflect.MapKey, val protoreflect.Value) bool {
e.WriteName(name)
e.StartMessage()
defer e.EndMessage()
e.WriteName(string(genid.MapEntry_Key_field_name))
err = e.marshalSingular(key.Value(), fd.MapKey())
if err != nil {
return false
}
e.WriteName(string(genid.MapEntry_Value_field_name))
err = e.marshalSingular(val, fd.MapValue())
if err != nil {
return false
}
return true
})
return err
}
// marshalUnknown parses the given []byte and marshals fields out.
// This function assumes proper encoding in the given []byte.
func (e encoder) marshalUnknown(b []byte) {
const dec = 10
const hex = 16
for len(b) > 0 {
num, wtype, n := protowire.ConsumeTag(b)
b = b[n:]
e.WriteName(strconv.FormatInt(int64(num), dec))
switch wtype {
case protowire.VarintType:
var v uint64
v, n = protowire.ConsumeVarint(b)
e.WriteUint(v)
case protowire.Fixed32Type:
var v uint32
v, n = protowire.ConsumeFixed32(b)
e.WriteLiteral("0x" + strconv.FormatUint(uint64(v), hex))
case protowire.Fixed64Type:
var v uint64
v, n = protowire.ConsumeFixed64(b)
e.WriteLiteral("0x" + strconv.FormatUint(v, hex))
case protowire.BytesType:
var v []byte
v, n = protowire.ConsumeBytes(b)
e.WriteString(string(v))
case protowire.StartGroupType:
e.StartMessage()
var v []byte
v, n = protowire.ConsumeGroup(num, b)
e.marshalUnknown(v)
e.EndMessage()
default:
panic(fmt.Sprintf("prototext: error parsing unknown field wire type: %v", wtype))
}
b = b[n:]
}
}
// marshalAny marshals the given google.protobuf.Any message in expanded form.
// It returns true if it was able to marshal, else false.
func (e encoder) marshalAny(any protoreflect.Message) bool {
// Construct the embedded message.
fds := any.Descriptor().Fields()
fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
typeURL := any.Get(fdType).String()
mt, err := e.opts.Resolver.FindMessageByURL(typeURL)
if err != nil {
return false
}
m := mt.New().Interface()
// Unmarshal bytes into embedded message.
fdValue := fds.ByNumber(genid.Any_Value_field_number)
value := any.Get(fdValue)
err = proto.UnmarshalOptions{
AllowPartial: true,
Resolver: e.opts.Resolver,
}.Unmarshal(value.Bytes(), m)
if err != nil {
return false
}
// Get current encoder position. If marshaling fails, reset encoder output
// back to this position.
pos := e.Snapshot()
// Field name is the proto field name enclosed in [].
e.WriteName("[" + typeURL + "]")
err = e.marshalMessage(m.ProtoReflect(), true)
if err != nil {
e.Reset(pos)
return false
}
return true
}

View File

@ -1,547 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package protowire parses and formats the raw wire encoding.
// See https://protobuf.dev/programming-guides/encoding.
//
// For marshaling and unmarshaling entire protobuf messages,
// use the [google.golang.org/protobuf/proto] package instead.
package protowire
import (
"io"
"math"
"math/bits"
"google.golang.org/protobuf/internal/errors"
)
// Number represents the field number.
type Number int32
const (
MinValidNumber Number = 1
FirstReservedNumber Number = 19000
LastReservedNumber Number = 19999
MaxValidNumber Number = 1<<29 - 1
DefaultRecursionLimit = 10000
)
// IsValid reports whether the field number is semantically valid.
func (n Number) IsValid() bool {
return MinValidNumber <= n && n <= MaxValidNumber
}
// Type represents the wire type.
type Type int8
const (
VarintType Type = 0
Fixed32Type Type = 5
Fixed64Type Type = 1
BytesType Type = 2
StartGroupType Type = 3
EndGroupType Type = 4
)
const (
_ = -iota
errCodeTruncated
errCodeFieldNumber
errCodeOverflow
errCodeReserved
errCodeEndGroup
errCodeRecursionDepth
)
var (
errFieldNumber = errors.New("invalid field number")
errOverflow = errors.New("variable length integer overflow")
errReserved = errors.New("cannot parse reserved wire type")
errEndGroup = errors.New("mismatching end group marker")
errParse = errors.New("parse error")
)
// ParseError converts an error code into an error value.
// This returns nil if n is a non-negative number.
func ParseError(n int) error {
if n >= 0 {
return nil
}
switch n {
case errCodeTruncated:
return io.ErrUnexpectedEOF
case errCodeFieldNumber:
return errFieldNumber
case errCodeOverflow:
return errOverflow
case errCodeReserved:
return errReserved
case errCodeEndGroup:
return errEndGroup
default:
return errParse
}
}
// ConsumeField parses an entire field record (both tag and value) and returns
// the field number, the wire type, and the total length.
// This returns a negative length upon an error (see [ParseError]).
//
// The total length includes the tag header and the end group marker (if the
// field is a group).
func ConsumeField(b []byte) (Number, Type, int) {
num, typ, n := ConsumeTag(b)
if n < 0 {
return 0, 0, n // forward error code
}
m := ConsumeFieldValue(num, typ, b[n:])
if m < 0 {
return 0, 0, m // forward error code
}
return num, typ, n + m
}
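A minimal round-trip sketch for the low-level helpers: append a varint field with field number 1, then parse it back with ConsumeField, ConsumeTag, and ConsumeVarint. The printed values follow from the encoding (1-byte tag plus 2-byte varint for 300).

package main

import (
    "fmt"

    "google.golang.org/protobuf/encoding/protowire"
)

func main() {
    // Encode: field number 1, varint wire type, value 300.
    var b []byte
    b = protowire.AppendTag(b, 1, protowire.VarintType)
    b = protowire.AppendVarint(b, 300)

    // Decode the whole field record in one call.
    num, typ, n := protowire.ConsumeField(b)
    if n < 0 {
        panic(protowire.ParseError(n))
    }
    fmt.Println(num, typ, n) // 1 0 3

    // Or decode the tag and the value separately.
    _, _, tagLen := protowire.ConsumeTag(b)
    v, _ := protowire.ConsumeVarint(b[tagLen:])
    fmt.Println(v) // 300
}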
// ConsumeFieldValue parses a field value and returns its length.
// This assumes that the field [Number] and wire [Type] have already been parsed.
// This returns a negative length upon an error (see [ParseError]).
//
// When parsing a group, the length includes the end group marker and
// the end group is verified to match the starting field number.
func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) {
return consumeFieldValueD(num, typ, b, DefaultRecursionLimit)
}
func consumeFieldValueD(num Number, typ Type, b []byte, depth int) (n int) {
switch typ {
case VarintType:
_, n = ConsumeVarint(b)
return n
case Fixed32Type:
_, n = ConsumeFixed32(b)
return n
case Fixed64Type:
_, n = ConsumeFixed64(b)
return n
case BytesType:
_, n = ConsumeBytes(b)
return n
case StartGroupType:
if depth < 0 {
return errCodeRecursionDepth
}
n0 := len(b)
for {
num2, typ2, n := ConsumeTag(b)
if n < 0 {
return n // forward error code
}
b = b[n:]
if typ2 == EndGroupType {
if num != num2 {
return errCodeEndGroup
}
return n0 - len(b)
}
n = consumeFieldValueD(num2, typ2, b, depth-1)
if n < 0 {
return n // forward error code
}
b = b[n:]
}
case EndGroupType:
return errCodeEndGroup
default:
return errCodeReserved
}
}
// AppendTag encodes num and typ as a varint-encoded tag and appends it to b.
func AppendTag(b []byte, num Number, typ Type) []byte {
return AppendVarint(b, EncodeTag(num, typ))
}
// ConsumeTag parses b as a varint-encoded tag, reporting its length.
// This returns a negative length upon an error (see [ParseError]).
func ConsumeTag(b []byte) (Number, Type, int) {
v, n := ConsumeVarint(b)
if n < 0 {
return 0, 0, n // forward error code
}
num, typ := DecodeTag(v)
if num < MinValidNumber {
return 0, 0, errCodeFieldNumber
}
return num, typ, n
}
func SizeTag(num Number) int {
return SizeVarint(EncodeTag(num, 0)) // wire type has no effect on size
}
// AppendVarint appends v to b as a varint-encoded uint64.
func AppendVarint(b []byte, v uint64) []byte {
switch {
case v < 1<<7:
b = append(b, byte(v))
case v < 1<<14:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte(v>>7))
case v < 1<<21:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte((v>>7)&0x7f|0x80),
byte(v>>14))
case v < 1<<28:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte((v>>7)&0x7f|0x80),
byte((v>>14)&0x7f|0x80),
byte(v>>21))
case v < 1<<35:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte((v>>7)&0x7f|0x80),
byte((v>>14)&0x7f|0x80),
byte((v>>21)&0x7f|0x80),
byte(v>>28))
case v < 1<<42:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte((v>>7)&0x7f|0x80),
byte((v>>14)&0x7f|0x80),
byte((v>>21)&0x7f|0x80),
byte((v>>28)&0x7f|0x80),
byte(v>>35))
case v < 1<<49:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte((v>>7)&0x7f|0x80),
byte((v>>14)&0x7f|0x80),
byte((v>>21)&0x7f|0x80),
byte((v>>28)&0x7f|0x80),
byte((v>>35)&0x7f|0x80),
byte(v>>42))
case v < 1<<56:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte((v>>7)&0x7f|0x80),
byte((v>>14)&0x7f|0x80),
byte((v>>21)&0x7f|0x80),
byte((v>>28)&0x7f|0x80),
byte((v>>35)&0x7f|0x80),
byte((v>>42)&0x7f|0x80),
byte(v>>49))
case v < 1<<63:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte((v>>7)&0x7f|0x80),
byte((v>>14)&0x7f|0x80),
byte((v>>21)&0x7f|0x80),
byte((v>>28)&0x7f|0x80),
byte((v>>35)&0x7f|0x80),
byte((v>>42)&0x7f|0x80),
byte((v>>49)&0x7f|0x80),
byte(v>>56))
default:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte((v>>7)&0x7f|0x80),
byte((v>>14)&0x7f|0x80),
byte((v>>21)&0x7f|0x80),
byte((v>>28)&0x7f|0x80),
byte((v>>35)&0x7f|0x80),
byte((v>>42)&0x7f|0x80),
byte((v>>49)&0x7f|0x80),
byte((v>>56)&0x7f|0x80),
1)
}
return b
}
// ConsumeVarint parses b as a varint-encoded uint64, reporting its length.
// This returns a negative length upon an error (see [ParseError]).
func ConsumeVarint(b []byte) (v uint64, n int) {
var y uint64
if len(b) <= 0 {
return 0, errCodeTruncated
}
v = uint64(b[0])
if v < 0x80 {
return v, 1
}
v -= 0x80
if len(b) <= 1 {
return 0, errCodeTruncated
}
y = uint64(b[1])
v += y << 7
if y < 0x80 {
return v, 2
}
v -= 0x80 << 7
if len(b) <= 2 {
return 0, errCodeTruncated
}
y = uint64(b[2])
v += y << 14
if y < 0x80 {
return v, 3
}
v -= 0x80 << 14
if len(b) <= 3 {
return 0, errCodeTruncated
}
y = uint64(b[3])
v += y << 21
if y < 0x80 {
return v, 4
}
v -= 0x80 << 21
if len(b) <= 4 {
return 0, errCodeTruncated
}
y = uint64(b[4])
v += y << 28
if y < 0x80 {
return v, 5
}
v -= 0x80 << 28
if len(b) <= 5 {
return 0, errCodeTruncated
}
y = uint64(b[5])
v += y << 35
if y < 0x80 {
return v, 6
}
v -= 0x80 << 35
if len(b) <= 6 {
return 0, errCodeTruncated
}
y = uint64(b[6])
v += y << 42
if y < 0x80 {
return v, 7
}
v -= 0x80 << 42
if len(b) <= 7 {
return 0, errCodeTruncated
}
y = uint64(b[7])
v += y << 49
if y < 0x80 {
return v, 8
}
v -= 0x80 << 49
if len(b) <= 8 {
return 0, errCodeTruncated
}
y = uint64(b[8])
v += y << 56
if y < 0x80 {
return v, 9
}
v -= 0x80 << 56
if len(b) <= 9 {
return 0, errCodeTruncated
}
y = uint64(b[9])
v += y << 63
if y < 2 {
return v, 10
}
return 0, errCodeOverflow
}
// SizeVarint returns the encoded size of a varint.
// The size is guaranteed to be within 1 and 10, inclusive.
func SizeVarint(v uint64) int {
// This computes 1 + (bits.Len64(v)-1)/7.
// 9/64 is a good enough approximation of 1/7
return int(9*uint32(bits.Len64(v))+64) / 64
}
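A quick check of the size formula at the interesting boundaries; the expected values follow directly from 1 + (bits.Len64(v)-1)/7.

package main

import (
    "fmt"
    "math"

    "google.golang.org/protobuf/encoding/protowire"
)

func main() {
    fmt.Println(protowire.SizeVarint(0))              // 1
    fmt.Println(protowire.SizeVarint(1<<7 - 1))       // 1 (127 still fits in one byte)
    fmt.Println(protowire.SizeVarint(1 << 7))         // 2
    fmt.Println(protowire.SizeVarint(1<<63 - 1))      // 9
    fmt.Println(protowire.SizeVarint(math.MaxUint64)) // 10
}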
// AppendFixed32 appends v to b as a little-endian uint32.
func AppendFixed32(b []byte, v uint32) []byte {
return append(b,
byte(v>>0),
byte(v>>8),
byte(v>>16),
byte(v>>24))
}
// ConsumeFixed32 parses b as a little-endian uint32, reporting its length.
// This returns a negative length upon an error (see [ParseError]).
func ConsumeFixed32(b []byte) (v uint32, n int) {
if len(b) < 4 {
return 0, errCodeTruncated
}
v = uint32(b[0])<<0 | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
return v, 4
}
// SizeFixed32 returns the encoded size of a fixed32, which is always 4.
func SizeFixed32() int {
return 4
}
// AppendFixed64 appends v to b as a little-endian uint64.
func AppendFixed64(b []byte, v uint64) []byte {
return append(b,
byte(v>>0),
byte(v>>8),
byte(v>>16),
byte(v>>24),
byte(v>>32),
byte(v>>40),
byte(v>>48),
byte(v>>56))
}
// ConsumeFixed64 parses b as a little-endian uint64, reporting its length.
// This returns a negative length upon an error (see [ParseError]).
func ConsumeFixed64(b []byte) (v uint64, n int) {
if len(b) < 8 {
return 0, errCodeTruncated
}
v = uint64(b[0])<<0 | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
return v, 8
}
// SizeFixed64 returns the encoded size of a fixed64, which is always 8.
func SizeFixed64() int {
return 8
}
// AppendBytes appends v to b as a length-prefixed bytes value.
func AppendBytes(b []byte, v []byte) []byte {
return append(AppendVarint(b, uint64(len(v))), v...)
}
// ConsumeBytes parses b as a length-prefixed bytes value, reporting its length.
// This returns a negative length upon an error (see [ParseError]).
func ConsumeBytes(b []byte) (v []byte, n int) {
m, n := ConsumeVarint(b)
if n < 0 {
return nil, n // forward error code
}
if m > uint64(len(b[n:])) {
return nil, errCodeTruncated
}
return b[n:][:m], n + int(m)
}
// SizeBytes returns the encoded size of a length-prefixed bytes value,
// given only the length.
func SizeBytes(n int) int {
return SizeVarint(uint64(n)) + n
}
// AppendString appends v to b as a length-prefixed bytes value.
func AppendString(b []byte, v string) []byte {
return append(AppendVarint(b, uint64(len(v))), v...)
}
// ConsumeString parses b as a length-prefixed bytes value, reporting its length.
// This returns a negative length upon an error (see [ParseError]).
func ConsumeString(b []byte) (v string, n int) {
bb, n := ConsumeBytes(b)
return string(bb), n
}
// AppendGroup appends v to b as group value, with a trailing end group marker.
// The value v must not contain the end marker.
func AppendGroup(b []byte, num Number, v []byte) []byte {
return AppendVarint(append(b, v...), EncodeTag(num, EndGroupType))
}
// ConsumeGroup parses b as a group value until the trailing end group marker,
// and verifies that the end marker matches the provided num. The value v
// does not contain the end marker, while the length does contain the end marker.
// This returns a negative length upon an error (see [ParseError]).
func ConsumeGroup(num Number, b []byte) (v []byte, n int) {
n = ConsumeFieldValue(num, StartGroupType, b)
if n < 0 {
return nil, n // forward error code
}
b = b[:n]
// Truncate off end group marker, but need to handle denormalized varints.
// Assuming end marker is never 0 (which is always the case since
// EndGroupType is non-zero), we can truncate all trailing bytes where the
// lower 7 bits are all zero (implying that the varint is denormalized).
for len(b) > 0 && b[len(b)-1]&0x7f == 0 {
b = b[:len(b)-1]
}
b = b[:len(b)-SizeTag(num)]
return b, n
}
// SizeGroup returns the encoded size of a group, given only the length.
func SizeGroup(num Number, n int) int {
return n + SizeTag(num)
}
// DecodeTag decodes the field [Number] and wire [Type] from its unified form.
// The [Number] is -1 if the decoded field number overflows int32.
// Other than overflow, this does not check for field number validity.
func DecodeTag(x uint64) (Number, Type) {
// NOTE: MessageSet allows for larger field numbers than normal.
if x>>3 > uint64(math.MaxInt32) {
return -1, 0
}
return Number(x >> 3), Type(x & 7)
}
// EncodeTag encodes the field [Number] and wire [Type] into its unified form.
func EncodeTag(num Number, typ Type) uint64 {
return uint64(num)<<3 | uint64(typ&7)
}
// DecodeZigZag decodes a zig-zag-encoded uint64 as an int64.
//
// Input: {…, 5, 3, 1, 0, 2, 4, 6, …}
// Output: {…, -3, -2, -1, 0, +1, +2, +3, …}
func DecodeZigZag(x uint64) int64 {
return int64(x>>1) ^ int64(x)<<63>>63
}
// EncodeZigZag encodes an int64 as a zig-zag-encoded uint64.
//
// Input: {…, -3, -2, -1, 0, +1, +2, +3, …}
// Output: {…, 5, 3, 1, 0, 2, 4, 6, …}
func EncodeZigZag(x int64) uint64 {
return uint64(x<<1) ^ uint64(x>>63)
}
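A tiny sketch confirming the zig-zag mapping shown in the input/output tables above.

package main

import (
    "fmt"

    "google.golang.org/protobuf/encoding/protowire"
)

func main() {
    for _, x := range []int64{-3, -2, -1, 0, 1, 2, 3} {
        e := protowire.EncodeZigZag(x)
        fmt.Printf("%+d -> %d -> %+d\n", x, e, protowire.DecodeZigZag(e))
    }
    // Pairs: -3→5, -2→3, -1→1, 0→0, +1→2, +2→4, +3→6.
}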
// DecodeBool decodes a uint64 as a bool.
//
// Input: { 0, 1, 2, …}
// Output: {false, true, true, …}
func DecodeBool(x uint64) bool {
return x != 0
}
// EncodeBool encodes a bool as a uint64.
//
// Input: {false, true}
// Output: { 0, 1}
func EncodeBool(x bool) uint64 {
if x {
return 1
}
return 0
}

View File

@ -1,413 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package descfmt provides functionality to format descriptors.
package descfmt
import (
"fmt"
"io"
"reflect"
"strconv"
"strings"
"google.golang.org/protobuf/internal/detrand"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/reflect/protoreflect"
)
type list interface {
Len() int
pragma.DoNotImplement
}
func FormatList(s fmt.State, r rune, vs list) {
io.WriteString(s, formatListOpt(vs, true, r == 'v' && (s.Flag('+') || s.Flag('#'))))
}
func formatListOpt(vs list, isRoot, allowMulti bool) string {
start, end := "[", "]"
if isRoot {
var name string
switch vs.(type) {
case protoreflect.Names:
name = "Names"
case protoreflect.FieldNumbers:
name = "FieldNumbers"
case protoreflect.FieldRanges:
name = "FieldRanges"
case protoreflect.EnumRanges:
name = "EnumRanges"
case protoreflect.FileImports:
name = "FileImports"
case protoreflect.Descriptor:
name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s"
default:
name = reflect.ValueOf(vs).Elem().Type().Name()
}
start, end = name+"{", "}"
}
var ss []string
switch vs := vs.(type) {
case protoreflect.Names:
for i := 0; i < vs.Len(); i++ {
ss = append(ss, fmt.Sprint(vs.Get(i)))
}
return start + joinStrings(ss, false) + end
case protoreflect.FieldNumbers:
for i := 0; i < vs.Len(); i++ {
ss = append(ss, fmt.Sprint(vs.Get(i)))
}
return start + joinStrings(ss, false) + end
case protoreflect.FieldRanges:
for i := 0; i < vs.Len(); i++ {
r := vs.Get(i)
if r[0]+1 == r[1] {
ss = append(ss, fmt.Sprintf("%d", r[0]))
} else {
ss = append(ss, fmt.Sprintf("%d:%d", r[0], r[1])) // enum ranges are end exclusive
}
}
return start + joinStrings(ss, false) + end
case protoreflect.EnumRanges:
for i := 0; i < vs.Len(); i++ {
r := vs.Get(i)
if r[0] == r[1] {
ss = append(ss, fmt.Sprintf("%d", r[0]))
} else {
ss = append(ss, fmt.Sprintf("%d:%d", r[0], int64(r[1])+1)) // enum ranges are end inclusive
}
}
return start + joinStrings(ss, false) + end
case protoreflect.FileImports:
for i := 0; i < vs.Len(); i++ {
var rs records
rv := reflect.ValueOf(vs.Get(i))
rs.Append(rv, []methodAndName{
{rv.MethodByName("Path"), "Path"},
{rv.MethodByName("Package"), "Package"},
{rv.MethodByName("IsPublic"), "IsPublic"},
{rv.MethodByName("IsWeak"), "IsWeak"},
}...)
ss = append(ss, "{"+rs.Join()+"}")
}
return start + joinStrings(ss, allowMulti) + end
default:
_, isEnumValue := vs.(protoreflect.EnumValueDescriptors)
for i := 0; i < vs.Len(); i++ {
m := reflect.ValueOf(vs).MethodByName("Get")
v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface()
ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue, nil))
}
return start + joinStrings(ss, allowMulti && isEnumValue) + end
}
}
type methodAndName struct {
method reflect.Value
name string
}
func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) {
io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')), nil))
}
func InternalFormatDescOptForTesting(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string {
return formatDescOpt(t, isRoot, allowMulti, record)
}
func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string {
rv := reflect.ValueOf(t)
rt := rv.MethodByName("ProtoType").Type().In(0)
start, end := "{", "}"
if isRoot {
start = rt.Name() + "{"
}
_, isFile := t.(protoreflect.FileDescriptor)
rs := records{
allowMulti: allowMulti,
record: record,
}
if t.IsPlaceholder() {
if isFile {
rs.Append(rv, []methodAndName{
{rv.MethodByName("Path"), "Path"},
{rv.MethodByName("Package"), "Package"},
{rv.MethodByName("IsPlaceholder"), "IsPlaceholder"},
}...)
} else {
rs.Append(rv, []methodAndName{
{rv.MethodByName("FullName"), "FullName"},
{rv.MethodByName("IsPlaceholder"), "IsPlaceholder"},
}...)
}
} else {
switch {
case isFile:
rs.Append(rv, methodAndName{rv.MethodByName("Syntax"), "Syntax"})
case isRoot:
rs.Append(rv, []methodAndName{
{rv.MethodByName("Syntax"), "Syntax"},
{rv.MethodByName("FullName"), "FullName"},
}...)
default:
rs.Append(rv, methodAndName{rv.MethodByName("Name"), "Name"})
}
switch t := t.(type) {
case protoreflect.FieldDescriptor:
accessors := []methodAndName{
{rv.MethodByName("Number"), "Number"},
{rv.MethodByName("Cardinality"), "Cardinality"},
{rv.MethodByName("Kind"), "Kind"},
{rv.MethodByName("HasJSONName"), "HasJSONName"},
{rv.MethodByName("JSONName"), "JSONName"},
{rv.MethodByName("HasPresence"), "HasPresence"},
{rv.MethodByName("IsExtension"), "IsExtension"},
{rv.MethodByName("IsPacked"), "IsPacked"},
{rv.MethodByName("IsWeak"), "IsWeak"},
{rv.MethodByName("IsList"), "IsList"},
{rv.MethodByName("IsMap"), "IsMap"},
{rv.MethodByName("MapKey"), "MapKey"},
{rv.MethodByName("MapValue"), "MapValue"},
{rv.MethodByName("HasDefault"), "HasDefault"},
{rv.MethodByName("Default"), "Default"},
{rv.MethodByName("ContainingOneof"), "ContainingOneof"},
{rv.MethodByName("ContainingMessage"), "ContainingMessage"},
{rv.MethodByName("Message"), "Message"},
{rv.MethodByName("Enum"), "Enum"},
}
for _, s := range accessors {
switch s.name {
case "MapKey":
if k := t.MapKey(); k != nil {
rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()})
}
case "MapValue":
if v := t.MapValue(); v != nil {
switch v.Kind() {
case protoreflect.EnumKind:
rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Enum().FullName())})
case protoreflect.MessageKind, protoreflect.GroupKind:
rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Message().FullName())})
default:
rs.AppendRecs("MapValue", [2]string{"MapValue", v.Kind().String()})
}
}
case "ContainingOneof":
if od := t.ContainingOneof(); od != nil {
rs.AppendRecs("ContainingOneof", [2]string{"Oneof", string(od.Name())})
}
case "ContainingMessage":
if t.IsExtension() {
rs.AppendRecs("ContainingMessage", [2]string{"Extendee", string(t.ContainingMessage().FullName())})
}
case "Message":
if !t.IsMap() {
rs.Append(rv, s)
}
default:
rs.Append(rv, s)
}
}
case protoreflect.OneofDescriptor:
var ss []string
fs := t.Fields()
for i := 0; i < fs.Len(); i++ {
ss = append(ss, string(fs.Get(i).Name()))
}
if len(ss) > 0 {
rs.AppendRecs("Fields", [2]string{"Fields", "[" + joinStrings(ss, false) + "]"})
}
case protoreflect.FileDescriptor:
rs.Append(rv, []methodAndName{
{rv.MethodByName("Path"), "Path"},
{rv.MethodByName("Package"), "Package"},
{rv.MethodByName("Imports"), "Imports"},
{rv.MethodByName("Messages"), "Messages"},
{rv.MethodByName("Enums"), "Enums"},
{rv.MethodByName("Extensions"), "Extensions"},
{rv.MethodByName("Services"), "Services"},
}...)
case protoreflect.MessageDescriptor:
rs.Append(rv, []methodAndName{
{rv.MethodByName("IsMapEntry"), "IsMapEntry"},
{rv.MethodByName("Fields"), "Fields"},
{rv.MethodByName("Oneofs"), "Oneofs"},
{rv.MethodByName("ReservedNames"), "ReservedNames"},
{rv.MethodByName("ReservedRanges"), "ReservedRanges"},
{rv.MethodByName("RequiredNumbers"), "RequiredNumbers"},
{rv.MethodByName("ExtensionRanges"), "ExtensionRanges"},
{rv.MethodByName("Messages"), "Messages"},
{rv.MethodByName("Enums"), "Enums"},
{rv.MethodByName("Extensions"), "Extensions"},
}...)
case protoreflect.EnumDescriptor:
rs.Append(rv, []methodAndName{
{rv.MethodByName("Values"), "Values"},
{rv.MethodByName("ReservedNames"), "ReservedNames"},
{rv.MethodByName("ReservedRanges"), "ReservedRanges"},
}...)
case protoreflect.EnumValueDescriptor:
rs.Append(rv, []methodAndName{
{rv.MethodByName("Number"), "Number"},
}...)
case protoreflect.ServiceDescriptor:
rs.Append(rv, []methodAndName{
{rv.MethodByName("Methods"), "Methods"},
}...)
case protoreflect.MethodDescriptor:
rs.Append(rv, []methodAndName{
{rv.MethodByName("Input"), "Input"},
{rv.MethodByName("Output"), "Output"},
{rv.MethodByName("IsStreamingClient"), "IsStreamingClient"},
{rv.MethodByName("IsStreamingServer"), "IsStreamingServer"},
}...)
}
if m := rv.MethodByName("GoType"); m.IsValid() {
rs.Append(rv, methodAndName{m, "GoType"})
}
}
return start + rs.Join() + end
}
type records struct {
recs [][2]string
allowMulti bool
// record is a function that will be called for every Append() or
// AppendRecs() call, to be used for testing with the
// InternalFormatDescOptForTesting function.
record func(string)
}
func (rs *records) AppendRecs(fieldName string, newRecs [2]string) {
if rs.record != nil {
rs.record(fieldName)
}
rs.recs = append(rs.recs, newRecs)
}
func (rs *records) Append(v reflect.Value, accessors ...methodAndName) {
for _, a := range accessors {
if rs.record != nil {
rs.record(a.name)
}
var rv reflect.Value
if a.method.IsValid() {
rv = a.method.Call(nil)[0]
}
if v.Kind() == reflect.Struct && !rv.IsValid() {
rv = v.FieldByName(a.name)
}
if !rv.IsValid() {
panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a.name))
}
if _, ok := rv.Interface().(protoreflect.Value); ok {
rv = rv.MethodByName("Interface").Call(nil)[0]
if !rv.IsNil() {
rv = rv.Elem()
}
}
// Ignore zero values.
var isZero bool
switch rv.Kind() {
case reflect.Interface, reflect.Slice:
isZero = rv.IsNil()
case reflect.Bool:
isZero = rv.Bool() == false
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
isZero = rv.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
isZero = rv.Uint() == 0
case reflect.String:
isZero = rv.String() == ""
}
if n, ok := rv.Interface().(list); ok {
isZero = n.Len() == 0
}
if isZero {
continue
}
// Format the value.
var s string
v := rv.Interface()
switch v := v.(type) {
case list:
s = formatListOpt(v, false, rs.allowMulti)
case protoreflect.FieldDescriptor, protoreflect.OneofDescriptor, protoreflect.EnumValueDescriptor, protoreflect.MethodDescriptor:
s = string(v.(protoreflect.Descriptor).Name())
case protoreflect.Descriptor:
s = string(v.FullName())
case string:
s = strconv.Quote(v)
case []byte:
s = fmt.Sprintf("%q", v)
default:
s = fmt.Sprint(v)
}
rs.recs = append(rs.recs, [2]string{a.name, s})
}
}
func (rs *records) Join() string {
var ss []string
// In single line mode, simply join all records with commas.
if !rs.allowMulti {
for _, r := range rs.recs {
ss = append(ss, r[0]+formatColon(0)+r[1])
}
return joinStrings(ss, false)
}
// In multi-line (allowMulti) mode, align single-line records for more readable output.
var maxLen int
flush := func(i int) {
for _, r := range rs.recs[len(ss):i] {
ss = append(ss, r[0]+formatColon(maxLen-len(r[0]))+r[1])
}
maxLen = 0
}
for i, r := range rs.recs {
if isMulti := strings.Contains(r[1], "\n"); isMulti {
flush(i)
ss = append(ss, r[0]+formatColon(0)+strings.Join(strings.Split(r[1], "\n"), "\n\t"))
} else if maxLen < len(r[0]) {
maxLen = len(r[0])
}
}
flush(len(rs.recs))
return joinStrings(ss, true)
}
func formatColon(padding int) string {
// Deliberately introduce instability into the debug output to
// discourage users from performing string comparisons.
// This provides us flexibility to change the output in the future.
if detrand.Bool() {
return ":" + strings.Repeat(" ", 1+padding) // use non-breaking spaces (U+00a0)
} else {
return ":" + strings.Repeat(" ", 1+padding) // use regular spaces (U+0020)
}
}
func joinStrings(ss []string, isMulti bool) string {
if len(ss) == 0 {
return ""
}
if isMulti {
return "\n\t" + strings.Join(ss, "\n\t") + "\n"
}
return strings.Join(ss, ", ")
}
View File

@ -1,29 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package descopts contains the nil pointers to concrete descriptor options.
//
// This package exists as a form of reverse dependency injection so that certain
// packages (e.g., internal/filedesc and internal/filetype) can avoid a direct
// dependency on the descriptor proto package).
package descopts
import pref "google.golang.org/protobuf/reflect/protoreflect"
// These variables are set by the init function in descriptor.pb.go via logic
// in internal/filetype. In other words, so long as the descriptor proto package
// is linked in, these variables will be populated.
//
// Each variable is populated with a nil pointer to the options struct.
var (
File pref.ProtoMessage
Enum pref.ProtoMessage
EnumValue pref.ProtoMessage
Message pref.ProtoMessage
Field pref.ProtoMessage
Oneof pref.ProtoMessage
ExtensionRange pref.ProtoMessage
Service pref.ProtoMessage
Method pref.ProtoMessage
)
View File

@ -1,69 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package detrand provides deterministically random functionality.
//
// The pseudo-randomness of these functions is seeded by the program binary
// itself and guarantees that the output does not change within a program,
// while ensuring that the output is unstable across different builds.
package detrand
import (
"encoding/binary"
"hash/fnv"
"os"
)
// Disable disables detrand such that all functions return the zero value.
// This function is not concurrent-safe and must be called during program init.
func Disable() {
randSeed = 0
}
// Bool returns a deterministically random boolean.
func Bool() bool {
return randSeed%2 == 1
}
// Intn returns a deterministically random integer between 0 and n-1, inclusive.
func Intn(n int) int {
if n <= 0 {
panic("must be positive")
}
return int(randSeed % uint64(n))
}
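// Illustrative sketch, not part of the original vendored file: because the
// seed is derived from the program binary, repeated calls agree within a
// single process, while a different build may observe different values.
//
//	a, b := Bool(), Bool()
//	_ = a == b // always true within one binary; the value itself may differ per build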
// randSeed is a best-effort at an approximate hash of the Go binary.
var randSeed = binaryHash()
func binaryHash() uint64 {
// Open the Go binary.
s, err := os.Executable()
if err != nil {
return 0
}
f, err := os.Open(s)
if err != nil {
return 0
}
defer f.Close()
// Hash the size and several samples of the Go binary.
const numSamples = 8
var buf [64]byte
h := fnv.New64()
fi, err := f.Stat()
if err != nil {
return 0
}
binary.LittleEndian.PutUint64(buf[:8], uint64(fi.Size()))
h.Write(buf[:8])
for i := int64(0); i < numSamples; i++ {
if _, err := f.ReadAt(buf[:], i*fi.Size()/numSamples); err != nil {
return 0
}
h.Write(buf[:])
}
return h.Sum64()
}
View File

@ -1,12 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package editiondefaults contains the binary representation of the editions
// defaults.
package editiondefaults
import _ "embed"
//go:embed editions_defaults.binpb
var Defaults []byte
View File

@ -1,4 +0,0 @@
(binary contents of editions_defaults.binpb; not human-readable)
View File

@ -1,213 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package defval marshals and unmarshals textual forms of default values.
//
// This package handles both the form historically used in Go struct field tags
// and also the form used by google.protobuf.FieldDescriptorProto.default_value
// since they differ in superficial ways.
package defval
import (
"fmt"
"math"
"strconv"
ptext "google.golang.org/protobuf/internal/encoding/text"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/reflect/protoreflect"
)
// Format is the serialization format used to represent the default value.
type Format int
const (
_ Format = iota
// Descriptor uses the serialization format that protoc uses with the
// google.protobuf.FieldDescriptorProto.default_value field.
Descriptor
// GoTag uses the historical serialization format in Go struct field tags.
GoTag
)
// Unmarshal deserializes the default string s according to the given kind k.
// When k is an enum, a list of enum value descriptors must be provided.
func Unmarshal(s string, k protoreflect.Kind, evs protoreflect.EnumValueDescriptors, f Format) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) {
switch k {
case protoreflect.BoolKind:
if f == GoTag {
switch s {
case "1":
return protoreflect.ValueOfBool(true), nil, nil
case "0":
return protoreflect.ValueOfBool(false), nil, nil
}
} else {
switch s {
case "true":
return protoreflect.ValueOfBool(true), nil, nil
case "false":
return protoreflect.ValueOfBool(false), nil, nil
}
}
case protoreflect.EnumKind:
if f == GoTag {
// Go tags use the numeric form of the enum value.
if n, err := strconv.ParseInt(s, 10, 32); err == nil {
if ev := evs.ByNumber(protoreflect.EnumNumber(n)); ev != nil {
return protoreflect.ValueOfEnum(ev.Number()), ev, nil
}
}
} else {
// Descriptor default_value uses the enum identifier.
ev := evs.ByName(protoreflect.Name(s))
if ev != nil {
return protoreflect.ValueOfEnum(ev.Number()), ev, nil
}
}
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
if v, err := strconv.ParseInt(s, 10, 32); err == nil {
return protoreflect.ValueOfInt32(int32(v)), nil, nil
}
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
if v, err := strconv.ParseInt(s, 10, 64); err == nil {
return protoreflect.ValueOfInt64(int64(v)), nil, nil
}
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
if v, err := strconv.ParseUint(s, 10, 32); err == nil {
return protoreflect.ValueOfUint32(uint32(v)), nil, nil
}
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
if v, err := strconv.ParseUint(s, 10, 64); err == nil {
return protoreflect.ValueOfUint64(uint64(v)), nil, nil
}
case protoreflect.FloatKind, protoreflect.DoubleKind:
var v float64
var err error
switch s {
case "-inf":
v = math.Inf(-1)
case "inf":
v = math.Inf(+1)
case "nan":
v = math.NaN()
default:
v, err = strconv.ParseFloat(s, 64)
}
if err == nil {
if k == protoreflect.FloatKind {
return protoreflect.ValueOfFloat32(float32(v)), nil, nil
} else {
return protoreflect.ValueOfFloat64(float64(v)), nil, nil
}
}
case protoreflect.StringKind:
// String values are already unescaped and can be used as is.
return protoreflect.ValueOfString(s), nil, nil
case protoreflect.BytesKind:
if b, ok := unmarshalBytes(s); ok {
return protoreflect.ValueOfBytes(b), nil, nil
}
}
return protoreflect.Value{}, nil, errors.New("could not parse value for %v: %q", k, s)
}
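// Illustrative sketch, not part of the original vendored file: the two formats
// spell bool defaults differently, "1"/"0" for GoTag versus "true"/"false" for
// Descriptor, but both decode to the same protoreflect.Value.
//
//	v1, _, _ := Unmarshal("1", protoreflect.BoolKind, nil, GoTag)         // v1.Bool() == true
//	v2, _, _ := Unmarshal("true", protoreflect.BoolKind, nil, Descriptor) // v2.Bool() == true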
// Marshal serializes v as the default string according to the given kind k.
// When specifying the Descriptor format for an enum kind, the associated
// enum value descriptor must be provided.
func Marshal(v protoreflect.Value, ev protoreflect.EnumValueDescriptor, k protoreflect.Kind, f Format) (string, error) {
switch k {
case protoreflect.BoolKind:
if f == GoTag {
if v.Bool() {
return "1", nil
} else {
return "0", nil
}
} else {
if v.Bool() {
return "true", nil
} else {
return "false", nil
}
}
case protoreflect.EnumKind:
if f == GoTag {
return strconv.FormatInt(int64(v.Enum()), 10), nil
} else {
return string(ev.Name()), nil
}
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
return strconv.FormatInt(v.Int(), 10), nil
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
return strconv.FormatUint(v.Uint(), 10), nil
case protoreflect.FloatKind, protoreflect.DoubleKind:
f := v.Float()
switch {
case math.IsInf(f, -1):
return "-inf", nil
case math.IsInf(f, +1):
return "inf", nil
case math.IsNaN(f):
return "nan", nil
default:
if k == protoreflect.FloatKind {
return strconv.FormatFloat(f, 'g', -1, 32), nil
} else {
return strconv.FormatFloat(f, 'g', -1, 64), nil
}
}
case protoreflect.StringKind:
// String values are serialized as is without any escaping.
return v.String(), nil
case protoreflect.BytesKind:
if s, ok := marshalBytes(v.Bytes()); ok {
return s, nil
}
}
return "", errors.New("could not format value for %v: %v", k, v)
}
// unmarshalBytes deserializes bytes by applying C unescaping.
func unmarshalBytes(s string) ([]byte, bool) {
// Bytes values use the same escaping as the text format,
// however they lack the surrounding double quotes.
v, err := ptext.UnmarshalString(`"` + s + `"`)
if err != nil {
return nil, false
}
return []byte(v), true
}
// marshalBytes serializes bytes by using C escaping.
// To match the exact output of protoc, this is identical to the
// CEscape function in strutil.cc of the protoc source code.
func marshalBytes(b []byte) (string, bool) {
var s []byte
for _, c := range b {
switch c {
case '\n':
s = append(s, `\n`...)
case '\r':
s = append(s, `\r`...)
case '\t':
s = append(s, `\t`...)
case '"':
s = append(s, `\"`...)
case '\'':
s = append(s, `\'`...)
case '\\':
s = append(s, `\\`...)
default:
if printableASCII := c >= 0x20 && c <= 0x7e; printableASCII {
s = append(s, c)
} else {
s = append(s, fmt.Sprintf(`\%03o`, c)...)
}
}
}
return string(s), true
}
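// Illustrative sketch, not part of the original vendored file: bytes defaults
// round-trip through the C-style escaping above; a newline becomes `\n` and a
// non-printable byte becomes an octal escape such as `\001`.
//
//	s, _ := marshalBytes([]byte("a\nb\x01")) // `a\nb\001`
//	b, _ := unmarshalBytes(s)                // []byte("a\nb\x01")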
View File

@ -1,242 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package messageset encodes and decodes the obsolete MessageSet wire format.
package messageset
import (
"math"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/reflect/protoreflect"
)
// The MessageSet wire format is equivalent to a message defined as follows,
// where each Item defines an extension field with a field number of 'type_id'
// and content of 'message'. MessageSet extensions must be non-repeated message
// fields.
//
// message MessageSet {
// repeated group Item = 1 {
// required int32 type_id = 2;
// required string message = 3;
// }
// }
const (
FieldItem = protowire.Number(1)
FieldTypeID = protowire.Number(2)
FieldMessage = protowire.Number(3)
)
// ExtensionName is the field name for extensions of MessageSet.
//
// A valid MessageSet extension must be of the form:
//
// message MyMessage {
// extend proto2.bridge.MessageSet {
// optional MyMessage message_set_extension = 1234;
// }
// ...
// }
const ExtensionName = "message_set_extension"
// IsMessageSet returns whether the message uses the MessageSet wire format.
func IsMessageSet(md protoreflect.MessageDescriptor) bool {
xmd, ok := md.(interface{ IsMessageSet() bool })
return ok && xmd.IsMessageSet()
}
// IsMessageSetExtension reports whether this field properly extends a MessageSet.
func IsMessageSetExtension(fd protoreflect.FieldDescriptor) bool {
switch {
case fd.Name() != ExtensionName:
return false
case !IsMessageSet(fd.ContainingMessage()):
return false
case fd.FullName().Parent() != fd.Message().FullName():
return false
}
return true
}
// SizeField returns the size of a MessageSet item field containing an extension
// with the given field number, not counting the contents of the message subfield.
func SizeField(num protowire.Number) int {
return 2*protowire.SizeTag(FieldItem) + protowire.SizeTag(FieldTypeID) + protowire.SizeVarint(uint64(num))
}
// Unmarshal parses a MessageSet.
//
// It calls fn with the type ID and value of each item in the MessageSet.
// Unknown fields are discarded.
//
// If wantLen is true, the item values include the varint length prefix.
// This is ugly, but simplifies the fast-path decoder in internal/impl.
func Unmarshal(b []byte, wantLen bool, fn func(typeID protowire.Number, value []byte) error) error {
for len(b) > 0 {
num, wtyp, n := protowire.ConsumeTag(b)
if n < 0 {
return protowire.ParseError(n)
}
b = b[n:]
if num != FieldItem || wtyp != protowire.StartGroupType {
n := protowire.ConsumeFieldValue(num, wtyp, b)
if n < 0 {
return protowire.ParseError(n)
}
b = b[n:]
continue
}
typeID, value, n, err := ConsumeFieldValue(b, wantLen)
if err != nil {
return err
}
b = b[n:]
if typeID == 0 {
continue
}
if err := fn(typeID, value); err != nil {
return err
}
}
return nil
}
// ConsumeFieldValue parses b as a MessageSet item field value until and including
// the trailing end group marker. It assumes the start group tag has already been parsed.
// It returns the contents of the type_id and message subfields and the total
// item length.
//
// If wantLen is true, the returned message value includes the length prefix.
func ConsumeFieldValue(b []byte, wantLen bool) (typeid protowire.Number, message []byte, n int, err error) {
ilen := len(b)
for {
num, wtyp, n := protowire.ConsumeTag(b)
if n < 0 {
return 0, nil, 0, protowire.ParseError(n)
}
b = b[n:]
switch {
case num == FieldItem && wtyp == protowire.EndGroupType:
if wantLen && len(message) == 0 {
// The message field was missing, which should never happen.
// Be prepared for this case anyway.
message = protowire.AppendVarint(message, 0)
}
return typeid, message, ilen - len(b), nil
case num == FieldTypeID && wtyp == protowire.VarintType:
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return 0, nil, 0, protowire.ParseError(n)
}
b = b[n:]
if v < 1 || v > math.MaxInt32 {
return 0, nil, 0, errors.New("invalid type_id in message set")
}
typeid = protowire.Number(v)
case num == FieldMessage && wtyp == protowire.BytesType:
m, n := protowire.ConsumeBytes(b)
if n < 0 {
return 0, nil, 0, protowire.ParseError(n)
}
if message == nil {
if wantLen {
message = b[:n:n]
} else {
message = m[:len(m):len(m)]
}
} else {
// This case should never happen in practice, but handle it for
// correctness: The MessageSet item contains multiple message
// fields, which need to be merged.
//
// In the case where we're returning the length, this becomes
// quite inefficient since we need to strip the length off
// the existing data and reconstruct it with the combined length.
if wantLen {
_, nn := protowire.ConsumeVarint(message)
m0 := message[nn:]
message = nil
message = protowire.AppendVarint(message, uint64(len(m0)+len(m)))
message = append(message, m0...)
message = append(message, m...)
} else {
message = append(message, m...)
}
}
b = b[n:]
default:
// We have no place to put it, so we just ignore unknown fields.
n := protowire.ConsumeFieldValue(num, wtyp, b)
if n < 0 {
return 0, nil, 0, protowire.ParseError(n)
}
b = b[n:]
}
}
}
// AppendFieldStart appends the start of a MessageSet item field containing
// an extension with the given number. The caller must add the message
// subfield (including the tag).
func AppendFieldStart(b []byte, num protowire.Number) []byte {
b = protowire.AppendTag(b, FieldItem, protowire.StartGroupType)
b = protowire.AppendTag(b, FieldTypeID, protowire.VarintType)
b = protowire.AppendVarint(b, uint64(num))
return b
}
// AppendFieldEnd appends the trailing end group marker for a MessageSet item field.
func AppendFieldEnd(b []byte) []byte {
return protowire.AppendTag(b, FieldItem, protowire.EndGroupType)
}
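// Illustrative sketch, not part of the original vendored file: building one
// MessageSet item for type_id 1234 around an already-marshaled payload (the
// payload variable is assumed here).
//
//	var b []byte
//	b = AppendFieldStart(b, 1234)                                 // group start + type_id
//	b = protowire.AppendTag(b, FieldMessage, protowire.BytesType) // message subfield tag
//	b = protowire.AppendBytes(b, payload)                         // length-prefixed payload
//	b = AppendFieldEnd(b)                                         // group end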
// SizeUnknown returns the size of an unknown fields section in MessageSet format.
//
// See AppendUnknown.
func SizeUnknown(unknown []byte) (size int) {
for len(unknown) > 0 {
num, typ, n := protowire.ConsumeTag(unknown)
if n < 0 || typ != protowire.BytesType {
return 0
}
unknown = unknown[n:]
_, n = protowire.ConsumeBytes(unknown)
if n < 0 {
return 0
}
unknown = unknown[n:]
size += SizeField(num) + protowire.SizeTag(FieldMessage) + n
}
return size
}
// AppendUnknown appends unknown fields to b in MessageSet format.
//
// For historic reasons, unresolved items in a MessageSet are stored in a
// message's unknown fields section in non-MessageSet format. That is, an
// unknown item with typeID T and value V appears in the unknown fields as
// a field with number T and value V.
//
// This function converts the unknown fields back into MessageSet form.
func AppendUnknown(b, unknown []byte) ([]byte, error) {
for len(unknown) > 0 {
num, typ, n := protowire.ConsumeTag(unknown)
if n < 0 || typ != protowire.BytesType {
return nil, errors.New("invalid data in message set unknown fields")
}
unknown = unknown[n:]
_, n = protowire.ConsumeBytes(unknown)
if n < 0 {
return nil, errors.New("invalid data in message set unknown fields")
}
b = AppendFieldStart(b, num)
b = protowire.AppendTag(b, FieldMessage, protowire.BytesType)
b = append(b, unknown[:n]...)
b = AppendFieldEnd(b)
unknown = unknown[n:]
}
return b, nil
}
View File

@ -1,207 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tag marshals and unmarshals the legacy struct tags as generated
// by historical versions of protoc-gen-go.
package tag
import (
"reflect"
"strconv"
"strings"
"google.golang.org/protobuf/internal/encoding/defval"
"google.golang.org/protobuf/internal/filedesc"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/reflect/protoreflect"
)
var byteType = reflect.TypeOf(byte(0))
// Unmarshal decodes the tag into a prototype.Field.
//
// The goType is needed to determine the original protoreflect.Kind since the
// tag does not record sufficient information to determine that.
// The type is the underlying field type (e.g., a repeated field may be
// represented by []T, but the Go type passed in is just T).
// A list of enum value descriptors must be provided for enum fields.
// This does not populate the Enum or Message (except for weak message).
//
// This function is a best effort attempt; parsing errors are ignored.
func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
f := new(filedesc.Field)
f.L0.ParentFile = filedesc.SurrogateProto2
for len(tag) > 0 {
i := strings.IndexByte(tag, ',')
if i < 0 {
i = len(tag)
}
switch s := tag[:i]; {
case strings.HasPrefix(s, "name="):
f.L0.FullName = protoreflect.FullName(s[len("name="):])
case strings.Trim(s, "0123456789") == "":
n, _ := strconv.ParseUint(s, 10, 32)
f.L1.Number = protoreflect.FieldNumber(n)
case s == "opt":
f.L1.Cardinality = protoreflect.Optional
case s == "req":
f.L1.Cardinality = protoreflect.Required
case s == "rep":
f.L1.Cardinality = protoreflect.Repeated
case s == "varint":
switch goType.Kind() {
case reflect.Bool:
f.L1.Kind = protoreflect.BoolKind
case reflect.Int32:
f.L1.Kind = protoreflect.Int32Kind
case reflect.Int64:
f.L1.Kind = protoreflect.Int64Kind
case reflect.Uint32:
f.L1.Kind = protoreflect.Uint32Kind
case reflect.Uint64:
f.L1.Kind = protoreflect.Uint64Kind
}
case s == "zigzag32":
if goType.Kind() == reflect.Int32 {
f.L1.Kind = protoreflect.Sint32Kind
}
case s == "zigzag64":
if goType.Kind() == reflect.Int64 {
f.L1.Kind = protoreflect.Sint64Kind
}
case s == "fixed32":
switch goType.Kind() {
case reflect.Int32:
f.L1.Kind = protoreflect.Sfixed32Kind
case reflect.Uint32:
f.L1.Kind = protoreflect.Fixed32Kind
case reflect.Float32:
f.L1.Kind = protoreflect.FloatKind
}
case s == "fixed64":
switch goType.Kind() {
case reflect.Int64:
f.L1.Kind = protoreflect.Sfixed64Kind
case reflect.Uint64:
f.L1.Kind = protoreflect.Fixed64Kind
case reflect.Float64:
f.L1.Kind = protoreflect.DoubleKind
}
case s == "bytes":
switch {
case goType.Kind() == reflect.String:
f.L1.Kind = protoreflect.StringKind
case goType.Kind() == reflect.Slice && goType.Elem() == byteType:
f.L1.Kind = protoreflect.BytesKind
default:
f.L1.Kind = protoreflect.MessageKind
}
case s == "group":
f.L1.Kind = protoreflect.GroupKind
case strings.HasPrefix(s, "enum="):
f.L1.Kind = protoreflect.EnumKind
case strings.HasPrefix(s, "json="):
jsonName := s[len("json="):]
if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) {
f.L1.StringName.InitJSON(jsonName)
}
case s == "packed":
f.L1.HasPacked = true
f.L1.IsPacked = true
case strings.HasPrefix(s, "weak="):
f.L1.IsWeak = true
f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):]))
case strings.HasPrefix(s, "def="):
// The default tag is special in that everything afterwards is the
// default regardless of the presence of commas.
s, i = tag[len("def="):], len(tag)
v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag)
f.L1.Default = filedesc.DefaultValue(v, ev)
case s == "proto3":
f.L0.ParentFile = filedesc.SurrogateProto3
}
tag = strings.TrimPrefix(tag[i:], ",")
}
// The generator uses the group message name instead of the field name.
// We obtain the real field name by lowercasing the group name.
if f.L1.Kind == protoreflect.GroupKind {
f.L0.FullName = protoreflect.FullName(strings.ToLower(string(f.L0.FullName)))
}
return f
}
// Marshal encodes the protoreflect.FieldDescriptor as a tag.
//
// The enumName must be provided if the kind is an enum.
// Historically, the formulation of the enum "name" was the proto package
// dot-concatenated with the generated Go identifier for the enum type.
// Depending on the context on how Marshal is called, there are different ways
// through which that information is determined. As such it is the caller's
// responsibility to provide a function to obtain that information.
func Marshal(fd protoreflect.FieldDescriptor, enumName string) string {
var tag []string
switch fd.Kind() {
case protoreflect.BoolKind, protoreflect.EnumKind, protoreflect.Int32Kind, protoreflect.Uint32Kind, protoreflect.Int64Kind, protoreflect.Uint64Kind:
tag = append(tag, "varint")
case protoreflect.Sint32Kind:
tag = append(tag, "zigzag32")
case protoreflect.Sint64Kind:
tag = append(tag, "zigzag64")
case protoreflect.Sfixed32Kind, protoreflect.Fixed32Kind, protoreflect.FloatKind:
tag = append(tag, "fixed32")
case protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind, protoreflect.DoubleKind:
tag = append(tag, "fixed64")
case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind:
tag = append(tag, "bytes")
case protoreflect.GroupKind:
tag = append(tag, "group")
}
tag = append(tag, strconv.Itoa(int(fd.Number())))
switch fd.Cardinality() {
case protoreflect.Optional:
tag = append(tag, "opt")
case protoreflect.Required:
tag = append(tag, "req")
case protoreflect.Repeated:
tag = append(tag, "rep")
}
if fd.IsPacked() {
tag = append(tag, "packed")
}
name := string(fd.Name())
if fd.Kind() == protoreflect.GroupKind {
// The name of the FieldDescriptor for a group field is
// lowercased. To find the original capitalization, we
// look in the field's MessageType.
name = string(fd.Message().Name())
}
tag = append(tag, "name="+name)
if jsonName := fd.JSONName(); jsonName != "" && jsonName != name && !fd.IsExtension() {
// NOTE: The jsonName != name condition is suspect, but it preserves
// the exact same semantics from the previous generator.
tag = append(tag, "json="+jsonName)
}
if fd.IsWeak() {
tag = append(tag, "weak="+string(fd.Message().FullName()))
}
// The previous implementation does not tag extension fields as proto3,
// even when the field is defined in a proto3 file. Match that behavior
// for consistency.
if fd.Syntax() == protoreflect.Proto3 && !fd.IsExtension() {
tag = append(tag, "proto3")
}
if fd.Kind() == protoreflect.EnumKind && enumName != "" {
tag = append(tag, "enum="+enumName)
}
if fd.ContainingOneof() != nil {
tag = append(tag, "oneof")
}
// This must appear last in the tag, since commas in strings aren't escaped.
if fd.HasDefault() {
def, _ := defval.Marshal(fd.Default(), fd.DefaultEnumValue(), fd.Kind(), defval.GoTag)
tag = append(tag, "def="+def)
}
return strings.Join(tag, ",")
}
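// Illustrative sketch, not part of the original vendored file: for a proto3
// field `string name = 1;` the accessors above yield a legacy tag roughly of
// the form "bytes,1,opt,name=name,proto3", and Unmarshal reverses it.
//
//	fd := Unmarshal("bytes,1,opt,name=name,proto3", reflect.TypeOf(""), nil)
//	_ = Marshal(fd, "") // "bytes,1,opt,name=name,proto3"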
View File

@ -1,686 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package text
import (
"bytes"
"fmt"
"io"
"strconv"
"unicode/utf8"
"google.golang.org/protobuf/internal/errors"
)
// Decoder is a token-based textproto decoder.
type Decoder struct {
// lastCall is the last method called, either readCall or peekCall.
// Initial value is readCall.
lastCall call
// lastToken contains the last read token.
lastToken Token
// lastErr contains the last read error.
lastErr error
// openStack is a stack containing the byte characters for MessageOpen and
// ListOpen kinds. The top of stack represents the message or the list that
// the current token is nested in. An empty stack means the current token is
// at the top level message. The characters '{' and '<' both represent the
// MessageOpen kind.
openStack []byte
// orig is used in reporting line and column.
orig []byte
// in contains the unconsumed input.
in []byte
}
// NewDecoder returns a Decoder to read the given []byte.
func NewDecoder(b []byte) *Decoder {
return &Decoder{orig: b, in: b}
}
// ErrUnexpectedEOF means that EOF was encountered in the middle of the input.
var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF)
// call specifies which Decoder method was invoked.
type call uint8
const (
readCall call = iota
peekCall
)
// Peek looks ahead and returns the next token and error without advancing a read.
func (d *Decoder) Peek() (Token, error) {
defer func() { d.lastCall = peekCall }()
if d.lastCall == readCall {
d.lastToken, d.lastErr = d.Read()
}
return d.lastToken, d.lastErr
}
// Read returns the next token.
// It will return an error if there is no valid token.
func (d *Decoder) Read() (Token, error) {
defer func() { d.lastCall = readCall }()
if d.lastCall == peekCall {
return d.lastToken, d.lastErr
}
tok, err := d.parseNext(d.lastToken.kind)
if err != nil {
return Token{}, err
}
switch tok.kind {
case comma, semicolon:
tok, err = d.parseNext(tok.kind)
if err != nil {
return Token{}, err
}
}
d.lastToken = tok
return tok, nil
}
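// Illustrative sketch, not part of the original vendored file: a minimal token
// loop over a textproto input (fmt is already imported in this file).
//
//	d := NewDecoder([]byte(`foo: 1 bar: "baz"`))
//	for {
//		tok, err := d.Read()
//		if err != nil || tok.Kind() == EOF {
//			break
//		}
//		fmt.Println(tok.Kind(), tok.RawString())
//	}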
const (
mismatchedFmt = "mismatched close character %q"
unexpectedFmt = "unexpected character %q"
)
// parseNext parses the next Token based on given last kind.
func (d *Decoder) parseNext(lastKind Kind) (Token, error) {
// Trim leading spaces.
d.consume(0)
isEOF := false
if len(d.in) == 0 {
isEOF = true
}
switch lastKind {
case EOF:
return d.consumeToken(EOF, 0, 0), nil
case bof:
// Start of top level message. Next token can be EOF or Name.
if isEOF {
return d.consumeToken(EOF, 0, 0), nil
}
return d.parseFieldName()
case Name:
// Next token can be MessageOpen, ListOpen or Scalar.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case '{', '<':
d.pushOpenStack(ch)
return d.consumeToken(MessageOpen, 1, 0), nil
case '[':
d.pushOpenStack(ch)
return d.consumeToken(ListOpen, 1, 0), nil
default:
return d.parseScalar()
}
case Scalar:
openKind, closeCh := d.currentOpenKind()
switch openKind {
case bof:
// Top level message.
// Next token can be EOF, comma, semicolon or Name.
if isEOF {
return d.consumeToken(EOF, 0, 0), nil
}
switch d.in[0] {
case ',':
return d.consumeToken(comma, 1, 0), nil
case ';':
return d.consumeToken(semicolon, 1, 0), nil
default:
return d.parseFieldName()
}
case MessageOpen:
// Next token can be MessageClose, comma, semicolon or Name.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case closeCh:
d.popOpenStack()
return d.consumeToken(MessageClose, 1, 0), nil
case otherCloseChar[closeCh]:
return Token{}, d.newSyntaxError(mismatchedFmt, ch)
case ',':
return d.consumeToken(comma, 1, 0), nil
case ';':
return d.consumeToken(semicolon, 1, 0), nil
default:
return d.parseFieldName()
}
case ListOpen:
// Next token can be ListClose or comma.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case ']':
d.popOpenStack()
return d.consumeToken(ListClose, 1, 0), nil
case ',':
return d.consumeToken(comma, 1, 0), nil
default:
return Token{}, d.newSyntaxError(unexpectedFmt, ch)
}
}
case MessageOpen:
// Next token can be MessageClose or Name.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
_, closeCh := d.currentOpenKind()
switch ch := d.in[0]; ch {
case closeCh:
d.popOpenStack()
return d.consumeToken(MessageClose, 1, 0), nil
case otherCloseChar[closeCh]:
return Token{}, d.newSyntaxError(mismatchedFmt, ch)
default:
return d.parseFieldName()
}
case MessageClose:
openKind, closeCh := d.currentOpenKind()
switch openKind {
case bof:
// Top level message.
// Next token can be EOF, comma, semicolon or Name.
if isEOF {
return d.consumeToken(EOF, 0, 0), nil
}
switch ch := d.in[0]; ch {
case ',':
return d.consumeToken(comma, 1, 0), nil
case ';':
return d.consumeToken(semicolon, 1, 0), nil
default:
return d.parseFieldName()
}
case MessageOpen:
// Next token can be MessageClose, comma, semicolon or Name.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case closeCh:
d.popOpenStack()
return d.consumeToken(MessageClose, 1, 0), nil
case otherCloseChar[closeCh]:
return Token{}, d.newSyntaxError(mismatchedFmt, ch)
case ',':
return d.consumeToken(comma, 1, 0), nil
case ';':
return d.consumeToken(semicolon, 1, 0), nil
default:
return d.parseFieldName()
}
case ListOpen:
// Next token can be ListClose or comma
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case closeCh:
d.popOpenStack()
return d.consumeToken(ListClose, 1, 0), nil
case ',':
return d.consumeToken(comma, 1, 0), nil
default:
return Token{}, d.newSyntaxError(unexpectedFmt, ch)
}
}
case ListOpen:
// Next token can be ListClose, MessageStart or Scalar.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case ']':
d.popOpenStack()
return d.consumeToken(ListClose, 1, 0), nil
case '{', '<':
d.pushOpenStack(ch)
return d.consumeToken(MessageOpen, 1, 0), nil
default:
return d.parseScalar()
}
case ListClose:
openKind, closeCh := d.currentOpenKind()
switch openKind {
case bof:
// Top level message.
// Next token can be EOF, comma, semicolon or Name.
if isEOF {
return d.consumeToken(EOF, 0, 0), nil
}
switch ch := d.in[0]; ch {
case ',':
return d.consumeToken(comma, 1, 0), nil
case ';':
return d.consumeToken(semicolon, 1, 0), nil
default:
return d.parseFieldName()
}
case MessageOpen:
// Next token can be MessageClose, comma, semicolon or Name.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case closeCh:
d.popOpenStack()
return d.consumeToken(MessageClose, 1, 0), nil
case otherCloseChar[closeCh]:
return Token{}, d.newSyntaxError(mismatchedFmt, ch)
case ',':
return d.consumeToken(comma, 1, 0), nil
case ';':
return d.consumeToken(semicolon, 1, 0), nil
default:
return d.parseFieldName()
}
default:
// It is not possible to have this case. Let it panic below.
}
case comma, semicolon:
openKind, closeCh := d.currentOpenKind()
switch openKind {
case bof:
// Top level message. Next token can be EOF or Name.
if isEOF {
return d.consumeToken(EOF, 0, 0), nil
}
return d.parseFieldName()
case MessageOpen:
// Next token can be MessageClose or Name.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case closeCh:
d.popOpenStack()
return d.consumeToken(MessageClose, 1, 0), nil
case otherCloseChar[closeCh]:
return Token{}, d.newSyntaxError(mismatchedFmt, ch)
default:
return d.parseFieldName()
}
case ListOpen:
if lastKind == semicolon {
// It is not possible to have this case as logic here
// should not have produced a semicolon Token when inside a
// list. Let it panic below.
break
}
// Next token can be MessageOpen or Scalar.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case '{', '<':
d.pushOpenStack(ch)
return d.consumeToken(MessageOpen, 1, 0), nil
default:
return d.parseScalar()
}
}
}
line, column := d.Position(len(d.orig) - len(d.in))
panic(fmt.Sprintf("Decoder.parseNext: bug at handling line %d:%d with lastKind=%v", line, column, lastKind))
}
var otherCloseChar = map[byte]byte{
'}': '>',
'>': '}',
}
// currentOpenKind indicates whether current position is inside a message, list
// or top-level message by returning MessageOpen, ListOpen or bof respectively.
// If the returned kind is either a MessageOpen or ListOpen, it also returns the
// corresponding closing character.
func (d *Decoder) currentOpenKind() (Kind, byte) {
if len(d.openStack) == 0 {
return bof, 0
}
openCh := d.openStack[len(d.openStack)-1]
switch openCh {
case '{':
return MessageOpen, '}'
case '<':
return MessageOpen, '>'
case '[':
return ListOpen, ']'
}
panic(fmt.Sprintf("Decoder: openStack contains invalid byte %c", openCh))
}
func (d *Decoder) pushOpenStack(ch byte) {
d.openStack = append(d.openStack, ch)
}
func (d *Decoder) popOpenStack() {
d.openStack = d.openStack[:len(d.openStack)-1]
}
// parseFieldName parses field name and separator.
func (d *Decoder) parseFieldName() (tok Token, err error) {
defer func() {
if err == nil && d.tryConsumeChar(':') {
tok.attrs |= hasSeparator
}
}()
// Extension or Any type URL.
if d.in[0] == '[' {
return d.parseTypeName()
}
// Identifier.
if size := parseIdent(d.in, false); size > 0 {
return d.consumeToken(Name, size, uint8(IdentName)), nil
}
// Field number. Identify if the input is a valid number that is not negative
// and is a decimal integer within the 32-bit range.
if num := parseNumber(d.in); num.size > 0 {
str := num.string(d.in)
if !num.neg && num.kind == numDec {
if _, err := strconv.ParseInt(str, 10, 32); err == nil {
return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil
}
}
return Token{}, d.newSyntaxError("invalid field number: %s", str)
}
return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in))
}
// parseTypeName parses Any type URL or extension field name. The name is
// enclosed in [ and ] characters. The C++ parser does not handle many legal URL
// strings. This implementation is more liberal and allows for the pattern
// `^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*`. Whitespaces and comments are allowed
// in between [ ], '.', '/' and the sub names.
func (d *Decoder) parseTypeName() (Token, error) {
startPos := len(d.orig) - len(d.in)
// Use alias s to advance first in order to use d.in for error handling.
// Caller already checks for [ as first character.
s := consume(d.in[1:], 0)
if len(s) == 0 {
return Token{}, ErrUnexpectedEOF
}
var name []byte
for len(s) > 0 && isTypeNameChar(s[0]) {
name = append(name, s[0])
s = s[1:]
}
s = consume(s, 0)
var closed bool
for len(s) > 0 && !closed {
switch {
case s[0] == ']':
s = s[1:]
closed = true
case s[0] == '/', s[0] == '.':
if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') {
return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
d.orig[startPos:len(d.orig)-len(s)+1])
}
name = append(name, s[0])
s = s[1:]
s = consume(s, 0)
for len(s) > 0 && isTypeNameChar(s[0]) {
name = append(name, s[0])
s = s[1:]
}
s = consume(s, 0)
default:
return Token{}, d.newSyntaxError(
"invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1])
}
}
if !closed {
return Token{}, ErrUnexpectedEOF
}
// First character cannot be '.'. Last character cannot be '.' or '/'.
size := len(name)
if size == 0 || name[0] == '.' || name[size-1] == '.' || name[size-1] == '/' {
return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
d.orig[startPos:len(d.orig)-len(s)])
}
d.in = s
endPos := len(d.orig) - len(d.in)
d.consume(0)
return Token{
kind: Name,
attrs: uint8(TypeName),
pos: startPos,
raw: d.orig[startPos:endPos],
str: string(name),
}, nil
}
func isTypeNameChar(b byte) bool {
return (b == '-' || b == '_' ||
('0' <= b && b <= '9') ||
('a' <= b && b <= 'z') ||
('A' <= b && b <= 'Z'))
}
func isWhiteSpace(b byte) bool {
switch b {
case ' ', '\n', '\r', '\t':
return true
default:
return false
}
}
// parseIdent parses an unquoted proto identifier and returns size.
// If allowNeg is true, it allows '-' to be the first character in the
// identifier. This is used when parsing literal values like -infinity, etc.
// Regular expression matches an identifier: `^[_a-zA-Z][_a-zA-Z0-9]*`
func parseIdent(input []byte, allowNeg bool) int {
var size int
s := input
if len(s) == 0 {
return 0
}
if allowNeg && s[0] == '-' {
s = s[1:]
size++
if len(s) == 0 {
return 0
}
}
switch {
case s[0] == '_',
'a' <= s[0] && s[0] <= 'z',
'A' <= s[0] && s[0] <= 'Z':
s = s[1:]
size++
default:
return 0
}
for len(s) > 0 && (s[0] == '_' ||
'a' <= s[0] && s[0] <= 'z' ||
'A' <= s[0] && s[0] <= 'Z' ||
'0' <= s[0] && s[0] <= '9') {
s = s[1:]
size++
}
if len(s) > 0 && !isDelim(s[0]) {
return 0
}
return size
}
// parseScalar parses for a string, literal or number value.
func (d *Decoder) parseScalar() (Token, error) {
if d.in[0] == '"' || d.in[0] == '\'' {
return d.parseStringValue()
}
if tok, ok := d.parseLiteralValue(); ok {
return tok, nil
}
if tok, ok := d.parseNumberValue(); ok {
return tok, nil
}
return Token{}, d.newSyntaxError("invalid scalar value: %s", errId(d.in))
}
// parseLiteralValue parses a literal value. A literal value is used for
// bools, special floats and enums. This function simply identifies that the
// field value is a literal.
func (d *Decoder) parseLiteralValue() (Token, bool) {
size := parseIdent(d.in, true)
if size == 0 {
return Token{}, false
}
return d.consumeToken(Scalar, size, literalValue), true
}
// consumeToken constructs a Token for given Kind from d.in and consumes given
// size-length from it.
func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token {
// Important to compute raw and pos before consuming.
tok := Token{
kind: kind,
attrs: attrs,
pos: len(d.orig) - len(d.in),
raw: d.in[:size],
}
d.consume(size)
return tok
}
// newSyntaxError returns a syntax error with line and column information for
// current position.
func (d *Decoder) newSyntaxError(f string, x ...interface{}) error {
e := errors.New(f, x...)
line, column := d.Position(len(d.orig) - len(d.in))
return errors.New("syntax error (line %d:%d): %v", line, column, e)
}
// Position returns line and column number of given index of the original input.
// It will panic if index is out of range.
func (d *Decoder) Position(idx int) (line int, column int) {
b := d.orig[:idx]
line = bytes.Count(b, []byte("\n")) + 1
if i := bytes.LastIndexByte(b, '\n'); i >= 0 {
b = b[i+1:]
}
column = utf8.RuneCount(b) + 1 // ignore multi-rune characters
return line, column
}
func (d *Decoder) tryConsumeChar(c byte) bool {
if len(d.in) > 0 && d.in[0] == c {
d.consume(1)
return true
}
return false
}
// consume consumes n bytes of input and any subsequent whitespace or comments.
func (d *Decoder) consume(n int) {
d.in = consume(d.in, n)
return
}
// consume consumes n bytes of input and any subsequent whitespace or comments.
func consume(b []byte, n int) []byte {
b = b[n:]
for len(b) > 0 {
switch b[0] {
case ' ', '\n', '\r', '\t':
b = b[1:]
case '#':
if i := bytes.IndexByte(b, '\n'); i >= 0 {
b = b[i+len("\n"):]
} else {
b = nil
}
default:
return b
}
}
return b
}
// errId extracts a byte sequence that looks like an invalid ID
// (for the purposes of error reporting).
func errId(seq []byte) []byte {
const maxLen = 32
for i := 0; i < len(seq); {
if i > maxLen {
return append(seq[:i:i], "…"...)
}
r, size := utf8.DecodeRune(seq[i:])
if r > utf8.RuneSelf || (r != '/' && isDelim(byte(r))) {
if i == 0 {
// Either the first byte is invalid UTF-8 or a
// delimiter, or the first rune is non-ASCII.
// Return it as-is.
i = size
}
return seq[:i:i]
}
i += size
}
// No delimiter found.
return seq
}
// isDelim returns true if given byte is a delimiter character.
func isDelim(c byte) bool {
return !(c == '-' || c == '+' || c == '.' || c == '_' ||
('a' <= c && c <= 'z') ||
('A' <= c && c <= 'Z') ||
('0' <= c && c <= '9'))
}
View File

@ -1,211 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package text
// parseNumberValue parses a number from the input and returns a Token object.
func (d *Decoder) parseNumberValue() (Token, bool) {
in := d.in
num := parseNumber(in)
if num.size == 0 {
return Token{}, false
}
numAttrs := num.kind
if num.neg {
numAttrs |= isNegative
}
tok := Token{
kind: Scalar,
attrs: numberValue,
pos: len(d.orig) - len(d.in),
raw: d.in[:num.size],
str: num.string(d.in),
numAttrs: numAttrs,
}
d.consume(num.size)
return tok, true
}
const (
numDec uint8 = (1 << iota) / 2
numHex
numOct
numFloat
)
// number is the result of parsing out a valid number from parseNumber. It
// contains data for doing float or integer conversion via the strconv package
// in conjunction with the input bytes.
type number struct {
kind uint8
neg bool
size int
// if neg, this is the length of whitespace and comments between
// the minus sign and the rest of the number literal
sep int
}
func (num number) string(data []byte) string {
strSize := num.size
last := num.size - 1
if num.kind == numFloat && (data[last] == 'f' || data[last] == 'F') {
strSize = last
}
if num.neg && num.sep > 0 {
// strip whitespace/comments between negative sign and the rest
strLen := strSize - num.sep
str := make([]byte, strLen)
str[0] = data[0]
copy(str[1:], data[num.sep+1:strSize])
return string(str)
}
return string(data[:strSize])
}
// parseNumber constructs a number object from given input. It allows for the
// following patterns:
//
// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*)
// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?)
//
// It also returns the number of parsed bytes for the given number, 0 if it is
// not a number.
func parseNumber(input []byte) number {
kind := numDec
var size int
var neg bool
s := input
if len(s) == 0 {
return number{}
}
// Optional -
var sep int
if s[0] == '-' {
neg = true
s = s[1:]
size++
// Consume any whitespace or comments between the
// negative sign and the rest of the number
lenBefore := len(s)
s = consume(s, 0)
sep = lenBefore - len(s)
size += sep
if len(s) == 0 {
return number{}
}
}
switch {
case s[0] == '0':
if len(s) > 1 {
switch {
case s[1] == 'x' || s[1] == 'X':
// Parse as hex number.
kind = numHex
n := 2
s = s[2:]
for len(s) > 0 && (('0' <= s[0] && s[0] <= '9') ||
('a' <= s[0] && s[0] <= 'f') ||
('A' <= s[0] && s[0] <= 'F')) {
s = s[1:]
n++
}
if n == 2 {
return number{}
}
size += n
case '0' <= s[1] && s[1] <= '7':
// Parse as octal number.
kind = numOct
n := 2
s = s[2:]
for len(s) > 0 && '0' <= s[0] && s[0] <= '7' {
s = s[1:]
n++
}
size += n
}
if kind&(numHex|numOct) > 0 {
if len(s) > 0 && !isDelim(s[0]) {
return number{}
}
return number{kind: kind, neg: neg, size: size, sep: sep}
}
}
s = s[1:]
size++
case '1' <= s[0] && s[0] <= '9':
n := 1
s = s[1:]
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
s = s[1:]
n++
}
size += n
case s[0] == '.':
// Set kind to numFloat to signify the intent to parse as float, and that
// it needs to have other digits after '.'.
kind = numFloat
default:
return number{}
}
// . followed by 0 or more digits.
if len(s) > 0 && s[0] == '.' {
n := 1
s = s[1:]
// If decimal point was before any digits, it should be followed by
// other digits.
if len(s) == 0 && kind == numFloat {
return number{}
}
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
s = s[1:]
n++
}
size += n
kind = numFloat
}
// e or E followed by an optional - or + and 1 or more digits.
if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
kind = numFloat
s = s[1:]
n := 1
if s[0] == '+' || s[0] == '-' {
s = s[1:]
n++
if len(s) == 0 {
return number{}
}
}
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
s = s[1:]
n++
}
size += n
}
// Optional suffix f or F for floats.
if len(s) > 0 && (s[0] == 'f' || s[0] == 'F') {
kind = numFloat
s = s[1:]
size++
}
// Check that next byte is a delimiter or it is at the end.
if len(s) > 0 && !isDelim(s[0]) {
return number{}
}
return number{kind: kind, neg: neg, size: size, sep: sep}
}
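// Illustrative sketch, not part of the original vendored file: the grammar
// above accepts decimal, hex, octal and float forms, for example:
//
//	parseNumber([]byte("-123"))  // kind numDec, neg true, size 4
//	parseNumber([]byte("0x1f"))  // kind numHex, size 4
//	parseNumber([]byte("1.5e2")) // kind numFloat, size 5
//	parseNumber([]byte("abc"))   // size 0: not a number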
View File

@ -1,161 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package text
import (
"bytes"
"strconv"
"strings"
"unicode"
"unicode/utf16"
"unicode/utf8"
"google.golang.org/protobuf/internal/strs"
)
// parseStringValue parses string field token.
// This differs from parseString since the text format allows
// multiple back-to-back string literals where they are semantically treated
// as a single large string with all values concatenated.
//
// E.g., `"foo" "bar" "baz"` => "foobarbaz"
func (d *Decoder) parseStringValue() (Token, error) {
// Note that the ending quote is sufficient to unambiguously mark the end
// of a string. Thus, the text grammar does not require intervening
// whitespace or control characters in-between strings.
// Thus, the following is valid:
// `"foo"'bar'"baz"` => "foobarbaz"
in0 := d.in
var ss []string
for len(d.in) > 0 && (d.in[0] == '"' || d.in[0] == '\'') {
s, err := d.parseString()
if err != nil {
return Token{}, err
}
ss = append(ss, s)
}
// d.in already points to the end of the value at this point.
return Token{
kind: Scalar,
attrs: stringValue,
pos: len(d.orig) - len(in0),
raw: in0[:len(in0)-len(d.in)],
str: strings.Join(ss, ""),
}, nil
}
// parseString parses a string value enclosed in " or '.
func (d *Decoder) parseString() (string, error) {
in := d.in
if len(in) == 0 {
return "", ErrUnexpectedEOF
}
quote := in[0]
in = in[1:]
i := indexNeedEscapeInBytes(in)
in, out := in[i:], in[:i:i] // set cap to prevent mutations
for len(in) > 0 {
switch r, n := utf8.DecodeRune(in); {
case r == utf8.RuneError && n == 1:
return "", d.newSyntaxError("invalid UTF-8 detected")
case r == 0 || r == '\n':
return "", d.newSyntaxError("invalid character %q in string", r)
case r == rune(quote):
in = in[1:]
d.consume(len(d.in) - len(in))
return string(out), nil
case r == '\\':
if len(in) < 2 {
return "", ErrUnexpectedEOF
}
switch r := in[1]; r {
case '"', '\'', '\\', '?':
in, out = in[2:], append(out, r)
case 'a':
in, out = in[2:], append(out, '\a')
case 'b':
in, out = in[2:], append(out, '\b')
case 'n':
in, out = in[2:], append(out, '\n')
case 'r':
in, out = in[2:], append(out, '\r')
case 't':
in, out = in[2:], append(out, '\t')
case 'v':
in, out = in[2:], append(out, '\v')
case 'f':
in, out = in[2:], append(out, '\f')
case '0', '1', '2', '3', '4', '5', '6', '7':
// One, two, or three octal characters.
n := len(in[1:]) - len(bytes.TrimLeft(in[1:], "01234567"))
if n > 3 {
n = 3
}
v, err := strconv.ParseUint(string(in[1:1+n]), 8, 8)
if err != nil {
return "", d.newSyntaxError("invalid octal escape code %q in string", in[:1+n])
}
in, out = in[1+n:], append(out, byte(v))
case 'x':
// One or two hexadecimal characters.
n := len(in[2:]) - len(bytes.TrimLeft(in[2:], "0123456789abcdefABCDEF"))
if n > 2 {
n = 2
}
v, err := strconv.ParseUint(string(in[2:2+n]), 16, 8)
if err != nil {
return "", d.newSyntaxError("invalid hex escape code %q in string", in[:2+n])
}
in, out = in[2+n:], append(out, byte(v))
case 'u', 'U':
// Four or eight hexadecimal characters
n := 6
if r == 'U' {
n = 10
}
if len(in) < n {
return "", ErrUnexpectedEOF
}
v, err := strconv.ParseUint(string(in[2:n]), 16, 32)
if utf8.MaxRune < v || err != nil {
return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:n])
}
in = in[n:]
r := rune(v)
if utf16.IsSurrogate(r) {
if len(in) < 6 {
return "", ErrUnexpectedEOF
}
v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
r = utf16.DecodeRune(r, rune(v))
if in[0] != '\\' || in[1] != 'u' || r == unicode.ReplacementChar || err != nil {
return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:6])
}
in = in[6:]
}
out = append(out, string(r)...)
default:
return "", d.newSyntaxError("invalid escape code %q in string", in[:2])
}
default:
i := indexNeedEscapeInBytes(in[n:])
in, out = in[n+i:], append(out, in[:n+i]...)
}
}
return "", ErrUnexpectedEOF
}
// indexNeedEscapeInBytes returns the index of the character that needs
// escaping. If no characters need escaping, this returns the input length.
func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) }
// UnmarshalString returns an unescaped string given a textproto string value.
// The string value must be enclosed in single or double quotes. This is only
// used by the internal/encoding/defval package for unmarshaling bytes.
func UnmarshalString(s string) (string, error) {
d := NewDecoder([]byte(s))
return d.parseString()
}
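// Illustrative sketch, not part of the original vendored file: UnmarshalString
// undoes the escaping handled by parseString above.
//
//	s, err := UnmarshalString(`"a\tb\x41"`)
//	// err == nil && s == "a\tbA"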
View File

@ -1,373 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package text
import (
"bytes"
"fmt"
"math"
"strconv"
"strings"
"google.golang.org/protobuf/internal/flags"
)
// Kind represents a token kind expressible in the textproto format.
type Kind uint8
// Kind values.
const (
Invalid Kind = iota
EOF
Name // Name indicates the field name.
Scalar // Scalar are scalar values, e.g. "string", 47, ENUM_LITERAL, true.
MessageOpen
MessageClose
ListOpen
ListClose
// comma and semi-colon are only for parsing in between values and should not be exposed.
comma
semicolon
// bof indicates beginning of file, which is the default token
// kind at the beginning of parsing.
bof = Invalid
)
func (t Kind) String() string {
switch t {
case Invalid:
return "<invalid>"
case EOF:
return "eof"
case Scalar:
return "scalar"
case Name:
return "name"
case MessageOpen:
return "{"
case MessageClose:
return "}"
case ListOpen:
return "["
case ListClose:
return "]"
case comma:
return ","
case semicolon:
return ";"
default:
return fmt.Sprintf("<invalid:%v>", uint8(t))
}
}
// NameKind represents different types of field names.
type NameKind uint8
// NameKind values.
const (
IdentName NameKind = iota + 1
TypeName
FieldNumber
)
func (t NameKind) String() string {
switch t {
case IdentName:
return "IdentName"
case TypeName:
return "TypeName"
case FieldNumber:
return "FieldNumber"
default:
return fmt.Sprintf("<invalid:%v>", uint8(t))
}
}
// Bit mask in Token.attrs to indicate if a Name token is followed by the
// separator char ':'. The field name separator char is optional for message
// field or repeated message field, but required for all other types. Decoder
// simply indicates whether a Name token is followed by separator or not. It is
// up to the prototext package to validate.
const hasSeparator = 1 << 7
// Scalar value types.
const (
numberValue = iota + 1
stringValue
literalValue
)
// Bit mask in Token.numAttrs to indicate that the number is negative.
const isNegative = 1 << 7
// Token provides a parsed token kind and value. Values are provided by the
// different accessor methods.
type Token struct {
// Kind of the Token object.
kind Kind
// attrs contains metadata for the following Kinds:
// Name: hasSeparator bit and one of NameKind.
// Scalar: one of numberValue, stringValue, literalValue.
attrs uint8
// numAttrs contains metadata for numberValue:
// - highest bit is whether negative or positive.
// - lower bits indicate one of numDec, numHex, numOct, numFloat.
numAttrs uint8
// pos provides the position of the token in the original input.
pos int
// raw bytes of the serialized token.
// This is a subslice into the original input.
raw []byte
// str contains parsed string for the following:
// - stringValue of Scalar kind
// - numberValue of Scalar kind
// - TypeName of Name kind
str string
}
// Kind returns the token kind.
func (t Token) Kind() Kind {
return t.kind
}
// RawString returns the read value as a string.
func (t Token) RawString() string {
return string(t.raw)
}
// Pos returns the token position from the input.
func (t Token) Pos() int {
return t.pos
}
// NameKind returns IdentName, TypeName or FieldNumber.
// It panics if type is not Name.
func (t Token) NameKind() NameKind {
if t.kind == Name {
return NameKind(t.attrs &^ hasSeparator)
}
panic(fmt.Sprintf("Token is not a Name type: %s", t.kind))
}
// HasSeparator returns true if the field name is followed by the separator char
// ':', else false. It panics if type is not Name.
func (t Token) HasSeparator() bool {
if t.kind == Name {
return t.attrs&hasSeparator != 0
}
panic(fmt.Sprintf("Token is not a Name type: %s", t.kind))
}
// IdentName returns the value for IdentName type.
func (t Token) IdentName() string {
if t.kind == Name && t.attrs&uint8(IdentName) != 0 {
return string(t.raw)
}
panic(fmt.Sprintf("Token is not an IdentName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
}
// TypeName returns the value for TypeName type.
func (t Token) TypeName() string {
if t.kind == Name && t.attrs&uint8(TypeName) != 0 {
return t.str
}
panic(fmt.Sprintf("Token is not a TypeName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
}
// FieldNumber returns the value for FieldNumber type. It returns a
// non-negative int32 value. Caller will still need to validate for the correct
// field number range.
func (t Token) FieldNumber() int32 {
if t.kind != Name || t.attrs&uint8(FieldNumber) == 0 {
panic(fmt.Sprintf("Token is not a FieldNumber: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
}
// The following should not return an error as it has already been called
// right before this Token was constructed.
num, _ := strconv.ParseInt(string(t.raw), 10, 32)
return int32(num)
}
// String returns the string value for a Scalar type.
func (t Token) String() (string, bool) {
if t.kind != Scalar || t.attrs != stringValue {
return "", false
}
return t.str, true
}
// Enum returns the literal value for a Scalar type for use as enum literals.
func (t Token) Enum() (string, bool) {
if t.kind != Scalar || t.attrs != literalValue || (len(t.raw) > 0 && t.raw[0] == '-') {
return "", false
}
return string(t.raw), true
}
// Bool returns the bool value for a Scalar type.
func (t Token) Bool() (bool, bool) {
if t.kind != Scalar {
return false, false
}
switch t.attrs {
case literalValue:
if b, ok := boolLits[string(t.raw)]; ok {
return b, true
}
case numberValue:
// Unsigned integer representation of 0 or 1 is permitted: 00, 0x0, 01,
// 0x1, etc.
n, err := strconv.ParseUint(t.str, 0, 64)
if err == nil {
switch n {
case 0:
return false, true
case 1:
return true, true
}
}
}
return false, false
}
// These exact boolean literals are the ones supported in C++.
var boolLits = map[string]bool{
"t": true,
"true": true,
"True": true,
"f": false,
"false": false,
"False": false,
}
// Uint64 returns the uint64 value for a Scalar type.
func (t Token) Uint64() (uint64, bool) {
if t.kind != Scalar || t.attrs != numberValue ||
t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 {
return 0, false
}
n, err := strconv.ParseUint(t.str, 0, 64)
if err != nil {
return 0, false
}
return n, true
}
// Uint32 returns the uint32 value for a Scalar type.
func (t Token) Uint32() (uint32, bool) {
if t.kind != Scalar || t.attrs != numberValue ||
t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 {
return 0, false
}
n, err := strconv.ParseUint(t.str, 0, 32)
if err != nil {
return 0, false
}
return uint32(n), true
}
// Int64 returns the int64 value for a Scalar type.
func (t Token) Int64() (int64, bool) {
if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 {
return 0, false
}
if n, err := strconv.ParseInt(t.str, 0, 64); err == nil {
return n, true
}
// C++ accepts large positive hex numbers as negative values.
// This feature is here for proto1 backwards compatibility purposes.
if flags.ProtoLegacy && (t.numAttrs == numHex) {
if n, err := strconv.ParseUint(t.str, 0, 64); err == nil {
return int64(n), true
}
}
return 0, false
}
// Int32 returns the int32 value for a Scalar type.
func (t Token) Int32() (int32, bool) {
if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 {
return 0, false
}
if n, err := strconv.ParseInt(t.str, 0, 32); err == nil {
return int32(n), true
}
// C++ accepts large positive hex numbers as negative values.
// This feature is here for proto1 backwards compatibility purposes.
if flags.ProtoLegacy && (t.numAttrs == numHex) {
if n, err := strconv.ParseUint(t.str, 0, 32); err == nil {
return int32(n), true
}
}
return 0, false
}
// Float64 returns the float64 value for a Scalar type.
func (t Token) Float64() (float64, bool) {
if t.kind != Scalar {
return 0, false
}
switch t.attrs {
case literalValue:
if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok {
return f, true
}
case numberValue:
n, err := strconv.ParseFloat(t.str, 64)
if err == nil {
return n, true
}
nerr := err.(*strconv.NumError)
if nerr.Err == strconv.ErrRange {
return n, true
}
}
return 0, false
}
// Float32 returns the float32 value for a Scalar type.
func (t Token) Float32() (float32, bool) {
if t.kind != Scalar {
return 0, false
}
switch t.attrs {
case literalValue:
if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok {
return float32(f), true
}
case numberValue:
n, err := strconv.ParseFloat(t.str, 64)
if err == nil {
// Overflows are treated as (-)infinity.
return float32(n), true
}
nerr := err.(*strconv.NumError)
if nerr.Err == strconv.ErrRange {
return float32(n), true
}
}
return 0, false
}
// These are the supported float literals; C++ permits case-insensitive
// variants of these.
var floatLits = map[string]float64{
"nan": math.NaN(),
"inf": math.Inf(1),
"infinity": math.Inf(1),
"-inf": math.Inf(-1),
"-infinity": math.Inf(-1),
}
// TokenEquals returns true if given Tokens are equal, else false.
func TokenEquals(x, y Token) bool {
return x.kind == y.kind &&
x.attrs == y.attrs &&
x.numAttrs == y.numAttrs &&
x.pos == y.pos &&
bytes.Equal(x.raw, y.raw) &&
x.str == y.str
}
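Since Token packs a flag bit and a small enum into the single attrs byte (hasSeparator in the top bit, the NameKind in the low bits), a tiny standalone sketch of that packing may help. The constant names below mirror this file, but the program is only illustrative.
package main

import "fmt"

const hasSeparator = 1 << 7 // top bit: was the name followed by ':'?

// Low bits: a small enum, mirroring IdentName/TypeName/FieldNumber above.
const (
	identName uint8 = iota + 1
	typeName
	fieldNumber
)

// pack stores the kind in the low bits and the separator flag in the top bit.
func pack(kind uint8, sep bool) uint8 {
	attrs := kind
	if sep {
		attrs |= hasSeparator
	}
	return attrs
}

func main() {
	attrs := pack(typeName, true)
	fmt.Printf("kind=%d separator=%t\n", attrs&^hasSeparator, attrs&hasSeparator != 0)
	// Prints: kind=2 separator=true
}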

View File

@ -1,29 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package text implements the text format for protocol buffers.
// This package has no semantic understanding for protocol buffers and is only
// a parser and composer for the format.
//
// There is no formal specification for the protobuf text format; as such, the
// C++ implementation (see google::protobuf::TextFormat) is the reference
// implementation of the text format.
//
// This package is neither a superset nor a subset of the C++ implementation.
// This implementation permits a more liberal grammar in some cases to be
// backwards compatible with the historical Go implementation.
// Future parsing behavior unique to Go should not be added.
// Some grammars allowed by the C++ implementation are deliberately
// not implemented here because they are considered a bug by the protobuf team
// and should not be replicated.
//
// The Go implementation should implement a sufficient amount of the C++
// grammar such that the default text serialization by C++ can be parsed by Go.
// However, just because the C++ parser accepts some input does not mean that
// the Go implementation should as well.
//
// The text format is almost a superset of JSON except:
// - message keys are not quoted strings, but identifiers
// - the top-level value must be a message without the delimiters
package text
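As a concrete illustration of the last two bullets, here is the same hypothetical message written both ways; the field names are made up for the example.
package main

import "fmt"

// The textproto form: bare identifier keys, repeated fields repeat the key,
// and there are no braces around the top-level message.
const asTextproto = `name: "spanner"
port: 8080
tags: "db"
tags: "prod"`

// The JSON form of the same data, for comparison.
const asJSON = `{"name": "spanner", "port": 8080, "tags": ["db", "prod"]}`

func main() {
	fmt.Println(asTextproto)
	fmt.Println(asJSON)
}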

View File

@ -1,272 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package text
import (
"math"
"math/bits"
"strconv"
"strings"
"unicode/utf8"
"google.golang.org/protobuf/internal/detrand"
"google.golang.org/protobuf/internal/errors"
)
// encType represents an encoding type.
type encType uint8
const (
_ encType = (1 << iota) / 2
name
scalar
messageOpen
messageClose
)
// Encoder provides methods to write out textproto constructs and values. The user is
// responsible for producing valid sequences of constructs and values.
type Encoder struct {
encoderState
indent string
delims [2]byte
outputASCII bool
}
type encoderState struct {
lastType encType
indents []byte
out []byte
}
// NewEncoder returns an Encoder.
//
// If indent is a non-empty string, it causes every entry in a List or Message
// to be preceded by the indent and trailed by a newline.
//
// If delims is not the zero value, it controls the delimiter characters used
// for messages (e.g., "{}" vs "<>").
//
// If outputASCII is true, strings will be serialized in such a way that
// multi-byte UTF-8 sequences are escaped. This property ensures that the
// overall output is ASCII (as opposed to UTF-8).
func NewEncoder(buf []byte, indent string, delims [2]byte, outputASCII bool) (*Encoder, error) {
e := &Encoder{
encoderState: encoderState{out: buf},
}
if len(indent) > 0 {
if strings.Trim(indent, " \t") != "" {
return nil, errors.New("indent may only be composed of space and tab characters")
}
e.indent = indent
}
switch delims {
case [2]byte{0, 0}:
e.delims = [2]byte{'{', '}'}
case [2]byte{'{', '}'}, [2]byte{'<', '>'}:
e.delims = delims
default:
return nil, errors.New("delimiters may only be \"{}\" or \"<>\"")
}
e.outputASCII = outputASCII
return e, nil
}
// Bytes returns the content of the written bytes.
func (e *Encoder) Bytes() []byte {
return e.out
}
// StartMessage writes out the '{' or '<' symbol.
func (e *Encoder) StartMessage() {
e.prepareNext(messageOpen)
e.out = append(e.out, e.delims[0])
}
// EndMessage writes out the '}' or '>' symbol.
func (e *Encoder) EndMessage() {
e.prepareNext(messageClose)
e.out = append(e.out, e.delims[1])
}
// WriteName writes out the field name and the separator ':'.
func (e *Encoder) WriteName(s string) {
e.prepareNext(name)
e.out = append(e.out, s...)
e.out = append(e.out, ':')
}
// WriteBool writes out the given boolean value.
func (e *Encoder) WriteBool(b bool) {
if b {
e.WriteLiteral("true")
} else {
e.WriteLiteral("false")
}
}
// WriteString writes out the given string value.
func (e *Encoder) WriteString(s string) {
e.prepareNext(scalar)
e.out = appendString(e.out, s, e.outputASCII)
}
func appendString(out []byte, in string, outputASCII bool) []byte {
out = append(out, '"')
i := indexNeedEscapeInString(in)
in, out = in[i:], append(out, in[:i]...)
for len(in) > 0 {
switch r, n := utf8.DecodeRuneInString(in); {
case r == utf8.RuneError && n == 1:
// We do not report invalid UTF-8 because strings in the text format
// are used to represent both the proto string and bytes type.
r = rune(in[0])
fallthrough
case r < ' ' || r == '"' || r == '\\' || r == 0x7f:
out = append(out, '\\')
switch r {
case '"', '\\':
out = append(out, byte(r))
case '\n':
out = append(out, 'n')
case '\r':
out = append(out, 'r')
case '\t':
out = append(out, 't')
default:
out = append(out, 'x')
out = append(out, "00"[1+(bits.Len32(uint32(r))-1)/4:]...)
out = strconv.AppendUint(out, uint64(r), 16)
}
in = in[n:]
case r >= utf8.RuneSelf && (outputASCII || r <= 0x009f):
out = append(out, '\\')
if r <= math.MaxUint16 {
out = append(out, 'u')
out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...)
out = strconv.AppendUint(out, uint64(r), 16)
} else {
out = append(out, 'U')
out = append(out, "00000000"[1+(bits.Len32(uint32(r))-1)/4:]...)
out = strconv.AppendUint(out, uint64(r), 16)
}
in = in[n:]
default:
i := indexNeedEscapeInString(in[n:])
in, out = in[n+i:], append(out, in[:n+i]...)
}
}
out = append(out, '"')
return out
}
// indexNeedEscapeInString returns the index of the character that needs
// escaping. If no characters need escaping, this returns the input length.
func indexNeedEscapeInString(s string) int {
for i := 0; i < len(s); i++ {
if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= 0x7f {
return i
}
}
return len(s)
}
// WriteFloat writes out the given float value for given bitSize.
func (e *Encoder) WriteFloat(n float64, bitSize int) {
e.prepareNext(scalar)
e.out = appendFloat(e.out, n, bitSize)
}
func appendFloat(out []byte, n float64, bitSize int) []byte {
switch {
case math.IsNaN(n):
return append(out, "nan"...)
case math.IsInf(n, +1):
return append(out, "inf"...)
case math.IsInf(n, -1):
return append(out, "-inf"...)
default:
return strconv.AppendFloat(out, n, 'g', -1, bitSize)
}
}
// WriteInt writes out the given signed integer value.
func (e *Encoder) WriteInt(n int64) {
e.prepareNext(scalar)
e.out = strconv.AppendInt(e.out, n, 10)
}
// WriteUint writes out the given unsigned integer value.
func (e *Encoder) WriteUint(n uint64) {
e.prepareNext(scalar)
e.out = strconv.AppendUint(e.out, n, 10)
}
// WriteLiteral writes out the given string as a literal value without quotes.
// This is used for writing enum literal strings.
func (e *Encoder) WriteLiteral(s string) {
e.prepareNext(scalar)
e.out = append(e.out, s...)
}
// prepareNext adds possible space and indentation for the next value based
// on last encType and indent option. It also updates e.lastType to next.
func (e *Encoder) prepareNext(next encType) {
defer func() {
e.lastType = next
}()
// Single line.
if len(e.indent) == 0 {
// Add space after each field before the next one.
if e.lastType&(scalar|messageClose) != 0 && next == name {
e.out = append(e.out, ' ')
// Add a random extra space to make output unstable.
if detrand.Bool() {
e.out = append(e.out, ' ')
}
}
return
}
// Multi-line.
switch {
case e.lastType == name:
e.out = append(e.out, ' ')
// Add a random extra space after name: to make output unstable.
if detrand.Bool() {
e.out = append(e.out, ' ')
}
case e.lastType == messageOpen && next != messageClose:
e.indents = append(e.indents, e.indent...)
e.out = append(e.out, '\n')
e.out = append(e.out, e.indents...)
case e.lastType&(scalar|messageClose) != 0:
if next == messageClose {
e.indents = e.indents[:len(e.indents)-len(e.indent)]
}
e.out = append(e.out, '\n')
e.out = append(e.out, e.indents...)
}
}
// Snapshot returns the current snapshot for use in Reset.
func (e *Encoder) Snapshot() encoderState {
return e.encoderState
}
// Reset resets the Encoder to the given encoderState from a Snapshot.
func (e *Encoder) Reset(es encoderState) {
e.encoderState = es
}
// AppendString appends the escaped form of the input string to b.
func AppendString(b []byte, s string) []byte {
return appendString(b, s, false)
}
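A short usage sketch of the API above, written as if it were an example test in this package (the package is internal to the module, so external code cannot import it). The output is not asserted because the detrand spaces make it deliberately unstable.
package text

import "fmt"

// ExampleEncoder sketches how the writer methods compose: scalars are written
// after their names, and StartMessage/EndMessage bracket nested messages.
func ExampleEncoder() {
	e, err := NewEncoder(nil, "  ", [2]byte{0, 0}, false) // zero delims default to "{}"
	if err != nil {
		panic(err)
	}
	e.WriteName("name")
	e.WriteString("spanner")
	e.WriteName("replicas")
	e.WriteInt(3)
	e.WriteName("options")
	e.StartMessage()
	e.WriteName("verbose")
	e.WriteBool(true)
	e.EndMessage()
	fmt.Println(string(e.Bytes()))
}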

View File

@ -1,89 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package errors implements functions to manipulate errors.
package errors
import (
"errors"
"fmt"
"google.golang.org/protobuf/internal/detrand"
)
// Error is a sentinel matching all errors produced by this package.
var Error = errors.New("protobuf error")
// New formats a string according to the format specifier and arguments and
// returns an error that has a "proto" prefix.
func New(f string, x ...interface{}) error {
return &prefixError{s: format(f, x...)}
}
type prefixError struct{ s string }
var prefix = func() string {
// Deliberately introduce instability into the error message string to
// discourage users from performing error string comparisons.
if detrand.Bool() {
return "proto: " // use non-breaking spaces (U+00a0)
} else {
return "proto: " // use regular spaces (U+0020)
}
}()
func (e *prefixError) Error() string {
return prefix + e.s
}
func (e *prefixError) Unwrap() error {
return Error
}
// Wrap returns an error that has a "proto" prefix, the formatted string described
// by the format specifier and arguments, and a suffix of err. The error wraps err.
func Wrap(err error, f string, x ...interface{}) error {
return &wrapError{
s: format(f, x...),
err: err,
}
}
type wrapError struct {
s string
err error
}
func (e *wrapError) Error() string {
return format("%v%v: %v", prefix, e.s, e.err)
}
func (e *wrapError) Unwrap() error {
return e.err
}
func (e *wrapError) Is(target error) bool {
return target == Error
}
func format(f string, x ...interface{}) string {
// avoid "proto: " prefix when chaining
for i := 0; i < len(x); i++ {
switch e := x[i].(type) {
case *prefixError:
x[i] = e.s
case *wrapError:
x[i] = format("%v: %v", e.s, e.err)
}
}
return fmt.Sprintf(f, x...)
}
func InvalidUTF8(name string) error {
return New("field %v contains invalid UTF-8", name)
}
func RequiredNotSet(name string) error {
return New("required field %v not set", name)
}
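A small sketch of how these helpers chain, written as if it lived in this package: Wrap keeps the original error reachable through Unwrap, and both the wrapped and plain errors match the package sentinel, which is why callers are expected to use errors.Is rather than string comparisons (the prefix is intentionally unstable).
package errors

import "fmt"

func ExampleWrap() {
	base := New("field %v contains invalid UTF-8", "User.name")
	wrapped := Wrap(base, "unmarshal failed for %v", "User")
	fmt.Println(Is(wrapped, Error)) // true: wrapError.Is matches the sentinel
	fmt.Println(Is(wrapped, base))  // true: Unwrap exposes the original error
}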

View File

@ -1,40 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.13
// +build !go1.13
package errors
import "reflect"
// Is is a copy of Go 1.13's errors.Is for use with older Go versions.
func Is(err, target error) bool {
if target == nil {
return err == target
}
isComparable := reflect.TypeOf(target).Comparable()
for {
if isComparable && err == target {
return true
}
if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
return true
}
if err = unwrap(err); err == nil {
return false
}
}
}
func unwrap(err error) error {
u, ok := err.(interface {
Unwrap() error
})
if !ok {
return nil
}
return u.Unwrap()
}

View File

@ -1,13 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.13
// +build go1.13
package errors
import "errors"
// Is is errors.Is.
func Is(err, target error) bool { return errors.Is(err, target) }

View File

@ -1,157 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package filedesc provides functionality for constructing descriptors.
//
// The types in this package implement interfaces in the protoreflect package
// related to protobuf descriptors.
package filedesc
import (
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
// Builder constructs a protoreflect.FileDescriptor from the raw descriptor.
type Builder struct {
// GoPackagePath is the Go package path that is invoking this builder.
GoPackagePath string
// RawDescriptor is the wire-encoded bytes of FileDescriptorProto
// and must be populated.
RawDescriptor []byte
// NumEnums is the total number of enums declared in the file.
NumEnums int32
// NumMessages is the total number of messages declared in the file.
// It includes the implicit message declarations for map entries.
NumMessages int32
// NumExtensions is the total number of extensions declared in the file.
NumExtensions int32
// NumServices is the total number of services declared in the file.
NumServices int32
// TypeResolver resolves extension field types for descriptor options.
// If nil, it uses protoregistry.GlobalTypes.
TypeResolver interface {
protoregistry.ExtensionTypeResolver
}
// FileRegistry is used to look up file, enum, and message dependencies.
// Once constructed, the file descriptor is registered here.
// If nil, it uses protoregistry.GlobalFiles.
FileRegistry interface {
FindFileByPath(string) (protoreflect.FileDescriptor, error)
FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error)
RegisterFile(protoreflect.FileDescriptor) error
}
}
// resolverByIndex is an interface Builder.FileRegistry may implement.
// If so, it permits looking up an enum or message dependency based on the
// sub-list and element index into filetype.Builder.DependencyIndexes.
type resolverByIndex interface {
FindEnumByIndex(int32, int32, []Enum, []Message) protoreflect.EnumDescriptor
FindMessageByIndex(int32, int32, []Enum, []Message) protoreflect.MessageDescriptor
}
// Indexes of each sub-list in filetype.Builder.DependencyIndexes.
const (
listFieldDeps int32 = iota
listExtTargets
listExtDeps
listMethInDeps
listMethOutDeps
)
// Out is the output of the Builder.
type Out struct {
File protoreflect.FileDescriptor
// Enums is all enum descriptors in "flattened ordering".
Enums []Enum
// Messages is all message descriptors in "flattened ordering".
// It includes the implicit message declarations for map entries.
Messages []Message
// Extensions is all extension descriptors in "flattened ordering".
Extensions []Extension
// Services is all service descriptors in "flattened ordering".
Services []Service
}
// Build constructs a FileDescriptor given the parameters set in Builder.
// It assumes that the inputs are well-formed and panics if any inconsistencies
// are encountered.
//
// If NumEnums+NumMessages+NumExtensions+NumServices is zero,
// then Build automatically derives them from the raw descriptor.
func (db Builder) Build() (out Out) {
// Populate the counts if uninitialized.
if db.NumEnums+db.NumMessages+db.NumExtensions+db.NumServices == 0 {
db.unmarshalCounts(db.RawDescriptor, true)
}
// Initialize resolvers and registries if unpopulated.
if db.TypeResolver == nil {
db.TypeResolver = protoregistry.GlobalTypes
}
if db.FileRegistry == nil {
db.FileRegistry = protoregistry.GlobalFiles
}
fd := newRawFile(db)
out.File = fd
out.Enums = fd.allEnums
out.Messages = fd.allMessages
out.Extensions = fd.allExtensions
out.Services = fd.allServices
if err := db.FileRegistry.RegisterFile(fd); err != nil {
panic(err)
}
return out
}
// unmarshalCounts counts the number of enum, message, extension, and service
// declarations in the raw message, which is either a FileDescriptorProto
// or a DescriptorProto depending on whether isFile is set.
func (db *Builder) unmarshalCounts(b []byte, isFile bool) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
if isFile {
switch num {
case genid.FileDescriptorProto_EnumType_field_number:
db.NumEnums++
case genid.FileDescriptorProto_MessageType_field_number:
db.unmarshalCounts(v, false)
db.NumMessages++
case genid.FileDescriptorProto_Extension_field_number:
db.NumExtensions++
case genid.FileDescriptorProto_Service_field_number:
db.NumServices++
}
} else {
switch num {
case genid.DescriptorProto_EnumType_field_number:
db.NumEnums++
case genid.DescriptorProto_NestedType_field_number:
db.unmarshalCounts(v, false)
db.NumMessages++
case genid.DescriptorProto_Extension_field_number:
db.NumExtensions++
}
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
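As a sketch of how generated code typically drives this type (the descriptor bytes and Go import path below are placeholders): it fills in RawDescriptor and, optionally, the counts, then calls Build, which registers the file and panics on malformed input.
package filedesc

// exampleBuild shows the minimal Builder wiring. rawDesc must be valid
// wire-encoded FileDescriptorProto bytes; with all four counts left at zero,
// Build derives them via unmarshalCounts before constructing the file.
func exampleBuild(rawDesc []byte) Out {
	b := Builder{
		GoPackagePath: "example.com/gen/foopb", // hypothetical Go import path
		RawDescriptor: rawDesc,
		// TypeResolver and FileRegistry default to the global registries.
	}
	return b.Build()
}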

View File

@ -1,703 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filedesc
import (
"bytes"
"fmt"
"sync"
"sync/atomic"
"google.golang.org/protobuf/internal/descfmt"
"google.golang.org/protobuf/internal/descopts"
"google.golang.org/protobuf/internal/encoding/defval"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
// Edition is an Enum for proto2.Edition
type Edition int32
// These values align with the value of Enum in descriptor.proto which allows
// direct conversion between the proto enum and this enum.
const (
EditionUnknown Edition = 0
EditionProto2 Edition = 998
EditionProto3 Edition = 999
Edition2023 Edition = 1000
EditionUnsupported Edition = 100000
)
// The types in this file may have a suffix:
// • L0: Contains fields common to all descriptors (except File) and
// must be initialized up front.
// • L1: Contains fields specific to a descriptor and
// must be initialized up front. If the associated proto uses Editions, the
// Editions features must always be resolved. If not explicitly set, the
// appropriate default must be resolved and set.
// • L2: Contains fields that are lazily initialized when constructing
// from the raw file descriptor. When constructing as a literal, the L2
// fields must be initialized up front.
//
// The types are exported so that packages like reflect/protodesc can
// directly construct descriptors.
type (
File struct {
fileRaw
L1 FileL1
once uint32 // atomically set if L2 is valid
mu sync.Mutex // protects L2
L2 *FileL2
}
FileL1 struct {
Syntax protoreflect.Syntax
Edition Edition // Only used if Syntax == Editions
Path string
Package protoreflect.FullName
Enums Enums
Messages Messages
Extensions Extensions
Services Services
EditionFeatures EditionFeatures
}
FileL2 struct {
Options func() protoreflect.ProtoMessage
Imports FileImports
Locations SourceLocations
}
EditionFeatures struct {
// IsFieldPresence is true if field_presence is EXPLICIT
// https://protobuf.dev/editions/features/#field_presence
IsFieldPresence bool
// IsLegacyRequired is true if field_presence is LEGACY_REQUIRED
// https://protobuf.dev/editions/features/#field_presence
IsLegacyRequired bool
// IsOpenEnum is true if enum_type is OPEN
// https://protobuf.dev/editions/features/#enum_type
IsOpenEnum bool
// IsPacked is true if repeated_field_encoding is PACKED
// https://protobuf.dev/editions/features/#repeated_field_encoding
IsPacked bool
// IsUTF8Validated is true if utf8_validation is VERIFY
// https://protobuf.dev/editions/features/#utf8_validation
IsUTF8Validated bool
// IsDelimitedEncoded is true if message_encoding is DELIMITED
// https://protobuf.dev/editions/features/#message_encoding
IsDelimitedEncoded bool
// IsJSONCompliant is true if json_format is ALLOW
// https://protobuf.dev/editions/features/#json_format
IsJSONCompliant bool
// GenerateLegacyUnmarshalJSON determines if the plugin generates the
// UnmarshalJSON([]byte) error method for enums.
GenerateLegacyUnmarshalJSON bool
}
)
func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
func (fd *File) Parent() protoreflect.Descriptor { return nil }
func (fd *File) Index() int { return 0 }
func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax }
func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() }
func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
func (fd *File) IsPlaceholder() bool { return false }
func (fd *File) Options() protoreflect.ProtoMessage {
if f := fd.lazyInit().Options; f != nil {
return f()
}
return descopts.File
}
func (fd *File) Path() string { return fd.L1.Path }
func (fd *File) Package() protoreflect.FullName { return fd.L1.Package }
func (fd *File) Imports() protoreflect.FileImports { return &fd.lazyInit().Imports }
func (fd *File) Enums() protoreflect.EnumDescriptors { return &fd.L1.Enums }
func (fd *File) Messages() protoreflect.MessageDescriptors { return &fd.L1.Messages }
func (fd *File) Extensions() protoreflect.ExtensionDescriptors { return &fd.L1.Extensions }
func (fd *File) Services() protoreflect.ServiceDescriptors { return &fd.L1.Services }
func (fd *File) SourceLocations() protoreflect.SourceLocations { return &fd.lazyInit().Locations }
func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
func (fd *File) ProtoType(protoreflect.FileDescriptor) {}
func (fd *File) ProtoInternal(pragma.DoNotImplement) {}
func (fd *File) lazyInit() *FileL2 {
if atomic.LoadUint32(&fd.once) == 0 {
fd.lazyInitOnce()
}
return fd.L2
}
func (fd *File) lazyInitOnce() {
fd.mu.Lock()
if fd.L2 == nil {
fd.lazyRawInit() // recursively initializes all L2 structures
}
atomic.StoreUint32(&fd.once, 1)
fd.mu.Unlock()
}
// GoPackagePath is a pseudo-internal API for determining the Go package path
// that this file descriptor is declared in.
//
// WARNING: This method is exempt from the compatibility promise and may be
// removed in the future without warning.
func (fd *File) GoPackagePath() string {
return fd.builder.GoPackagePath
}
type (
Enum struct {
Base
L1 EnumL1
L2 *EnumL2 // protected by fileDesc.once
}
EnumL1 struct {
eagerValues bool // controls whether EnumL2.Values is already populated
EditionFeatures EditionFeatures
}
EnumL2 struct {
Options func() protoreflect.ProtoMessage
Values EnumValues
ReservedNames Names
ReservedRanges EnumRanges
}
EnumValue struct {
Base
L1 EnumValueL1
}
EnumValueL1 struct {
Options func() protoreflect.ProtoMessage
Number protoreflect.EnumNumber
}
)
func (ed *Enum) Options() protoreflect.ProtoMessage {
if f := ed.lazyInit().Options; f != nil {
return f()
}
return descopts.Enum
}
func (ed *Enum) Values() protoreflect.EnumValueDescriptors {
if ed.L1.eagerValues {
return &ed.L2.Values
}
return &ed.lazyInit().Values
}
func (ed *Enum) ReservedNames() protoreflect.Names { return &ed.lazyInit().ReservedNames }
func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges }
func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
func (ed *Enum) ProtoType(protoreflect.EnumDescriptor) {}
func (ed *Enum) lazyInit() *EnumL2 {
ed.L0.ParentFile.lazyInit() // implicitly initializes L2
return ed.L2
}
func (ed *EnumValue) Options() protoreflect.ProtoMessage {
if f := ed.L1.Options; f != nil {
return f()
}
return descopts.EnumValue
}
func (ed *EnumValue) Number() protoreflect.EnumNumber { return ed.L1.Number }
func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
func (ed *EnumValue) ProtoType(protoreflect.EnumValueDescriptor) {}
type (
Message struct {
Base
L1 MessageL1
L2 *MessageL2 // protected by fileDesc.once
}
MessageL1 struct {
Enums Enums
Messages Messages
Extensions Extensions
IsMapEntry bool // promoted from google.protobuf.MessageOptions
IsMessageSet bool // promoted from google.protobuf.MessageOptions
EditionFeatures EditionFeatures
}
MessageL2 struct {
Options func() protoreflect.ProtoMessage
Fields Fields
Oneofs Oneofs
ReservedNames Names
ReservedRanges FieldRanges
RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality
ExtensionRanges FieldRanges
ExtensionRangeOptions []func() protoreflect.ProtoMessage // must be same length as ExtensionRanges
}
Field struct {
Base
L1 FieldL1
}
FieldL1 struct {
Options func() protoreflect.ProtoMessage
Number protoreflect.FieldNumber
Cardinality protoreflect.Cardinality // must be consistent with Message.RequiredNumbers
Kind protoreflect.Kind
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
IsWeak bool // promoted from google.protobuf.FieldOptions
HasPacked bool // promoted from google.protobuf.FieldOptions
IsPacked bool // promoted from google.protobuf.FieldOptions
HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions
EnforceUTF8 bool // promoted from google.protobuf.FieldOptions
Default defaultValue
ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
Enum protoreflect.EnumDescriptor
Message protoreflect.MessageDescriptor
EditionFeatures EditionFeatures
}
Oneof struct {
Base
L1 OneofL1
}
OneofL1 struct {
Options func() protoreflect.ProtoMessage
Fields OneofFields // must be consistent with Message.Fields.ContainingOneof
EditionFeatures EditionFeatures
}
)
func (md *Message) Options() protoreflect.ProtoMessage {
if f := md.lazyInit().Options; f != nil {
return f()
}
return descopts.Message
}
func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry }
func (md *Message) Fields() protoreflect.FieldDescriptors { return &md.lazyInit().Fields }
func (md *Message) Oneofs() protoreflect.OneofDescriptors { return &md.lazyInit().Oneofs }
func (md *Message) ReservedNames() protoreflect.Names { return &md.lazyInit().ReservedNames }
func (md *Message) ReservedRanges() protoreflect.FieldRanges { return &md.lazyInit().ReservedRanges }
func (md *Message) RequiredNumbers() protoreflect.FieldNumbers { return &md.lazyInit().RequiredNumbers }
func (md *Message) ExtensionRanges() protoreflect.FieldRanges { return &md.lazyInit().ExtensionRanges }
func (md *Message) ExtensionRangeOptions(i int) protoreflect.ProtoMessage {
if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil {
return f()
}
return descopts.ExtensionRange
}
func (md *Message) Enums() protoreflect.EnumDescriptors { return &md.L1.Enums }
func (md *Message) Messages() protoreflect.MessageDescriptors { return &md.L1.Messages }
func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions }
func (md *Message) ProtoType(protoreflect.MessageDescriptor) {}
func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
func (md *Message) lazyInit() *MessageL2 {
md.L0.ParentFile.lazyInit() // implicitly initializes L2
return md.L2
}
// IsMessageSet is a pseudo-internal API for checking whether a message
// should serialize in the proto1 message format.
//
// WARNING: This method is exempt from the compatibility promise and may be
// removed in the future without warning.
func (md *Message) IsMessageSet() bool {
return md.L1.IsMessageSet
}
func (fd *Field) Options() protoreflect.ProtoMessage {
if f := fd.L1.Options; f != nil {
return f()
}
return descopts.Field
}
func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number }
func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality }
func (fd *Field) Kind() protoreflect.Kind {
return fd.L1.Kind
}
func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON }
func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) }
func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) }
func (fd *Field) HasPresence() bool {
if fd.L1.Cardinality == protoreflect.Repeated {
return false
}
explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence
return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
}
func (fd *Field) HasOptionalKeyword() bool {
return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional
}
func (fd *Field) IsPacked() bool {
if fd.L1.Cardinality != protoreflect.Repeated {
return false
}
switch fd.L1.Kind {
case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
return false
}
if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
return fd.L1.EditionFeatures.IsPacked
}
if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 {
// proto3 repeated fields are packed by default.
return !fd.L1.HasPacked || fd.L1.IsPacked
}
return fd.L1.IsPacked
}
func (fd *Field) IsExtension() bool { return false }
func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() }
func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() }
func (fd *Field) MapKey() protoreflect.FieldDescriptor {
if !fd.IsMap() {
return nil
}
return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number)
}
func (fd *Field) MapValue() protoreflect.FieldDescriptor {
if !fd.IsMap() {
return nil
}
return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number)
}
func (fd *Field) HasDefault() bool { return fd.L1.Default.has }
func (fd *Field) Default() protoreflect.Value { return fd.L1.Default.get(fd) }
func (fd *Field) DefaultEnumValue() protoreflect.EnumValueDescriptor { return fd.L1.Default.enum }
func (fd *Field) ContainingOneof() protoreflect.OneofDescriptor { return fd.L1.ContainingOneof }
func (fd *Field) ContainingMessage() protoreflect.MessageDescriptor {
return fd.L0.Parent.(protoreflect.MessageDescriptor)
}
func (fd *Field) Enum() protoreflect.EnumDescriptor {
return fd.L1.Enum
}
func (fd *Field) Message() protoreflect.MessageDescriptor {
if fd.L1.IsWeak {
if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil {
return d.(protoreflect.MessageDescriptor)
}
}
return fd.L1.Message
}
func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {}
// EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8
// validation for the string field. This exists for Google-internal use only
// since proto3 did not enforce UTF-8 validity prior to the open-source release.
// If this method does not exist, the default is to enforce valid UTF-8.
//
// WARNING: This method is exempt from the compatibility promise and may be
// removed in the future without warning.
func (fd *Field) EnforceUTF8() bool {
if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
return fd.L1.EditionFeatures.IsUTF8Validated
}
if fd.L1.HasEnforceUTF8 {
return fd.L1.EnforceUTF8
}
return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3
}
func (od *Oneof) IsSynthetic() bool {
return od.L0.ParentFile.L1.Syntax == protoreflect.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword()
}
func (od *Oneof) Options() protoreflect.ProtoMessage {
if f := od.L1.Options; f != nil {
return f()
}
return descopts.Oneof
}
func (od *Oneof) Fields() protoreflect.FieldDescriptors { return &od.L1.Fields }
func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) }
func (od *Oneof) ProtoType(protoreflect.OneofDescriptor) {}
type (
Extension struct {
Base
L1 ExtensionL1
L2 *ExtensionL2 // protected by fileDesc.once
}
ExtensionL1 struct {
Number protoreflect.FieldNumber
Extendee protoreflect.MessageDescriptor
Cardinality protoreflect.Cardinality
Kind protoreflect.Kind
EditionFeatures EditionFeatures
}
ExtensionL2 struct {
Options func() protoreflect.ProtoMessage
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
IsPacked bool // promoted from google.protobuf.FieldOptions
Default defaultValue
Enum protoreflect.EnumDescriptor
Message protoreflect.MessageDescriptor
}
)
func (xd *Extension) Options() protoreflect.ProtoMessage {
if f := xd.lazyInit().Options; f != nil {
return f()
}
return descopts.Field
}
func (xd *Extension) Number() protoreflect.FieldNumber { return xd.L1.Number }
func (xd *Extension) Cardinality() protoreflect.Cardinality { return xd.L1.Cardinality }
func (xd *Extension) Kind() protoreflect.Kind { return xd.L1.Kind }
func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON }
func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) }
func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) }
func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != protoreflect.Repeated }
func (xd *Extension) HasOptionalKeyword() bool {
return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional
}
func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked }
func (xd *Extension) IsExtension() bool { return true }
func (xd *Extension) IsWeak() bool { return false }
func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated }
func (xd *Extension) IsMap() bool { return false }
func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil }
func (xd *Extension) MapValue() protoreflect.FieldDescriptor { return nil }
func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has }
func (xd *Extension) Default() protoreflect.Value { return xd.lazyInit().Default.get(xd) }
func (xd *Extension) DefaultEnumValue() protoreflect.EnumValueDescriptor {
return xd.lazyInit().Default.enum
}
func (xd *Extension) ContainingOneof() protoreflect.OneofDescriptor { return nil }
func (xd *Extension) ContainingMessage() protoreflect.MessageDescriptor { return xd.L1.Extendee }
func (xd *Extension) Enum() protoreflect.EnumDescriptor { return xd.lazyInit().Enum }
func (xd *Extension) Message() protoreflect.MessageDescriptor { return xd.lazyInit().Message }
func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) }
func (xd *Extension) ProtoType(protoreflect.FieldDescriptor) {}
func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {}
func (xd *Extension) lazyInit() *ExtensionL2 {
xd.L0.ParentFile.lazyInit() // implicitly initializes L2
return xd.L2
}
type (
Service struct {
Base
L1 ServiceL1
L2 *ServiceL2 // protected by fileDesc.once
}
ServiceL1 struct{}
ServiceL2 struct {
Options func() protoreflect.ProtoMessage
Methods Methods
}
Method struct {
Base
L1 MethodL1
}
MethodL1 struct {
Options func() protoreflect.ProtoMessage
Input protoreflect.MessageDescriptor
Output protoreflect.MessageDescriptor
IsStreamingClient bool
IsStreamingServer bool
}
)
func (sd *Service) Options() protoreflect.ProtoMessage {
if f := sd.lazyInit().Options; f != nil {
return f()
}
return descopts.Service
}
func (sd *Service) Methods() protoreflect.MethodDescriptors { return &sd.lazyInit().Methods }
func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) }
func (sd *Service) ProtoType(protoreflect.ServiceDescriptor) {}
func (sd *Service) ProtoInternal(pragma.DoNotImplement) {}
func (sd *Service) lazyInit() *ServiceL2 {
sd.L0.ParentFile.lazyInit() // implicitly initializes L2
return sd.L2
}
func (md *Method) Options() protoreflect.ProtoMessage {
if f := md.L1.Options; f != nil {
return f()
}
return descopts.Method
}
func (md *Method) Input() protoreflect.MessageDescriptor { return md.L1.Input }
func (md *Method) Output() protoreflect.MessageDescriptor { return md.L1.Output }
func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient }
func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer }
func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
func (md *Method) ProtoType(protoreflect.MethodDescriptor) {}
func (md *Method) ProtoInternal(pragma.DoNotImplement) {}
// Surrogate files can be used to create standalone descriptors
// where the syntax is the only information derived from the parent file.
var (
SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}}
SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}}
)
type (
Base struct {
L0 BaseL0
}
BaseL0 struct {
FullName protoreflect.FullName // must be populated
ParentFile *File // must be populated
Parent protoreflect.Descriptor
Index int
}
)
func (d *Base) Name() protoreflect.Name { return d.L0.FullName.Name() }
func (d *Base) FullName() protoreflect.FullName { return d.L0.FullName }
func (d *Base) ParentFile() protoreflect.FileDescriptor {
if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 {
return nil // surrogate files are not real parents
}
return d.L0.ParentFile
}
func (d *Base) Parent() protoreflect.Descriptor { return d.L0.Parent }
func (d *Base) Index() int { return d.L0.Index }
func (d *Base) Syntax() protoreflect.Syntax { return d.L0.ParentFile.Syntax() }
func (d *Base) IsPlaceholder() bool { return false }
func (d *Base) ProtoInternal(pragma.DoNotImplement) {}
type stringName struct {
hasJSON bool
once sync.Once
nameJSON string
nameText string
}
// InitJSON initializes the name. It is exported for use by other internal packages.
func (s *stringName) InitJSON(name string) {
s.hasJSON = true
s.nameJSON = name
}
func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName {
s.once.Do(func() {
if fd.IsExtension() {
// For extensions, JSON and text are formatted the same way.
var name string
if messageset.IsMessageSetExtension(fd) {
name = string("[" + fd.FullName().Parent() + "]")
} else {
name = string("[" + fd.FullName() + "]")
}
s.nameJSON = name
s.nameText = name
} else {
// Format the JSON name.
if !s.hasJSON {
s.nameJSON = strs.JSONCamelCase(string(fd.Name()))
}
// Format the text name.
s.nameText = string(fd.Name())
if fd.Kind() == protoreflect.GroupKind {
s.nameText = string(fd.Message().Name())
}
}
})
return s
}
func (s *stringName) getJSON(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameJSON }
func (s *stringName) getText(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameText }
func DefaultValue(v protoreflect.Value, ev protoreflect.EnumValueDescriptor) defaultValue {
dv := defaultValue{has: v.IsValid(), val: v, enum: ev}
if b, ok := v.Interface().([]byte); ok {
// Store a copy of the default bytes, so that we can detect
// accidental mutations of the original value.
dv.bytes = append([]byte(nil), b...)
}
return dv
}
func unmarshalDefault(b []byte, k protoreflect.Kind, pf *File, ed protoreflect.EnumDescriptor) defaultValue {
var evs protoreflect.EnumValueDescriptors
if k == protoreflect.EnumKind {
// If the enum is declared within the same file, be careful not to
// blindly call the Values method, lest we bind ourselves in a deadlock.
if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf {
evs = &e.L2.Values
} else {
evs = ed.Values()
}
// If we are unable to resolve the enum dependency, use a placeholder
// enum value since we will not be able to parse the default value.
if ed.IsPlaceholder() && protoreflect.Name(b).IsValid() {
v := protoreflect.ValueOfEnum(0)
ev := PlaceholderEnumValue(ed.FullName().Parent().Append(protoreflect.Name(b)))
return DefaultValue(v, ev)
}
}
v, ev, err := defval.Unmarshal(string(b), k, evs, defval.Descriptor)
if err != nil {
panic(err)
}
return DefaultValue(v, ev)
}
type defaultValue struct {
has bool
val protoreflect.Value
enum protoreflect.EnumValueDescriptor
bytes []byte
}
func (dv *defaultValue) get(fd protoreflect.FieldDescriptor) protoreflect.Value {
// Return the zero value as the default if unpopulated.
if !dv.has {
if fd.Cardinality() == protoreflect.Repeated {
return protoreflect.Value{}
}
switch fd.Kind() {
case protoreflect.BoolKind:
return protoreflect.ValueOfBool(false)
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
return protoreflect.ValueOfInt32(0)
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
return protoreflect.ValueOfInt64(0)
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
return protoreflect.ValueOfUint32(0)
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
return protoreflect.ValueOfUint64(0)
case protoreflect.FloatKind:
return protoreflect.ValueOfFloat32(0)
case protoreflect.DoubleKind:
return protoreflect.ValueOfFloat64(0)
case protoreflect.StringKind:
return protoreflect.ValueOfString("")
case protoreflect.BytesKind:
return protoreflect.ValueOfBytes(nil)
case protoreflect.EnumKind:
if evs := fd.Enum().Values(); evs.Len() > 0 {
return protoreflect.ValueOfEnum(evs.Get(0).Number())
}
return protoreflect.ValueOfEnum(0)
}
}
if len(dv.bytes) > 0 && !bytes.Equal(dv.bytes, dv.val.Bytes()) {
// TODO: Avoid panic if we're running with the race detector
// and instead spawn a goroutine that periodically resets
// this value back to the original to induce a race.
panic(fmt.Sprintf("detected mutation on the default bytes for %v", fd.FullName()))
}
return dv.val
}
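The once/mu pair on File above implements a double-checked lazy initialization: an atomic flag guards the fast path and a mutex serializes the single initializer. Below is a standalone sketch of the same pattern, with illustrative names.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type lazy struct {
	once uint32 // atomically set once val is valid
	mu   sync.Mutex
	val  *string
}

// get mirrors File.lazyInit/lazyInitOnce: readers pay only an atomic load
// after the first call; the mutex is held only while initializing.
func (l *lazy) get() *string {
	if atomic.LoadUint32(&l.once) == 0 {
		l.mu.Lock()
		if l.val == nil {
			s := "expensive result"
			l.val = &s
		}
		atomic.StoreUint32(&l.once, 1)
		l.mu.Unlock()
	}
	return l.val
}

func main() {
	var l lazy
	fmt.Println(*l.get())
}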

View File

@ -1,523 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filedesc
import (
"fmt"
"sync"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/reflect/protoreflect"
)
// fileRaw is a data struct used when initializing a file descriptor from
// a raw FileDescriptorProto.
type fileRaw struct {
builder Builder
allEnums []Enum
allMessages []Message
allExtensions []Extension
allServices []Service
}
func newRawFile(db Builder) *File {
fd := &File{fileRaw: fileRaw{builder: db}}
fd.initDecls(db.NumEnums, db.NumMessages, db.NumExtensions, db.NumServices)
fd.unmarshalSeed(db.RawDescriptor)
// Extended message targets are eagerly resolved since registration
// needs this information at program init time.
for i := range fd.allExtensions {
xd := &fd.allExtensions[i]
xd.L1.Extendee = fd.resolveMessageDependency(xd.L1.Extendee, listExtTargets, int32(i))
}
fd.checkDecls()
return fd
}
// initDecls pre-allocates slices for the exact number of enums, messages
// (including map entries), extensions, and services declared in the proto file.
// This is done to avoid regrowing the slice, which would change the address
// for any previously seen declaration.
//
// The alloc methods "allocate" slices by pulling from the capacity.
func (fd *File) initDecls(numEnums, numMessages, numExtensions, numServices int32) {
fd.allEnums = make([]Enum, 0, numEnums)
fd.allMessages = make([]Message, 0, numMessages)
fd.allExtensions = make([]Extension, 0, numExtensions)
fd.allServices = make([]Service, 0, numServices)
}
func (fd *File) allocEnums(n int) []Enum {
total := len(fd.allEnums)
es := fd.allEnums[total : total+n]
fd.allEnums = fd.allEnums[:total+n]
return es
}
func (fd *File) allocMessages(n int) []Message {
total := len(fd.allMessages)
ms := fd.allMessages[total : total+n]
fd.allMessages = fd.allMessages[:total+n]
return ms
}
func (fd *File) allocExtensions(n int) []Extension {
total := len(fd.allExtensions)
xs := fd.allExtensions[total : total+n]
fd.allExtensions = fd.allExtensions[:total+n]
return xs
}
func (fd *File) allocServices(n int) []Service {
total := len(fd.allServices)
xs := fd.allServices[total : total+n]
fd.allServices = fd.allServices[:total+n]
return xs
}
// checkDecls performs a sanity check that the expected number of
// declarations matches the number found in the descriptor proto.
func (fd *File) checkDecls() {
switch {
case len(fd.allEnums) != cap(fd.allEnums):
case len(fd.allMessages) != cap(fd.allMessages):
case len(fd.allExtensions) != cap(fd.allExtensions):
case len(fd.allServices) != cap(fd.allServices):
default:
return
}
panic("mismatching cardinality")
}
func (fd *File) unmarshalSeed(b []byte) {
sb := getBuilder()
defer putBuilder(sb)
var prevField protoreflect.FieldNumber
var numEnums, numMessages, numExtensions, numServices int
var posEnums, posMessages, posExtensions, posServices int
var options []byte
b0 := b
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.FileDescriptorProto_Syntax_field_number:
switch string(v) {
case "proto2":
fd.L1.Syntax = protoreflect.Proto2
case "proto3":
fd.L1.Syntax = protoreflect.Proto3
case "editions":
fd.L1.Syntax = protoreflect.Editions
default:
panic("invalid syntax")
}
case genid.FileDescriptorProto_Name_field_number:
fd.L1.Path = sb.MakeString(v)
case genid.FileDescriptorProto_Package_field_number:
fd.L1.Package = protoreflect.FullName(sb.MakeString(v))
case genid.FileDescriptorProto_Options_field_number:
options = v
case genid.FileDescriptorProto_EnumType_field_number:
if prevField != genid.FileDescriptorProto_EnumType_field_number {
if numEnums > 0 {
panic("non-contiguous repeated field")
}
posEnums = len(b0) - len(b) - n - m
}
numEnums++
case genid.FileDescriptorProto_MessageType_field_number:
if prevField != genid.FileDescriptorProto_MessageType_field_number {
if numMessages > 0 {
panic("non-contiguous repeated field")
}
posMessages = len(b0) - len(b) - n - m
}
numMessages++
case genid.FileDescriptorProto_Extension_field_number:
if prevField != genid.FileDescriptorProto_Extension_field_number {
if numExtensions > 0 {
panic("non-contiguous repeated field")
}
posExtensions = len(b0) - len(b) - n - m
}
numExtensions++
case genid.FileDescriptorProto_Service_field_number:
if prevField != genid.FileDescriptorProto_Service_field_number {
if numServices > 0 {
panic("non-contiguous repeated field")
}
posServices = len(b0) - len(b) - n - m
}
numServices++
}
prevField = num
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.FileDescriptorProto_Edition_field_number:
fd.L1.Edition = Edition(v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
prevField = -1 // ignore known field numbers of unknown wire type
}
}
// If syntax is missing, it is assumed to be proto2.
if fd.L1.Syntax == 0 {
fd.L1.Syntax = protoreflect.Proto2
}
if fd.L1.Syntax == protoreflect.Editions {
fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition)
}
// Parse editions features from options if any
if options != nil {
fd.unmarshalSeedOptions(options)
}
// Must allocate all declarations before parsing each descriptor type
// to ensure we handled all descriptors in "flattened ordering".
if numEnums > 0 {
fd.L1.Enums.List = fd.allocEnums(numEnums)
}
if numMessages > 0 {
fd.L1.Messages.List = fd.allocMessages(numMessages)
}
if numExtensions > 0 {
fd.L1.Extensions.List = fd.allocExtensions(numExtensions)
}
if numServices > 0 {
fd.L1.Services.List = fd.allocServices(numServices)
}
if numEnums > 0 {
b := b0[posEnums:]
for i := range fd.L1.Enums.List {
_, n := protowire.ConsumeVarint(b)
v, m := protowire.ConsumeBytes(b[n:])
fd.L1.Enums.List[i].unmarshalSeed(v, sb, fd, fd, i)
b = b[n+m:]
}
}
if numMessages > 0 {
b := b0[posMessages:]
for i := range fd.L1.Messages.List {
_, n := protowire.ConsumeVarint(b)
v, m := protowire.ConsumeBytes(b[n:])
fd.L1.Messages.List[i].unmarshalSeed(v, sb, fd, fd, i)
b = b[n+m:]
}
}
if numExtensions > 0 {
b := b0[posExtensions:]
for i := range fd.L1.Extensions.List {
_, n := protowire.ConsumeVarint(b)
v, m := protowire.ConsumeBytes(b[n:])
fd.L1.Extensions.List[i].unmarshalSeed(v, sb, fd, fd, i)
b = b[n+m:]
}
}
if numServices > 0 {
b := b0[posServices:]
for i := range fd.L1.Services.List {
_, n := protowire.ConsumeVarint(b)
v, m := protowire.ConsumeBytes(b[n:])
fd.L1.Services.List[i].unmarshalSeed(v, sb, fd, fd, i)
b = b[n+m:]
}
}
}
func (fd *File) unmarshalSeedOptions(b []byte) {
for b := b; len(b) > 0; {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.FileOptions_Features_field_number:
if fd.Syntax() != protoreflect.Editions {
panic(fmt.Sprintf("invalid descriptor: using edition features in a proto with syntax %s", fd.Syntax()))
}
fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) {
ed.L0.ParentFile = pf
ed.L0.Parent = pd
ed.L0.Index = i
var numValues int
for b := b; len(b) > 0; {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.EnumDescriptorProto_Name_field_number:
ed.L0.FullName = appendFullName(sb, pd.FullName(), v)
case genid.EnumDescriptorProto_Value_field_number:
numValues++
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
// Only construct enum value descriptors for top-level enums since
// they are needed for registration.
if pd != pf {
return
}
ed.L1.eagerValues = true
ed.L2 = new(EnumL2)
ed.L2.Values.List = make([]EnumValue, numValues)
for i := 0; len(b) > 0; {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.EnumDescriptorProto_Value_field_number:
ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i)
i++
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) {
md.L0.ParentFile = pf
md.L0.Parent = pd
md.L0.Index = i
md.L1.EditionFeatures = featuresFromParentDesc(md.Parent())
var prevField protoreflect.FieldNumber
var numEnums, numMessages, numExtensions int
var posEnums, posMessages, posExtensions int
b0 := b
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.DescriptorProto_Name_field_number:
md.L0.FullName = appendFullName(sb, pd.FullName(), v)
case genid.DescriptorProto_EnumType_field_number:
if prevField != genid.DescriptorProto_EnumType_field_number {
if numEnums > 0 {
panic("non-contiguous repeated field")
}
posEnums = len(b0) - len(b) - n - m
}
numEnums++
case genid.DescriptorProto_NestedType_field_number:
if prevField != genid.DescriptorProto_NestedType_field_number {
if numMessages > 0 {
panic("non-contiguous repeated field")
}
posMessages = len(b0) - len(b) - n - m
}
numMessages++
case genid.DescriptorProto_Extension_field_number:
if prevField != genid.DescriptorProto_Extension_field_number {
if numExtensions > 0 {
panic("non-contiguous repeated field")
}
posExtensions = len(b0) - len(b) - n - m
}
numExtensions++
case genid.DescriptorProto_Options_field_number:
md.unmarshalSeedOptions(v)
}
prevField = num
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
prevField = -1 // ignore known field numbers of unknown wire type
}
}
// Must allocate all declarations before parsing each descriptor type
// to ensure we handled all descriptors in "flattened ordering".
if numEnums > 0 {
md.L1.Enums.List = pf.allocEnums(numEnums)
}
if numMessages > 0 {
md.L1.Messages.List = pf.allocMessages(numMessages)
}
if numExtensions > 0 {
md.L1.Extensions.List = pf.allocExtensions(numExtensions)
}
if numEnums > 0 {
b := b0[posEnums:]
for i := range md.L1.Enums.List {
_, n := protowire.ConsumeVarint(b)
v, m := protowire.ConsumeBytes(b[n:])
md.L1.Enums.List[i].unmarshalSeed(v, sb, pf, md, i)
b = b[n+m:]
}
}
if numMessages > 0 {
b := b0[posMessages:]
for i := range md.L1.Messages.List {
_, n := protowire.ConsumeVarint(b)
v, m := protowire.ConsumeBytes(b[n:])
md.L1.Messages.List[i].unmarshalSeed(v, sb, pf, md, i)
b = b[n+m:]
}
}
if numExtensions > 0 {
b := b0[posExtensions:]
for i := range md.L1.Extensions.List {
_, n := protowire.ConsumeVarint(b)
v, m := protowire.ConsumeBytes(b[n:])
md.L1.Extensions.List[i].unmarshalSeed(v, sb, pf, md, i)
b = b[n+m:]
}
}
}
func (md *Message) unmarshalSeedOptions(b []byte) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.MessageOptions_MapEntry_field_number:
md.L1.IsMapEntry = protowire.DecodeBool(v)
case genid.MessageOptions_MessageSetWireFormat_field_number:
md.L1.IsMessageSet = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.MessageOptions_Features_field_number:
md.L1.EditionFeatures = unmarshalFeatureSet(v, md.L1.EditionFeatures)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) {
xd.L0.ParentFile = pf
xd.L0.Parent = pd
xd.L0.Index = i
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.FieldDescriptorProto_Number_field_number:
xd.L1.Number = protoreflect.FieldNumber(v)
case genid.FieldDescriptorProto_Label_field_number:
xd.L1.Cardinality = protoreflect.Cardinality(v)
case genid.FieldDescriptorProto_Type_field_number:
xd.L1.Kind = protoreflect.Kind(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.FieldDescriptorProto_Name_field_number:
xd.L0.FullName = appendFullName(sb, pd.FullName(), v)
case genid.FieldDescriptorProto_Extendee_field_number:
xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v))
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) {
sd.L0.ParentFile = pf
sd.L0.Parent = pd
sd.L0.Index = i
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.ServiceDescriptorProto_Name_field_number:
sd.L0.FullName = appendFullName(sb, pd.FullName(), v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
var nameBuilderPool = sync.Pool{
New: func() interface{} { return new(strs.Builder) },
}
func getBuilder() *strs.Builder {
return nameBuilderPool.Get().(*strs.Builder)
}
func putBuilder(b *strs.Builder) {
nameBuilderPool.Put(b)
}
// makeFullName converts b to a protoreflect.FullName,
// where b must start with a leading dot.
func makeFullName(sb *strs.Builder, b []byte) protoreflect.FullName {
if len(b) == 0 || b[0] != '.' {
panic("name reference must be fully qualified")
}
return protoreflect.FullName(sb.MakeString(b[1:]))
}
func appendFullName(sb *strs.Builder, prefix protoreflect.FullName, suffix []byte) protoreflect.FullName {
return sb.AppendFullName(prefix, protoreflect.Name(strs.UnsafeString(suffix)))
}
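// Illustrative sketch, not part of the vendored file: type references inside
// a FileDescriptorProto (extendee, type_name, method input/output types) are
// fully qualified and carry a leading dot, e.g. ".google.protobuf.Any".
// makeFullName strips that dot while interning the string through
// strs.Builder; without the interning, the conversion is simply:
func fullNameOf(ref []byte) protoreflect.FullName {
	if len(ref) == 0 || ref[0] != '.' {
		panic("name reference must be fully qualified")
	}
	return protoreflect.FullName(ref[1:]) // ".foo.Bar" -> "foo.Bar"
}

// appendFullName is likewise the interned equivalent of FullName.Append:
// protoreflect.FullName("pkg.Msg").Append("field") == "pkg.Msg.field".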


@ -1,732 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filedesc
import (
"reflect"
"sync"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/descopts"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
)
func (fd *File) lazyRawInit() {
fd.unmarshalFull(fd.builder.RawDescriptor)
fd.resolveMessages()
fd.resolveExtensions()
fd.resolveServices()
}
func (file *File) resolveMessages() {
var depIdx int32
for i := range file.allMessages {
md := &file.allMessages[i]
// Resolve message field dependencies.
for j := range md.L2.Fields.List {
fd := &md.L2.Fields.List[j]
// Weak fields are resolved upon actual use.
if fd.L1.IsWeak {
continue
}
// Resolve message field dependency.
switch fd.L1.Kind {
case protoreflect.EnumKind:
fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx)
depIdx++
case protoreflect.MessageKind, protoreflect.GroupKind:
fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx)
depIdx++
}
// Default is resolved here since it depends on Enum being resolved.
if v := fd.L1.Default.val; v.IsValid() {
fd.L1.Default = unmarshalDefault(v.Bytes(), fd.L1.Kind, file, fd.L1.Enum)
}
}
}
}
func (file *File) resolveExtensions() {
var depIdx int32
for i := range file.allExtensions {
xd := &file.allExtensions[i]
// Resolve extension field dependency.
switch xd.L1.Kind {
case protoreflect.EnumKind:
xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx)
depIdx++
case protoreflect.MessageKind, protoreflect.GroupKind:
xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx)
depIdx++
}
// Default is resolved here since it depends on Enum being resolved.
if v := xd.L2.Default.val; v.IsValid() {
xd.L2.Default = unmarshalDefault(v.Bytes(), xd.L1.Kind, file, xd.L2.Enum)
}
}
}
func (file *File) resolveServices() {
var depIdx int32
for i := range file.allServices {
sd := &file.allServices[i]
// Resolve method dependencies.
for j := range sd.L2.Methods.List {
md := &sd.L2.Methods.List[j]
md.L1.Input = file.resolveMessageDependency(md.L1.Input, listMethInDeps, depIdx)
md.L1.Output = file.resolveMessageDependency(md.L1.Output, listMethOutDeps, depIdx)
depIdx++
}
}
}
func (file *File) resolveEnumDependency(ed protoreflect.EnumDescriptor, i, j int32) protoreflect.EnumDescriptor {
r := file.builder.FileRegistry
if r, ok := r.(resolverByIndex); ok {
if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil {
return ed2
}
}
for i := range file.allEnums {
if ed2 := &file.allEnums[i]; ed2.L0.FullName == ed.FullName() {
return ed2
}
}
if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil {
return d.(protoreflect.EnumDescriptor)
}
return ed
}
func (file *File) resolveMessageDependency(md protoreflect.MessageDescriptor, i, j int32) protoreflect.MessageDescriptor {
r := file.builder.FileRegistry
if r, ok := r.(resolverByIndex); ok {
if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil {
return md2
}
}
for i := range file.allMessages {
if md2 := &file.allMessages[i]; md2.L0.FullName == md.FullName() {
return md2
}
}
if d, _ := r.FindDescriptorByName(md.FullName()); d != nil {
return d.(protoreflect.MessageDescriptor)
}
return md
}
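// Illustrative sketch, not part of the vendored file: the resolve* methods
// above swap placeholder descriptors (recorded during the seed pass) for
// concrete ones, preferring the index-based lookup and falling back to a
// lookup by full name. A hypothetical stand-alone variant against the global
// registry (import "google.golang.org/protobuf/reflect/protoregistry"):
func resolvePlaceholder(md protoreflect.MessageDescriptor) protoreflect.MessageDescriptor {
	if !md.IsPlaceholder() {
		return md // already concrete
	}
	if d, err := protoregistry.GlobalFiles.FindDescriptorByName(md.FullName()); err == nil {
		if md2, ok := d.(protoreflect.MessageDescriptor); ok {
			return md2
		}
	}
	return md // keep the placeholder if the name is not registered
}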
func (fd *File) unmarshalFull(b []byte) {
sb := getBuilder()
defer putBuilder(sb)
var enumIdx, messageIdx, extensionIdx, serviceIdx int
var rawOptions []byte
fd.L2 = new(FileL2)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.FileDescriptorProto_PublicDependency_field_number:
fd.L2.Imports[v].IsPublic = true
case genid.FileDescriptorProto_WeakDependency_field_number:
fd.L2.Imports[v].IsWeak = true
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.FileDescriptorProto_Dependency_field_number:
path := sb.MakeString(v)
imp, _ := fd.builder.FileRegistry.FindFileByPath(path)
if imp == nil {
imp = PlaceholderFile(path)
}
fd.L2.Imports = append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp})
case genid.FileDescriptorProto_EnumType_field_number:
fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
enumIdx++
case genid.FileDescriptorProto_MessageType_field_number:
fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb)
messageIdx++
case genid.FileDescriptorProto_Extension_field_number:
fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb)
extensionIdx++
case genid.FileDescriptorProto_Service_field_number:
fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb)
serviceIdx++
case genid.FileDescriptorProto_Options_field_number:
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions)
}
func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) {
var rawValues [][]byte
var rawOptions []byte
if !ed.L1.eagerValues {
ed.L2 = new(EnumL2)
}
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.EnumDescriptorProto_Value_field_number:
rawValues = append(rawValues, v)
case genid.EnumDescriptorProto_ReservedName_field_number:
ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v)))
case genid.EnumDescriptorProto_ReservedRange_field_number:
ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v))
case genid.EnumDescriptorProto_Options_field_number:
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
if !ed.L1.eagerValues && len(rawValues) > 0 {
ed.L2.Values.List = make([]EnumValue, len(rawValues))
for i, b := range rawValues {
ed.L2.Values.List[i].unmarshalFull(b, sb, ed.L0.ParentFile, ed, i)
}
}
ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions)
}
func unmarshalEnumReservedRange(b []byte) (r [2]protoreflect.EnumNumber) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number:
r[0] = protoreflect.EnumNumber(v)
case genid.EnumDescriptorProto_EnumReservedRange_End_field_number:
r[1] = protoreflect.EnumNumber(v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
return r
}
func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) {
vd.L0.ParentFile = pf
vd.L0.Parent = pd
vd.L0.Index = i
var rawOptions []byte
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.EnumValueDescriptorProto_Number_field_number:
vd.L1.Number = protoreflect.EnumNumber(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.EnumValueDescriptorProto_Name_field_number:
// NOTE: Enum values are in the same scope as the enum parent.
vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v)
case genid.EnumValueDescriptorProto_Options_field_number:
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
vd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.EnumValue, rawOptions)
}
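// Illustrative note, not part of the vendored file: because enum values share
// the scope of their parent enum's parent, the name above is appended to
// pd.Parent().FullName() rather than pd.FullName(). For example, given
//
//	package pkg;
//	enum Color { COLOR_RED = 0; }
//
// the value's FullName is "pkg.COLOR_RED", not "pkg.Color.COLOR_RED".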
func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) {
var rawFields, rawOneofs [][]byte
var enumIdx, messageIdx, extensionIdx int
var rawOptions []byte
md.L2 = new(MessageL2)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.DescriptorProto_Field_field_number:
rawFields = append(rawFields, v)
case genid.DescriptorProto_OneofDecl_field_number:
rawOneofs = append(rawOneofs, v)
case genid.DescriptorProto_ReservedName_field_number:
md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v)))
case genid.DescriptorProto_ReservedRange_field_number:
md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v))
case genid.DescriptorProto_ExtensionRange_field_number:
r, rawOptions := unmarshalMessageExtensionRange(v)
opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions)
md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r)
md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts)
case genid.DescriptorProto_EnumType_field_number:
md.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
enumIdx++
case genid.DescriptorProto_NestedType_field_number:
md.L1.Messages.List[messageIdx].unmarshalFull(v, sb)
messageIdx++
case genid.DescriptorProto_Extension_field_number:
md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb)
extensionIdx++
case genid.DescriptorProto_Options_field_number:
md.unmarshalOptions(v)
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
if len(rawFields) > 0 || len(rawOneofs) > 0 {
md.L2.Fields.List = make([]Field, len(rawFields))
md.L2.Oneofs.List = make([]Oneof, len(rawOneofs))
for i, b := range rawFields {
fd := &md.L2.Fields.List[i]
fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i)
if fd.L1.Cardinality == protoreflect.Required {
md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number)
}
}
for i, b := range rawOneofs {
od := &md.L2.Oneofs.List[i]
od.unmarshalFull(b, sb, md.L0.ParentFile, md, i)
}
}
md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions)
}
func (md *Message) unmarshalOptions(b []byte) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.MessageOptions_MapEntry_field_number:
md.L1.IsMapEntry = protowire.DecodeBool(v)
case genid.MessageOptions_MessageSetWireFormat_field_number:
md.L1.IsMessageSet = protowire.DecodeBool(v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
func unmarshalMessageReservedRange(b []byte) (r [2]protoreflect.FieldNumber) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.DescriptorProto_ReservedRange_Start_field_number:
r[0] = protoreflect.FieldNumber(v)
case genid.DescriptorProto_ReservedRange_End_field_number:
r[1] = protoreflect.FieldNumber(v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
return r
}
func unmarshalMessageExtensionRange(b []byte) (r [2]protoreflect.FieldNumber, rawOptions []byte) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.DescriptorProto_ExtensionRange_Start_field_number:
r[0] = protoreflect.FieldNumber(v)
case genid.DescriptorProto_ExtensionRange_End_field_number:
r[1] = protoreflect.FieldNumber(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.DescriptorProto_ExtensionRange_Options_field_number:
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
return r, rawOptions
}
func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) {
fd.L0.ParentFile = pf
fd.L0.Parent = pd
fd.L0.Index = i
fd.L1.EditionFeatures = featuresFromParentDesc(fd.Parent())
var rawTypeName []byte
var rawOptions []byte
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.FieldDescriptorProto_Number_field_number:
fd.L1.Number = protoreflect.FieldNumber(v)
case genid.FieldDescriptorProto_Label_field_number:
fd.L1.Cardinality = protoreflect.Cardinality(v)
case genid.FieldDescriptorProto_Type_field_number:
fd.L1.Kind = protoreflect.Kind(v)
case genid.FieldDescriptorProto_OneofIndex_field_number:
// In Message.unmarshalFull, we allocate slices for both
// the field and oneof descriptors before unmarshaling either
// of them. This ensures pointers to slice elements are stable.
od := &pd.(*Message).L2.Oneofs.List[v]
od.L1.Fields.List = append(od.L1.Fields.List, fd)
if fd.L1.ContainingOneof != nil {
panic("oneof type already set")
}
fd.L1.ContainingOneof = od
case genid.FieldDescriptorProto_Proto3Optional_field_number:
fd.L1.IsProto3Optional = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.FieldDescriptorProto_Name_field_number:
fd.L0.FullName = appendFullName(sb, pd.FullName(), v)
case genid.FieldDescriptorProto_JsonName_field_number:
fd.L1.StringName.InitJSON(sb.MakeString(v))
case genid.FieldDescriptorProto_DefaultValue_field_number:
fd.L1.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages
case genid.FieldDescriptorProto_TypeName_field_number:
rawTypeName = v
case genid.FieldDescriptorProto_Options_field_number:
fd.unmarshalOptions(v)
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded {
fd.L1.Kind = protoreflect.GroupKind
}
if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired {
fd.L1.Cardinality = protoreflect.Required
}
if rawTypeName != nil {
name := makeFullName(sb, rawTypeName)
switch fd.L1.Kind {
case protoreflect.EnumKind:
fd.L1.Enum = PlaceholderEnum(name)
case protoreflect.MessageKind, protoreflect.GroupKind:
fd.L1.Message = PlaceholderMessage(name)
}
}
fd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Field, rawOptions)
}
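// Illustrative sketch, not part of the vendored file: the oneof wiring above
// takes &pd.(*Message).L2.Oneofs.List[v] and appends *Field pointers into it,
// which is only safe because Message.unmarshalFull sizes both slices to their
// final lengths before unmarshaling any element. The hazard being avoided,
// shown with a hypothetical throwaway type:
func stablePointersDemo() {
	type field struct{ name string }
	grown := make([]field, 0, 1)
	grown = append(grown, field{name: "a"})
	p := &grown[0]
	grown = append(grown, field{name: "b"}) // exceeds capacity: backing array moves
	_, _ = p, grown                         // p now points into the old, stale array

	fixed := make([]field, 2) // preallocated to its final length
	q := &fixed[0]            // stays valid: fixed is indexed, never appended to
	_ = q
}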
func (fd *Field) unmarshalOptions(b []byte) {
const FieldOptions_EnforceUTF8 = 13
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.FieldOptions_Packed_field_number:
fd.L1.HasPacked = true
fd.L1.IsPacked = protowire.DecodeBool(v)
case genid.FieldOptions_Weak_field_number:
fd.L1.IsWeak = protowire.DecodeBool(v)
case FieldOptions_EnforceUTF8:
fd.L1.HasEnforceUTF8 = true
fd.L1.EnforceUTF8 = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.FieldOptions_Features_field_number:
fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) {
od.L0.ParentFile = pf
od.L0.Parent = pd
od.L0.Index = i
var rawOptions []byte
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.OneofDescriptorProto_Name_field_number:
od.L0.FullName = appendFullName(sb, pd.FullName(), v)
case genid.OneofDescriptorProto_Options_field_number:
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
od.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Oneof, rawOptions)
}
func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
var rawTypeName []byte
var rawOptions []byte
xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee)
xd.L2 = new(ExtensionL2)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.FieldDescriptorProto_Proto3Optional_field_number:
xd.L2.IsProto3Optional = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.FieldDescriptorProto_JsonName_field_number:
xd.L2.StringName.InitJSON(sb.MakeString(v))
case genid.FieldDescriptorProto_DefaultValue_field_number:
xd.L2.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions
case genid.FieldDescriptorProto_TypeName_field_number:
rawTypeName = v
case genid.FieldDescriptorProto_Options_field_number:
xd.unmarshalOptions(v)
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded {
xd.L1.Kind = protoreflect.GroupKind
}
if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired {
xd.L1.Cardinality = protoreflect.Required
}
if rawTypeName != nil {
name := makeFullName(sb, rawTypeName)
switch xd.L1.Kind {
case protoreflect.EnumKind:
xd.L2.Enum = PlaceholderEnum(name)
case protoreflect.MessageKind, protoreflect.GroupKind:
xd.L2.Message = PlaceholderMessage(name)
}
}
xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions)
}
func (xd *Extension) unmarshalOptions(b []byte) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.FieldOptions_Packed_field_number:
xd.L2.IsPacked = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.FieldOptions_Features_field_number:
xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) {
var rawMethods [][]byte
var rawOptions []byte
sd.L2 = new(ServiceL2)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.ServiceDescriptorProto_Method_field_number:
rawMethods = append(rawMethods, v)
case genid.ServiceDescriptorProto_Options_field_number:
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
if len(rawMethods) > 0 {
sd.L2.Methods.List = make([]Method, len(rawMethods))
for i, b := range rawMethods {
sd.L2.Methods.List[i].unmarshalFull(b, sb, sd.L0.ParentFile, sd, i)
}
}
sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions)
}
func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) {
md.L0.ParentFile = pf
md.L0.Parent = pd
md.L0.Index = i
var rawOptions []byte
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.MethodDescriptorProto_ClientStreaming_field_number:
md.L1.IsStreamingClient = protowire.DecodeBool(v)
case genid.MethodDescriptorProto_ServerStreaming_field_number:
md.L1.IsStreamingServer = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.MethodDescriptorProto_Name_field_number:
md.L0.FullName = appendFullName(sb, pd.FullName(), v)
case genid.MethodDescriptorProto_InputType_field_number:
md.L1.Input = PlaceholderMessage(makeFullName(sb, v))
case genid.MethodDescriptorProto_OutputType_field_number:
md.L1.Output = PlaceholderMessage(makeFullName(sb, v))
case genid.MethodDescriptorProto_Options_field_number:
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
md.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Method, rawOptions)
}
// appendOptions appends src to dst, where the returned slice is never nil.
// This is necessary to distinguish between empty and unpopulated options.
func appendOptions(dst, src []byte) []byte {
if dst == nil {
dst = []byte{}
}
return append(dst, src...)
}
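// Illustrative note, not part of the vendored file: appendOptions preserves
// the nil/empty distinction that optionsUnmarshaler below keys off of.
//
//	var raw []byte                // options field never seen: raw == nil
//	raw = appendOptions(raw, nil) // options seen but empty: raw == []byte{}
//
// A nil slice means "no options at all", so the lazy accessor stays nil; a
// non-nil empty slice still yields a default options message on first use.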
// optionsUnmarshaler constructs a lazy unmarshal function for an options message.
//
// The type of message to unmarshal to is passed as a pointer since the
// vars in descopts may not yet be populated at the time this function is called.
func (db *Builder) optionsUnmarshaler(p *protoreflect.ProtoMessage, b []byte) func() protoreflect.ProtoMessage {
if b == nil {
return nil
}
var opts protoreflect.ProtoMessage
var once sync.Once
return func() protoreflect.ProtoMessage {
once.Do(func() {
if *p == nil {
panic("Descriptor.Options called without importing the descriptor package")
}
opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(protoreflect.ProtoMessage)
if err := (proto.UnmarshalOptions{
AllowPartial: true,
Resolver: db.TypeResolver,
}).Unmarshal(b, opts); err != nil {
panic(err)
}
})
return opts
}
}
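// Illustrative sketch, not part of the vendored file: the closure returned by
// optionsUnmarshaler is the usual compute-once-and-cache shape, generalized
// here as a hypothetical helper (uses "sync"; generics need Go 1.18+):
func lazyValue[T any](compute func() T) func() T {
	var (
		once sync.Once
		v    T
	)
	return func() T {
		once.Do(func() { v = compute() })
		return v
	}
}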


@ -1,457 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filedesc
import (
"fmt"
"math"
"sort"
"sync"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/descfmt"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/reflect/protoreflect"
)
type FileImports []protoreflect.FileImport
func (p *FileImports) Len() int { return len(*p) }
func (p *FileImports) Get(i int) protoreflect.FileImport { return (*p)[i] }
func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {}
type Names struct {
List []protoreflect.Name
once sync.Once
has map[protoreflect.Name]int // protected by once
}
func (p *Names) Len() int { return len(p.List) }
func (p *Names) Get(i int) protoreflect.Name { return p.List[i] }
func (p *Names) Has(s protoreflect.Name) bool { return p.lazyInit().has[s] > 0 }
func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
func (p *Names) ProtoInternal(pragma.DoNotImplement) {}
func (p *Names) lazyInit() *Names {
p.once.Do(func() {
if len(p.List) > 0 {
p.has = make(map[protoreflect.Name]int, len(p.List))
for _, s := range p.List {
p.has[s] = p.has[s] + 1
}
}
})
return p
}
// CheckValid reports any errors with the set of names with an error message
// that completes the sentence: "names is invalid because it has ..."
func (p *Names) CheckValid() error {
for s, n := range p.lazyInit().has {
switch {
case n > 1:
return errors.New("duplicate name: %q", s)
case false && !s.IsValid():
// NOTE: The C++ implementation does not validate the identifier.
// See https://github.com/protocolbuffers/protobuf/issues/6335.
return errors.New("invalid name: %q", s)
}
}
return nil
}
type EnumRanges struct {
List [][2]protoreflect.EnumNumber // start inclusive; end inclusive
once sync.Once
sorted [][2]protoreflect.EnumNumber // protected by once
}
func (p *EnumRanges) Len() int { return len(p.List) }
func (p *EnumRanges) Get(i int) [2]protoreflect.EnumNumber { return p.List[i] }
func (p *EnumRanges) Has(n protoreflect.EnumNumber) bool {
for ls := p.lazyInit().sorted; len(ls) > 0; {
i := len(ls) / 2
switch r := enumRange(ls[i]); {
case n < r.Start():
ls = ls[:i] // search lower
case n > r.End():
ls = ls[i+1:] // search upper
default:
return true
}
}
return false
}
func (p *EnumRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
func (p *EnumRanges) ProtoInternal(pragma.DoNotImplement) {}
func (p *EnumRanges) lazyInit() *EnumRanges {
p.once.Do(func() {
p.sorted = append(p.sorted, p.List...)
sort.Slice(p.sorted, func(i, j int) bool {
return p.sorted[i][0] < p.sorted[j][0]
})
})
return p
}
// CheckValid reports any errors with the set of ranges with an error message
// that completes the sentence: "ranges is invalid because it has ..."
func (p *EnumRanges) CheckValid() error {
var rp enumRange
for i, r := range p.lazyInit().sorted {
r := enumRange(r)
switch {
case !(r.Start() <= r.End()):
return errors.New("invalid range: %v", r)
case !(rp.End() < r.Start()) && i > 0:
return errors.New("overlapping ranges: %v with %v", rp, r)
}
rp = r
}
return nil
}
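// Illustrative usage, not part of the vendored file: Has performs a binary
// search over the sorted copy built by lazyInit, so membership checks stay
// O(log n) even for long reserved-range lists. For example:
//
//	r := &EnumRanges{List: [][2]protoreflect.EnumNumber{{10, 20}, {1, 3}, {40, 40}}}
//	r.Has(2)  // true:  inside [1, 3]
//	r.Has(25) // false: between [10, 20] and [40, 40]
//	r.Has(40) // true:  single-element range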
type enumRange [2]protoreflect.EnumNumber
func (r enumRange) Start() protoreflect.EnumNumber { return r[0] } // inclusive
func (r enumRange) End() protoreflect.EnumNumber { return r[1] } // inclusive
func (r enumRange) String() string {
if r.Start() == r.End() {
return fmt.Sprintf("%d", r.Start())
}
return fmt.Sprintf("%d to %d", r.Start(), r.End())
}
type FieldRanges struct {
List [][2]protoreflect.FieldNumber // start inclusive; end exclusive
once sync.Once
sorted [][2]protoreflect.FieldNumber // protected by once
}
func (p *FieldRanges) Len() int { return len(p.List) }
func (p *FieldRanges) Get(i int) [2]protoreflect.FieldNumber { return p.List[i] }
func (p *FieldRanges) Has(n protoreflect.FieldNumber) bool {
for ls := p.lazyInit().sorted; len(ls) > 0; {
i := len(ls) / 2
switch r := fieldRange(ls[i]); {
case n < r.Start():
ls = ls[:i] // search lower
case n > r.End():
ls = ls[i+1:] // search upper
default:
return true
}
}
return false
}
func (p *FieldRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
func (p *FieldRanges) ProtoInternal(pragma.DoNotImplement) {}
func (p *FieldRanges) lazyInit() *FieldRanges {
p.once.Do(func() {
p.sorted = append(p.sorted, p.List...)
sort.Slice(p.sorted, func(i, j int) bool {
return p.sorted[i][0] < p.sorted[j][0]
})
})
return p
}
// CheckValid reports any errors with the set of ranges with an error message
// that completes the sentence: "ranges is invalid because it has ..."
func (p *FieldRanges) CheckValid(isMessageSet bool) error {
var rp fieldRange
for i, r := range p.lazyInit().sorted {
r := fieldRange(r)
switch {
case !isValidFieldNumber(r.Start(), isMessageSet):
return errors.New("invalid field number: %d", r.Start())
case !isValidFieldNumber(r.End(), isMessageSet):
return errors.New("invalid field number: %d", r.End())
case !(r.Start() <= r.End()):
return errors.New("invalid range: %v", r)
case !(rp.End() < r.Start()) && i > 0:
return errors.New("overlapping ranges: %v with %v", rp, r)
}
rp = r
}
return nil
}
// isValidFieldNumber reports whether the field number is valid.
// Unlike the FieldNumber.IsValid method, it allows ranges that cover the
// reserved number range.
func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool {
return protowire.MinValidNumber <= n && (n <= protowire.MaxValidNumber || isMessageSet)
}
// CheckOverlap reports an error if p and q overlap.
func (p *FieldRanges) CheckOverlap(q *FieldRanges) error {
rps := p.lazyInit().sorted
rqs := q.lazyInit().sorted
for pi, qi := 0, 0; pi < len(rps) && qi < len(rqs); {
rp := fieldRange(rps[pi])
rq := fieldRange(rqs[qi])
if !(rp.End() < rq.Start() || rq.End() < rp.Start()) {
return errors.New("overlapping ranges: %v with %v", rp, rq)
}
if rp.Start() < rq.Start() {
pi++
} else {
qi++
}
}
return nil
}
type fieldRange [2]protoreflect.FieldNumber
func (r fieldRange) Start() protoreflect.FieldNumber { return r[0] } // inclusive
func (r fieldRange) End() protoreflect.FieldNumber { return r[1] - 1 } // inclusive
func (r fieldRange) String() string {
if r.Start() == r.End() {
return fmt.Sprintf("%d", r.Start())
}
return fmt.Sprintf("%d to %d", r.Start(), r.End())
}
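// Illustrative note, not part of the vendored file: FieldRanges stores
// half-open [start, end) pairs, matching DescriptorProto.ExtensionRange,
// while enumRange above is fully inclusive; fieldRange.End() subtracts one
// so both kinds print and compare the same way. For example, the declaration
// "extensions 100 to 199;" is stored as [100, 200) and renders as
// "100 to 199".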
type FieldNumbers struct {
List []protoreflect.FieldNumber
once sync.Once
has map[protoreflect.FieldNumber]struct{} // protected by once
}
func (p *FieldNumbers) Len() int { return len(p.List) }
func (p *FieldNumbers) Get(i int) protoreflect.FieldNumber { return p.List[i] }
func (p *FieldNumbers) Has(n protoreflect.FieldNumber) bool {
p.once.Do(func() {
if len(p.List) > 0 {
p.has = make(map[protoreflect.FieldNumber]struct{}, len(p.List))
for _, n := range p.List {
p.has[n] = struct{}{}
}
}
})
_, ok := p.has[n]
return ok
}
func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {}
type OneofFields struct {
List []protoreflect.FieldDescriptor
once sync.Once
byName map[protoreflect.Name]protoreflect.FieldDescriptor // protected by once
byJSON map[string]protoreflect.FieldDescriptor // protected by once
byText map[string]protoreflect.FieldDescriptor // protected by once
byNum map[protoreflect.FieldNumber]protoreflect.FieldDescriptor // protected by once
}
func (p *OneofFields) Len() int { return len(p.List) }
func (p *OneofFields) Get(i int) protoreflect.FieldDescriptor { return p.List[i] }
func (p *OneofFields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor {
return p.lazyInit().byName[s]
}
func (p *OneofFields) ByJSONName(s string) protoreflect.FieldDescriptor {
return p.lazyInit().byJSON[s]
}
func (p *OneofFields) ByTextName(s string) protoreflect.FieldDescriptor {
return p.lazyInit().byText[s]
}
func (p *OneofFields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor {
return p.lazyInit().byNum[n]
}
func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {}
func (p *OneofFields) lazyInit() *OneofFields {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]protoreflect.FieldDescriptor, len(p.List))
p.byJSON = make(map[string]protoreflect.FieldDescriptor, len(p.List))
p.byText = make(map[string]protoreflect.FieldDescriptor, len(p.List))
p.byNum = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor, len(p.List))
for _, f := range p.List {
// Field names and numbers are guaranteed to be unique.
p.byName[f.Name()] = f
p.byJSON[f.JSONName()] = f
p.byText[f.TextName()] = f
p.byNum[f.Number()] = f
}
}
})
return p
}
type SourceLocations struct {
// List is a list of SourceLocations.
// The SourceLocation.Next field does not need to be populated
// as it will be lazily populated upon first need.
List []protoreflect.SourceLocation
// File is the parent file descriptor that these locations are relative to.
// If non-nil, ByDescriptor verifies that the provided descriptor
// is a child of this file descriptor.
File protoreflect.FileDescriptor
once sync.Once
byPath map[pathKey]int
}
func (p *SourceLocations) Len() int { return len(p.List) }
func (p *SourceLocations) Get(i int) protoreflect.SourceLocation { return p.lazyInit().List[i] }
func (p *SourceLocations) byKey(k pathKey) protoreflect.SourceLocation {
if i, ok := p.lazyInit().byPath[k]; ok {
return p.List[i]
}
return protoreflect.SourceLocation{}
}
func (p *SourceLocations) ByPath(path protoreflect.SourcePath) protoreflect.SourceLocation {
return p.byKey(newPathKey(path))
}
func (p *SourceLocations) ByDescriptor(desc protoreflect.Descriptor) protoreflect.SourceLocation {
if p.File != nil && desc != nil && p.File != desc.ParentFile() {
return protoreflect.SourceLocation{} // mismatching parent files
}
var pathArr [16]int32
path := pathArr[:0]
for {
switch desc.(type) {
case protoreflect.FileDescriptor:
// Reverse the path since it was constructed in reverse.
for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
path[i], path[j] = path[j], path[i]
}
return p.byKey(newPathKey(path))
case protoreflect.MessageDescriptor:
path = append(path, int32(desc.Index()))
desc = desc.Parent()
switch desc.(type) {
case protoreflect.FileDescriptor:
path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number))
case protoreflect.MessageDescriptor:
path = append(path, int32(genid.DescriptorProto_NestedType_field_number))
default:
return protoreflect.SourceLocation{}
}
case protoreflect.FieldDescriptor:
isExtension := desc.(protoreflect.FieldDescriptor).IsExtension()
path = append(path, int32(desc.Index()))
desc = desc.Parent()
if isExtension {
switch desc.(type) {
case protoreflect.FileDescriptor:
path = append(path, int32(genid.FileDescriptorProto_Extension_field_number))
case protoreflect.MessageDescriptor:
path = append(path, int32(genid.DescriptorProto_Extension_field_number))
default:
return protoreflect.SourceLocation{}
}
} else {
switch desc.(type) {
case protoreflect.MessageDescriptor:
path = append(path, int32(genid.DescriptorProto_Field_field_number))
default:
return protoreflect.SourceLocation{}
}
}
case protoreflect.OneofDescriptor:
path = append(path, int32(desc.Index()))
desc = desc.Parent()
switch desc.(type) {
case protoreflect.MessageDescriptor:
path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number))
default:
return protoreflect.SourceLocation{}
}
case protoreflect.EnumDescriptor:
path = append(path, int32(desc.Index()))
desc = desc.Parent()
switch desc.(type) {
case protoreflect.FileDescriptor:
path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number))
case protoreflect.MessageDescriptor:
path = append(path, int32(genid.DescriptorProto_EnumType_field_number))
default:
return protoreflect.SourceLocation{}
}
case protoreflect.EnumValueDescriptor:
path = append(path, int32(desc.Index()))
desc = desc.Parent()
switch desc.(type) {
case protoreflect.EnumDescriptor:
path = append(path, int32(genid.EnumDescriptorProto_Value_field_number))
default:
return protoreflect.SourceLocation{}
}
case protoreflect.ServiceDescriptor:
path = append(path, int32(desc.Index()))
desc = desc.Parent()
switch desc.(type) {
case protoreflect.FileDescriptor:
path = append(path, int32(genid.FileDescriptorProto_Service_field_number))
default:
return protoreflect.SourceLocation{}
}
case protoreflect.MethodDescriptor:
path = append(path, int32(desc.Index()))
desc = desc.Parent()
switch desc.(type) {
case protoreflect.ServiceDescriptor:
path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number))
default:
return protoreflect.SourceLocation{}
}
default:
return protoreflect.SourceLocation{}
}
}
}
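// Illustrative worked example, not part of the vendored file: ByDescriptor
// builds the source path bottom-up and then reverses it. For a field at
// index 1 of a nested message at index 0 of a top-level message at index 2,
// the walk appends
//
//	[1, 2 (field), 0, 3 (nested_type), 2, 4 (message_type)]
//
// and the reversal yields [4, 2, 3, 0, 2, 1], the same path form that
// SourceCodeInfo locations carry.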
func (p *SourceLocations) lazyInit() *SourceLocations {
p.once.Do(func() {
if len(p.List) > 0 {
// Collect all the indexes for a given path.
pathIdxs := make(map[pathKey][]int, len(p.List))
for i, l := range p.List {
k := newPathKey(l.Path)
pathIdxs[k] = append(pathIdxs[k], i)
}
// Update the next index for all locations.
p.byPath = make(map[pathKey]int, len(p.List))
for k, idxs := range pathIdxs {
for i := 0; i < len(idxs)-1; i++ {
p.List[idxs[i]].Next = idxs[i+1]
}
p.List[idxs[len(idxs)-1]].Next = 0
p.byPath[k] = idxs[0] // record the first location for this path
}
}
})
return p
}
func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {}
// pathKey is a comparable representation of protoreflect.SourcePath.
type pathKey struct {
arr [16]uint8 // first n-1 path segments; last element is the length
str string // used if the path does not fit in arr
}
func newPathKey(p protoreflect.SourcePath) (k pathKey) {
if len(p) < len(k.arr) {
for i, ps := range p {
if ps < 0 || math.MaxUint8 <= ps {
return pathKey{str: p.String()}
}
k.arr[i] = uint8(ps)
}
k.arr[len(k.arr)-1] = uint8(len(p))
return k
}
return pathKey{str: p.String()}
}
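// Illustrative note, not part of the vendored file: newPathKey packs short
// paths with small segments into the fixed array so the key is comparable
// and allocation-free. The path [4, 0, 2, 1] (message 0, field 1) becomes
// arr = [4, 0, 2, 1, 0, ..., 0, 4] with the length stored in the final byte,
// whereas a path with 16 or more segments, or any segment >= 255, falls back
// to the string form p.String().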


@ -1,356 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-types. DO NOT EDIT.
package filedesc
import (
"fmt"
"sync"
"google.golang.org/protobuf/internal/descfmt"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/reflect/protoreflect"
)
type Enums struct {
List []Enum
once sync.Once
byName map[protoreflect.Name]*Enum // protected by once
}
func (p *Enums) Len() int {
return len(p.List)
}
func (p *Enums) Get(i int) protoreflect.EnumDescriptor {
return &p.List[i]
}
func (p *Enums) ByName(s protoreflect.Name) protoreflect.EnumDescriptor {
if d := p.lazyInit().byName[s]; d != nil {
return d
}
return nil
}
func (p *Enums) Format(s fmt.State, r rune) {
descfmt.FormatList(s, r, p)
}
func (p *Enums) ProtoInternal(pragma.DoNotImplement) {}
func (p *Enums) lazyInit() *Enums {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*Enum, len(p.List))
for i := range p.List {
d := &p.List[i]
if _, ok := p.byName[d.Name()]; !ok {
p.byName[d.Name()] = d
}
}
}
})
return p
}
type EnumValues struct {
List []EnumValue
once sync.Once
byName map[protoreflect.Name]*EnumValue // protected by once
byNum map[protoreflect.EnumNumber]*EnumValue // protected by once
}
func (p *EnumValues) Len() int {
return len(p.List)
}
func (p *EnumValues) Get(i int) protoreflect.EnumValueDescriptor {
return &p.List[i]
}
func (p *EnumValues) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor {
if d := p.lazyInit().byName[s]; d != nil {
return d
}
return nil
}
func (p *EnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor {
if d := p.lazyInit().byNum[n]; d != nil {
return d
}
return nil
}
func (p *EnumValues) Format(s fmt.State, r rune) {
descfmt.FormatList(s, r, p)
}
func (p *EnumValues) ProtoInternal(pragma.DoNotImplement) {}
func (p *EnumValues) lazyInit() *EnumValues {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*EnumValue, len(p.List))
p.byNum = make(map[protoreflect.EnumNumber]*EnumValue, len(p.List))
for i := range p.List {
d := &p.List[i]
if _, ok := p.byName[d.Name()]; !ok {
p.byName[d.Name()] = d
}
if _, ok := p.byNum[d.Number()]; !ok {
p.byNum[d.Number()] = d
}
}
}
})
return p
}
type Messages struct {
List []Message
once sync.Once
byName map[protoreflect.Name]*Message // protected by once
}
func (p *Messages) Len() int {
return len(p.List)
}
func (p *Messages) Get(i int) protoreflect.MessageDescriptor {
return &p.List[i]
}
func (p *Messages) ByName(s protoreflect.Name) protoreflect.MessageDescriptor {
if d := p.lazyInit().byName[s]; d != nil {
return d
}
return nil
}
func (p *Messages) Format(s fmt.State, r rune) {
descfmt.FormatList(s, r, p)
}
func (p *Messages) ProtoInternal(pragma.DoNotImplement) {}
func (p *Messages) lazyInit() *Messages {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*Message, len(p.List))
for i := range p.List {
d := &p.List[i]
if _, ok := p.byName[d.Name()]; !ok {
p.byName[d.Name()] = d
}
}
}
})
return p
}
type Fields struct {
List []Field
once sync.Once
byName map[protoreflect.Name]*Field // protected by once
byJSON map[string]*Field // protected by once
byText map[string]*Field // protected by once
byNum map[protoreflect.FieldNumber]*Field // protected by once
}
func (p *Fields) Len() int {
return len(p.List)
}
func (p *Fields) Get(i int) protoreflect.FieldDescriptor {
return &p.List[i]
}
func (p *Fields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor {
if d := p.lazyInit().byName[s]; d != nil {
return d
}
return nil
}
func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor {
if d := p.lazyInit().byJSON[s]; d != nil {
return d
}
return nil
}
func (p *Fields) ByTextName(s string) protoreflect.FieldDescriptor {
if d := p.lazyInit().byText[s]; d != nil {
return d
}
return nil
}
func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor {
if d := p.lazyInit().byNum[n]; d != nil {
return d
}
return nil
}
func (p *Fields) Format(s fmt.State, r rune) {
descfmt.FormatList(s, r, p)
}
func (p *Fields) ProtoInternal(pragma.DoNotImplement) {}
func (p *Fields) lazyInit() *Fields {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*Field, len(p.List))
p.byJSON = make(map[string]*Field, len(p.List))
p.byText = make(map[string]*Field, len(p.List))
p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List))
for i := range p.List {
d := &p.List[i]
if _, ok := p.byName[d.Name()]; !ok {
p.byName[d.Name()] = d
}
if _, ok := p.byJSON[d.JSONName()]; !ok {
p.byJSON[d.JSONName()] = d
}
if _, ok := p.byText[d.TextName()]; !ok {
p.byText[d.TextName()] = d
}
if _, ok := p.byNum[d.Number()]; !ok {
p.byNum[d.Number()] = d
}
}
}
})
return p
}
type Oneofs struct {
List []Oneof
once sync.Once
byName map[protoreflect.Name]*Oneof // protected by once
}
func (p *Oneofs) Len() int {
return len(p.List)
}
func (p *Oneofs) Get(i int) protoreflect.OneofDescriptor {
return &p.List[i]
}
func (p *Oneofs) ByName(s protoreflect.Name) protoreflect.OneofDescriptor {
if d := p.lazyInit().byName[s]; d != nil {
return d
}
return nil
}
func (p *Oneofs) Format(s fmt.State, r rune) {
descfmt.FormatList(s, r, p)
}
func (p *Oneofs) ProtoInternal(pragma.DoNotImplement) {}
func (p *Oneofs) lazyInit() *Oneofs {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*Oneof, len(p.List))
for i := range p.List {
d := &p.List[i]
if _, ok := p.byName[d.Name()]; !ok {
p.byName[d.Name()] = d
}
}
}
})
return p
}
type Extensions struct {
List []Extension
once sync.Once
byName map[protoreflect.Name]*Extension // protected by once
}
func (p *Extensions) Len() int {
return len(p.List)
}
func (p *Extensions) Get(i int) protoreflect.ExtensionDescriptor {
return &p.List[i]
}
func (p *Extensions) ByName(s protoreflect.Name) protoreflect.ExtensionDescriptor {
if d := p.lazyInit().byName[s]; d != nil {
return d
}
return nil
}
func (p *Extensions) Format(s fmt.State, r rune) {
descfmt.FormatList(s, r, p)
}
func (p *Extensions) ProtoInternal(pragma.DoNotImplement) {}
func (p *Extensions) lazyInit() *Extensions {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*Extension, len(p.List))
for i := range p.List {
d := &p.List[i]
if _, ok := p.byName[d.Name()]; !ok {
p.byName[d.Name()] = d
}
}
}
})
return p
}
type Services struct {
List []Service
once sync.Once
byName map[protoreflect.Name]*Service // protected by once
}
func (p *Services) Len() int {
return len(p.List)
}
func (p *Services) Get(i int) protoreflect.ServiceDescriptor {
return &p.List[i]
}
func (p *Services) ByName(s protoreflect.Name) protoreflect.ServiceDescriptor {
if d := p.lazyInit().byName[s]; d != nil {
return d
}
return nil
}
func (p *Services) Format(s fmt.State, r rune) {
descfmt.FormatList(s, r, p)
}
func (p *Services) ProtoInternal(pragma.DoNotImplement) {}
func (p *Services) lazyInit() *Services {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*Service, len(p.List))
for i := range p.List {
d := &p.List[i]
if _, ok := p.byName[d.Name()]; !ok {
p.byName[d.Name()] = d
}
}
}
})
return p
}
type Methods struct {
List []Method
once sync.Once
byName map[protoreflect.Name]*Method // protected by once
}
func (p *Methods) Len() int {
return len(p.List)
}
func (p *Methods) Get(i int) protoreflect.MethodDescriptor {
return &p.List[i]
}
func (p *Methods) ByName(s protoreflect.Name) protoreflect.MethodDescriptor {
if d := p.lazyInit().byName[s]; d != nil {
return d
}
return nil
}
func (p *Methods) Format(s fmt.State, r rune) {
descfmt.FormatList(s, r, p)
}
func (p *Methods) ProtoInternal(pragma.DoNotImplement) {}
func (p *Methods) lazyInit() *Methods {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*Method, len(p.List))
for i := range p.List {
d := &p.List[i]
if _, ok := p.byName[d.Name()]; !ok {
p.byName[d.Name()] = d
}
}
}
})
return p
}


@ -1,142 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filedesc
import (
"fmt"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/editiondefaults"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/reflect/protoreflect"
)
var defaultsCache = make(map[Edition]EditionFeatures)
func init() {
unmarshalEditionDefaults(editiondefaults.Defaults)
}
func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
for len(b) > 0 {
num, _, n := protowire.ConsumeTag(b)
b = b[n:]
switch num {
case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v)
default:
panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num))
}
}
return parent
}
func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.FeatureSet_FieldPresence_field_number:
parent.IsFieldPresence = v == genid.FeatureSet_EXPLICIT_enum_value || v == genid.FeatureSet_LEGACY_REQUIRED_enum_value
parent.IsLegacyRequired = v == genid.FeatureSet_LEGACY_REQUIRED_enum_value
case genid.FeatureSet_EnumType_field_number:
parent.IsOpenEnum = v == genid.FeatureSet_OPEN_enum_value
case genid.FeatureSet_RepeatedFieldEncoding_field_number:
parent.IsPacked = v == genid.FeatureSet_PACKED_enum_value
case genid.FeatureSet_Utf8Validation_field_number:
parent.IsUTF8Validated = v == genid.FeatureSet_VERIFY_enum_value
case genid.FeatureSet_MessageEncoding_field_number:
parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value
case genid.FeatureSet_JsonFormat_field_number:
parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value
default:
panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num))
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number:
parent = unmarshalGoFeature(v, parent)
}
}
}
return parent
}
func featuresFromParentDesc(parentDesc protoreflect.Descriptor) EditionFeatures {
var parentFS EditionFeatures
switch p := parentDesc.(type) {
case *File:
parentFS = p.L1.EditionFeatures
case *Message:
parentFS = p.L1.EditionFeatures
default:
panic(fmt.Sprintf("unknown parent type %T", parentDesc))
}
return parentFS
}
func unmarshalEditionDefault(b []byte) {
var ed Edition
var fs EditionFeatures
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number:
ed = Edition(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number:
fs = unmarshalFeatureSet(v, fs)
}
}
}
defaultsCache[ed] = fs
}
func unmarshalEditionDefaults(b []byte) {
for len(b) > 0 {
num, _, n := protowire.ConsumeTag(b)
b = b[n:]
switch num {
case genid.FeatureSetDefaults_Defaults_field_number:
def, m := protowire.ConsumeBytes(b)
b = b[m:]
unmarshalEditionDefault(def)
case genid.FeatureSetDefaults_MinimumEdition_field_number,
genid.FeatureSetDefaults_MaximumEdition_field_number:
// We don't care about the minimum and maximum editions. If the
// edition we are looking for later on is not in the cache we know
// it is outside of the range between minimum and maximum edition.
_, m := protowire.ConsumeVarint(b)
b = b[m:]
default:
panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num))
}
}
}
func getFeaturesFor(ed Edition) EditionFeatures {
if def, ok := defaultsCache[ed]; ok {
return def
}
panic(fmt.Sprintf("unsupported edition: %v", ed))
}
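// Illustrative usage, not part of the vendored file: the defaults cache is
// filled once at init from editiondefaults.Defaults, so looking up the
// feature set for a supported edition is a plain map hit, e.g.
// getFeaturesFor(EditionProto3) (constant name assumed here). Asking for an
// edition outside the compiled-in [minimum, maximum] range panics, because
// no entry for it exists in the defaults blob.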


@ -1,109 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filedesc
import (
"google.golang.org/protobuf/internal/descopts"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/reflect/protoreflect"
)
var (
emptyNames = new(Names)
emptyEnumRanges = new(EnumRanges)
emptyFieldRanges = new(FieldRanges)
emptyFieldNumbers = new(FieldNumbers)
emptySourceLocations = new(SourceLocations)
emptyFiles = new(FileImports)
emptyMessages = new(Messages)
emptyFields = new(Fields)
emptyOneofs = new(Oneofs)
emptyEnums = new(Enums)
emptyEnumValues = new(EnumValues)
emptyExtensions = new(Extensions)
emptyServices = new(Services)
)
// PlaceholderFile is a placeholder, representing only the file path.
type PlaceholderFile string
func (f PlaceholderFile) ParentFile() protoreflect.FileDescriptor { return f }
func (f PlaceholderFile) Parent() protoreflect.Descriptor { return nil }
func (f PlaceholderFile) Index() int { return 0 }
func (f PlaceholderFile) Syntax() protoreflect.Syntax { return 0 }
func (f PlaceholderFile) Name() protoreflect.Name { return "" }
func (f PlaceholderFile) FullName() protoreflect.FullName { return "" }
func (f PlaceholderFile) IsPlaceholder() bool { return true }
func (f PlaceholderFile) Options() protoreflect.ProtoMessage { return descopts.File }
func (f PlaceholderFile) Path() string { return string(f) }
func (f PlaceholderFile) Package() protoreflect.FullName { return "" }
func (f PlaceholderFile) Imports() protoreflect.FileImports { return emptyFiles }
func (f PlaceholderFile) Messages() protoreflect.MessageDescriptors { return emptyMessages }
func (f PlaceholderFile) Enums() protoreflect.EnumDescriptors { return emptyEnums }
func (f PlaceholderFile) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions }
func (f PlaceholderFile) Services() protoreflect.ServiceDescriptors { return emptyServices }
func (f PlaceholderFile) SourceLocations() protoreflect.SourceLocations { return emptySourceLocations }
func (f PlaceholderFile) ProtoType(protoreflect.FileDescriptor) { return }
func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return }
// PlaceholderEnum is a placeholder, representing only the full name.
type PlaceholderEnum protoreflect.FullName
func (e PlaceholderEnum) ParentFile() protoreflect.FileDescriptor { return nil }
func (e PlaceholderEnum) Parent() protoreflect.Descriptor { return nil }
func (e PlaceholderEnum) Index() int { return 0 }
func (e PlaceholderEnum) Syntax() protoreflect.Syntax { return 0 }
func (e PlaceholderEnum) Name() protoreflect.Name { return protoreflect.FullName(e).Name() }
func (e PlaceholderEnum) FullName() protoreflect.FullName { return protoreflect.FullName(e) }
func (e PlaceholderEnum) IsPlaceholder() bool { return true }
func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return descopts.Enum }
func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues }
func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames }
func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges }
func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return }
func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return }
// PlaceholderEnumValue is a placeholder, representing only the full name.
type PlaceholderEnumValue protoreflect.FullName
func (e PlaceholderEnumValue) ParentFile() protoreflect.FileDescriptor { return nil }
func (e PlaceholderEnumValue) Parent() protoreflect.Descriptor { return nil }
func (e PlaceholderEnumValue) Index() int { return 0 }
func (e PlaceholderEnumValue) Syntax() protoreflect.Syntax { return 0 }
func (e PlaceholderEnumValue) Name() protoreflect.Name { return protoreflect.FullName(e).Name() }
func (e PlaceholderEnumValue) FullName() protoreflect.FullName { return protoreflect.FullName(e) }
func (e PlaceholderEnumValue) IsPlaceholder() bool { return true }
func (e PlaceholderEnumValue) Options() protoreflect.ProtoMessage { return descopts.EnumValue }
func (e PlaceholderEnumValue) Number() protoreflect.EnumNumber { return 0 }
func (e PlaceholderEnumValue) ProtoType(protoreflect.EnumValueDescriptor) { return }
func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return }
// PlaceholderMessage is a placeholder, representing only the full name.
type PlaceholderMessage protoreflect.FullName
func (m PlaceholderMessage) ParentFile() protoreflect.FileDescriptor { return nil }
func (m PlaceholderMessage) Parent() protoreflect.Descriptor { return nil }
func (m PlaceholderMessage) Index() int { return 0 }
func (m PlaceholderMessage) Syntax() protoreflect.Syntax { return 0 }
func (m PlaceholderMessage) Name() protoreflect.Name { return protoreflect.FullName(m).Name() }
func (m PlaceholderMessage) FullName() protoreflect.FullName { return protoreflect.FullName(m) }
func (m PlaceholderMessage) IsPlaceholder() bool { return true }
func (m PlaceholderMessage) Options() protoreflect.ProtoMessage { return descopts.Message }
func (m PlaceholderMessage) IsMapEntry() bool { return false }
func (m PlaceholderMessage) Fields() protoreflect.FieldDescriptors { return emptyFields }
func (m PlaceholderMessage) Oneofs() protoreflect.OneofDescriptors { return emptyOneofs }
func (m PlaceholderMessage) ReservedNames() protoreflect.Names { return emptyNames }
func (m PlaceholderMessage) ReservedRanges() protoreflect.FieldRanges { return emptyFieldRanges }
func (m PlaceholderMessage) RequiredNumbers() protoreflect.FieldNumbers { return emptyFieldNumbers }
func (m PlaceholderMessage) ExtensionRanges() protoreflect.FieldRanges { return emptyFieldRanges }
func (m PlaceholderMessage) ExtensionRangeOptions(int) protoreflect.ProtoMessage {
panic("index out of range")
}
func (m PlaceholderMessage) Messages() protoreflect.MessageDescriptors { return emptyMessages }
func (m PlaceholderMessage) Enums() protoreflect.EnumDescriptors { return emptyEnums }
func (m PlaceholderMessage) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions }
func (m PlaceholderMessage) ProtoType(protoreflect.MessageDescriptor) { return }
func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return }
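To callers, a placeholder is just another protoreflect.Descriptor whose IsPlaceholder method reports true and whose collections are all empty, so resolution code typically checks for that before digging deeper. The placeholder types above live in an internal package, so this hedged sketch only shows the check from the public reflection side; the describe helper is illustrative and not part of the library.

package main

import (
    "fmt"

    "google.golang.org/protobuf/reflect/protoreflect"
    "google.golang.org/protobuf/types/known/durationpb"
)

// describe reports whether a descriptor is fully usable or only a placeholder name.
func describe(d protoreflect.Descriptor) string {
    if d.IsPlaceholder() {
        return fmt.Sprintf("unresolved reference to %s", d.FullName())
    }
    return fmt.Sprintf("resolved descriptor %s", d.FullName())
}

func main() {
    md := (&durationpb.Duration{}).ProtoReflect().Descriptor()
    fmt.Println(describe(md)) // resolved descriptor google.protobuf.Duration
}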

@ -1,296 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package filetype provides functionality for wrapping descriptors
// with Go type information.
package filetype
import (
"reflect"
"google.golang.org/protobuf/internal/descopts"
"google.golang.org/protobuf/internal/filedesc"
pimpl "google.golang.org/protobuf/internal/impl"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
// Builder constructs type descriptors from a raw file descriptor
// and associated Go types for each enum and message declaration.
//
// # Flattened Ordering
//
// The protobuf type system represents declarations as a tree. Certain nodes in
// the tree require us to either associate it with a concrete Go type or to
// resolve a dependency, which is information that must be provided separately
// since it cannot be derived from the file descriptor alone.
//
// However, it is difficult to represent a tree as Go literals in a way that
// is both space- and time-efficient. Thus, we store the declarations as a
// flattened list of objects, where the serialization order from the
// tree-based form is significant.
//
// The "flattened ordering" is defined as a tree traversal of all enum, message,
// extension, and service declarations using the following algorithm:
//
// def VisitFileDecls(fd):
// for e in fd.Enums: yield e
// for m in fd.Messages: yield m
// for x in fd.Extensions: yield x
// for s in fd.Services: yield s
// for m in fd.Messages: yield from VisitMessageDecls(m)
//
// def VisitMessageDecls(md):
// for e in md.Enums: yield e
// for m in md.Messages: yield m
// for x in md.Extensions: yield x
// for m in md.Messages: yield from VisitMessageDecls(m)
//
// The traversal starts at the root file descriptor and yields each direct
// declaration within each node before traversing into sub-declarations
// that children themselves may have.
type Builder struct {
// File is the underlying file descriptor builder.
File filedesc.Builder
// GoTypes is a unique set of the Go types for all declarations and
// dependencies. Each type is represented as a zero value of the Go type.
//
// Declarations are Go types generated for enums and messages directly
// declared (not publicly imported) in the proto source file.
// Messages for map entries are accounted for, but represented by nil.
// Enum declarations in "flattened ordering" come first, followed by
// message declarations in "flattened ordering".
//
// Dependencies are Go types for enums or messages referenced by
// message fields (excluding weak fields), for parent extended messages of
// extension fields, for enums or messages referenced by extension fields,
// and for input and output messages referenced by service methods.
// Dependencies must come after declarations, but the ordering of
// dependencies themselves is unspecified.
GoTypes []interface{}
// DependencyIndexes is an ordered list of indexes into GoTypes for the
// dependencies of messages, extensions, or services.
//
// There are 5 sub-lists in "flattened ordering" concatenated back-to-back:
// 0. Message field dependencies: list of the enum or message type
// referred to by every message field.
// 1. Extension field targets: list of the extended parent message of
// every extension.
// 2. Extension field dependencies: list of the enum or message type
// referred to by every extension field.
// 3. Service method inputs: list of the input message type
// referred to by every service method.
// 4. Service method outputs: list of the output message type
// referred to by every service method.
//
// The offset into DependencyIndexes for the start of each sub-list
// is appended to the end in reverse order.
DependencyIndexes []int32
// EnumInfos is a list of enum infos in "flattened ordering".
EnumInfos []pimpl.EnumInfo
// MessageInfos is a list of message infos in "flattened ordering".
// If provided, the GoType and PBType for each element are populated.
//
// Requirement: len(MessageInfos) == len(Build.Messages)
MessageInfos []pimpl.MessageInfo
// ExtensionInfos is a list of extension infos in "flattened ordering".
// Each element is initialized and registered with the protoregistry package.
//
// Requirement: len(ExtensionInfos) == len(Build.Extensions)
ExtensionInfos []pimpl.ExtensionInfo
// TypeRegistry is the registry to register each type descriptor.
// If nil, it uses protoregistry.GlobalTypes.
TypeRegistry interface {
RegisterMessage(protoreflect.MessageType) error
RegisterEnum(protoreflect.EnumType) error
RegisterExtension(protoreflect.ExtensionType) error
}
}
// Out is the output of the builder.
type Out struct {
File protoreflect.FileDescriptor
}
func (tb Builder) Build() (out Out) {
// Replace the resolver with one that resolves dependencies by index,
// which is faster and more reliable than relying on the global registry.
if tb.File.FileRegistry == nil {
tb.File.FileRegistry = protoregistry.GlobalFiles
}
tb.File.FileRegistry = &resolverByIndex{
goTypes: tb.GoTypes,
depIdxs: tb.DependencyIndexes,
fileRegistry: tb.File.FileRegistry,
}
// Initialize registry if unpopulated.
if tb.TypeRegistry == nil {
tb.TypeRegistry = protoregistry.GlobalTypes
}
fbOut := tb.File.Build()
out.File = fbOut.File
// Process enums.
enumGoTypes := tb.GoTypes[:len(fbOut.Enums)]
if len(tb.EnumInfos) != len(fbOut.Enums) {
panic("mismatching enum lengths")
}
if len(fbOut.Enums) > 0 {
for i := range fbOut.Enums {
tb.EnumInfos[i] = pimpl.EnumInfo{
GoReflectType: reflect.TypeOf(enumGoTypes[i]),
Desc: &fbOut.Enums[i],
}
// Register enum types.
if err := tb.TypeRegistry.RegisterEnum(&tb.EnumInfos[i]); err != nil {
panic(err)
}
}
}
// Process messages.
messageGoTypes := tb.GoTypes[len(fbOut.Enums):][:len(fbOut.Messages)]
if len(tb.MessageInfos) != len(fbOut.Messages) {
panic("mismatching message lengths")
}
if len(fbOut.Messages) > 0 {
for i := range fbOut.Messages {
if messageGoTypes[i] == nil {
continue // skip map entry
}
tb.MessageInfos[i].GoReflectType = reflect.TypeOf(messageGoTypes[i])
tb.MessageInfos[i].Desc = &fbOut.Messages[i]
// Register message types.
if err := tb.TypeRegistry.RegisterMessage(&tb.MessageInfos[i]); err != nil {
panic(err)
}
}
// As a special case for descriptor.proto,
// locally register the concrete message types for the options.
if out.File.Path() == "google/protobuf/descriptor.proto" && out.File.Package() == "google.protobuf" {
for i := range fbOut.Messages {
switch fbOut.Messages[i].Name() {
case "FileOptions":
descopts.File = messageGoTypes[i].(protoreflect.ProtoMessage)
case "EnumOptions":
descopts.Enum = messageGoTypes[i].(protoreflect.ProtoMessage)
case "EnumValueOptions":
descopts.EnumValue = messageGoTypes[i].(protoreflect.ProtoMessage)
case "MessageOptions":
descopts.Message = messageGoTypes[i].(protoreflect.ProtoMessage)
case "FieldOptions":
descopts.Field = messageGoTypes[i].(protoreflect.ProtoMessage)
case "OneofOptions":
descopts.Oneof = messageGoTypes[i].(protoreflect.ProtoMessage)
case "ExtensionRangeOptions":
descopts.ExtensionRange = messageGoTypes[i].(protoreflect.ProtoMessage)
case "ServiceOptions":
descopts.Service = messageGoTypes[i].(protoreflect.ProtoMessage)
case "MethodOptions":
descopts.Method = messageGoTypes[i].(protoreflect.ProtoMessage)
}
}
}
}
// Process extensions.
if len(tb.ExtensionInfos) != len(fbOut.Extensions) {
panic("mismatching extension lengths")
}
var depIdx int32
for i := range fbOut.Extensions {
// For enum and message kinds, determine the referent Go type so
// that constructors for their values can be built.
const listExtDeps = 2
var goType reflect.Type
switch fbOut.Extensions[i].L1.Kind {
case protoreflect.EnumKind:
j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx)
goType = reflect.TypeOf(tb.GoTypes[j])
depIdx++
case protoreflect.MessageKind, protoreflect.GroupKind:
j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx)
goType = reflect.TypeOf(tb.GoTypes[j])
depIdx++
default:
goType = goTypeForPBKind[fbOut.Extensions[i].L1.Kind]
}
if fbOut.Extensions[i].IsList() {
goType = reflect.SliceOf(goType)
}
pimpl.InitExtensionInfo(&tb.ExtensionInfos[i], &fbOut.Extensions[i], goType)
// Register extension types.
if err := tb.TypeRegistry.RegisterExtension(&tb.ExtensionInfos[i]); err != nil {
panic(err)
}
}
return out
}
var goTypeForPBKind = map[protoreflect.Kind]reflect.Type{
protoreflect.BoolKind: reflect.TypeOf(bool(false)),
protoreflect.Int32Kind: reflect.TypeOf(int32(0)),
protoreflect.Sint32Kind: reflect.TypeOf(int32(0)),
protoreflect.Sfixed32Kind: reflect.TypeOf(int32(0)),
protoreflect.Int64Kind: reflect.TypeOf(int64(0)),
protoreflect.Sint64Kind: reflect.TypeOf(int64(0)),
protoreflect.Sfixed64Kind: reflect.TypeOf(int64(0)),
protoreflect.Uint32Kind: reflect.TypeOf(uint32(0)),
protoreflect.Fixed32Kind: reflect.TypeOf(uint32(0)),
protoreflect.Uint64Kind: reflect.TypeOf(uint64(0)),
protoreflect.Fixed64Kind: reflect.TypeOf(uint64(0)),
protoreflect.FloatKind: reflect.TypeOf(float32(0)),
protoreflect.DoubleKind: reflect.TypeOf(float64(0)),
protoreflect.StringKind: reflect.TypeOf(string("")),
protoreflect.BytesKind: reflect.TypeOf([]byte(nil)),
}
type depIdxs []int32
// Get retrieves the jth element of the ith sub-list.
func (x depIdxs) Get(i, j int32) int32 {
return x[x[int32(len(x))-i-1]+j]
}
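// Worked example with illustrative values (not taken from any generated file):
// suppose the five sub-lists are [7 9] (message field deps), [3] (extension
// targets), [5] (extension deps), and two empty service lists. Their start
// offsets 0, 2, 3, 4, 4 are appended in reverse order, giving
//
//	x = depIdxs{7, 9, 3, 5, 4, 4, 3, 2, 0}
//
// so Get(2, 0) = x[x[len(x)-2-1]+0] = x[3] = 5 (the lone extension dependency)
// and Get(0, 1) = x[x[len(x)-0-1]+1] = x[1] = 9.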
type (
resolverByIndex struct {
goTypes []interface{}
depIdxs depIdxs
fileRegistry
}
fileRegistry interface {
FindFileByPath(string) (protoreflect.FileDescriptor, error)
FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error)
RegisterFile(protoreflect.FileDescriptor) error
}
)
func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.EnumDescriptor {
if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) {
return &es[depIdx]
} else {
return pimpl.Export{}.EnumDescriptorOf(r.goTypes[depIdx])
}
}
func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.MessageDescriptor {
if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) {
return &ms[depIdx-len(es)]
} else {
return pimpl.Export{}.MessageDescriptorOf(r.goTypes[depIdx])
}
}
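The "flattened ordering" described on Builder can be reproduced from any protoreflect.FileDescriptor using only the public reflection API. A rough sketch follows, using a well-known file as input; the visit functions are mine, mirroring the pseudocode above, and are not part of this package.

package main

import (
    "fmt"

    "google.golang.org/protobuf/reflect/protoreflect"
    "google.golang.org/protobuf/types/known/structpb"
)

// visitMessageDecls mirrors the VisitMessageDecls pseudocode above.
func visitMessageDecls(md protoreflect.MessageDescriptor, yield func(protoreflect.Descriptor)) {
    for i := 0; i < md.Enums().Len(); i++ {
        yield(md.Enums().Get(i))
    }
    for i := 0; i < md.Messages().Len(); i++ {
        yield(md.Messages().Get(i))
    }
    for i := 0; i < md.Extensions().Len(); i++ {
        yield(md.Extensions().Get(i))
    }
    for i := 0; i < md.Messages().Len(); i++ {
        visitMessageDecls(md.Messages().Get(i), yield)
    }
}

// visitFileDecls mirrors the VisitFileDecls pseudocode above.
func visitFileDecls(fd protoreflect.FileDescriptor, yield func(protoreflect.Descriptor)) {
    for i := 0; i < fd.Enums().Len(); i++ {
        yield(fd.Enums().Get(i))
    }
    for i := 0; i < fd.Messages().Len(); i++ {
        yield(fd.Messages().Get(i))
    }
    for i := 0; i < fd.Extensions().Len(); i++ {
        yield(fd.Extensions().Get(i))
    }
    for i := 0; i < fd.Services().Len(); i++ {
        yield(fd.Services().Get(i))
    }
    for i := 0; i < fd.Messages().Len(); i++ {
        visitMessageDecls(fd.Messages().Get(i), yield)
    }
}

func main() {
    // Walk google/protobuf/struct.proto in flattened ordering and print each name.
    fd := (&structpb.Struct{}).ProtoReflect().Descriptor().ParentFile()
    visitFileDecls(fd, func(d protoreflect.Descriptor) {
        fmt.Println(d.FullName())
    })
}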

@ -1,24 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package flags provides a set of flags controlled by build tags.
package flags
// ProtoLegacy specifies whether to enable support for legacy functionality
// such as MessageSets, weak fields, and various other obscure behavior
// that is necessary to maintain backwards compatibility with proto1 or
// the pre-release variants of proto2 and proto3.
//
// This is disabled by default unless built with the "protolegacy" tag.
//
// WARNING: The compatibility agreement covers nothing provided by this flag.
// As such, functionality may suddenly be removed or changed at our discretion.
const ProtoLegacy = protoLegacy
// LazyUnmarshalExtensions specifies whether to lazily unmarshal extensions.
//
// Lazy extension unmarshaling validates the contents of message-valued
// extension fields at unmarshal time, but defers creating the message
// structure until the extension is first accessed.
const LazyUnmarshalExtensions = ProtoLegacy

@ -1,10 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !protolegacy
// +build !protolegacy
package flags
const protoLegacy = false

@ -1,10 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build protolegacy
// +build protolegacy
package flags
const protoLegacy = true
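Which of the two variants above gets compiled is decided purely by the build tag, e.g. "go build -tags protolegacy"; without the tag the !protolegacy file is used, so ProtoLegacy and LazyUnmarshalExtensions both evaluate to false.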

@ -1,34 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package genid
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
)
const File_google_protobuf_any_proto = "google/protobuf/any.proto"
// Names for google.protobuf.Any.
const (
Any_message_name protoreflect.Name = "Any"
Any_message_fullname protoreflect.FullName = "google.protobuf.Any"
)
// Field names for google.protobuf.Any.
const (
Any_TypeUrl_field_name protoreflect.Name = "type_url"
Any_Value_field_name protoreflect.Name = "value"
Any_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Any.type_url"
Any_Value_field_fullname protoreflect.FullName = "google.protobuf.Any.value"
)
// Field numbers for google.protobuf.Any.
const (
Any_TypeUrl_field_number protoreflect.FieldNumber = 1
Any_Value_field_number protoreflect.FieldNumber = 2
)

@ -1,106 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package genid
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
)
const File_google_protobuf_api_proto = "google/protobuf/api.proto"
// Names for google.protobuf.Api.
const (
Api_message_name protoreflect.Name = "Api"
Api_message_fullname protoreflect.FullName = "google.protobuf.Api"
)
// Field names for google.protobuf.Api.
const (
Api_Name_field_name protoreflect.Name = "name"
Api_Methods_field_name protoreflect.Name = "methods"
Api_Options_field_name protoreflect.Name = "options"
Api_Version_field_name protoreflect.Name = "version"
Api_SourceContext_field_name protoreflect.Name = "source_context"
Api_Mixins_field_name protoreflect.Name = "mixins"
Api_Syntax_field_name protoreflect.Name = "syntax"
Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name"
Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods"
Api_Options_field_fullname protoreflect.FullName = "google.protobuf.Api.options"
Api_Version_field_fullname protoreflect.FullName = "google.protobuf.Api.version"
Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context"
Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins"
Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax"
)
// Field numbers for google.protobuf.Api.
const (
Api_Name_field_number protoreflect.FieldNumber = 1
Api_Methods_field_number protoreflect.FieldNumber = 2
Api_Options_field_number protoreflect.FieldNumber = 3
Api_Version_field_number protoreflect.FieldNumber = 4
Api_SourceContext_field_number protoreflect.FieldNumber = 5
Api_Mixins_field_number protoreflect.FieldNumber = 6
Api_Syntax_field_number protoreflect.FieldNumber = 7
)
// Names for google.protobuf.Method.
const (
Method_message_name protoreflect.Name = "Method"
Method_message_fullname protoreflect.FullName = "google.protobuf.Method"
)
// Field names for google.protobuf.Method.
const (
Method_Name_field_name protoreflect.Name = "name"
Method_RequestTypeUrl_field_name protoreflect.Name = "request_type_url"
Method_RequestStreaming_field_name protoreflect.Name = "request_streaming"
Method_ResponseTypeUrl_field_name protoreflect.Name = "response_type_url"
Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming"
Method_Options_field_name protoreflect.Name = "options"
Method_Syntax_field_name protoreflect.Name = "syntax"
Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name"
Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url"
Method_RequestStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.request_streaming"
Method_ResponseTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.response_type_url"
Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming"
Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options"
Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax"
)
// Field numbers for google.protobuf.Method.
const (
Method_Name_field_number protoreflect.FieldNumber = 1
Method_RequestTypeUrl_field_number protoreflect.FieldNumber = 2
Method_RequestStreaming_field_number protoreflect.FieldNumber = 3
Method_ResponseTypeUrl_field_number protoreflect.FieldNumber = 4
Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5
Method_Options_field_number protoreflect.FieldNumber = 6
Method_Syntax_field_number protoreflect.FieldNumber = 7
)
// Names for google.protobuf.Mixin.
const (
Mixin_message_name protoreflect.Name = "Mixin"
Mixin_message_fullname protoreflect.FullName = "google.protobuf.Mixin"
)
// Field names for google.protobuf.Mixin.
const (
Mixin_Name_field_name protoreflect.Name = "name"
Mixin_Root_field_name protoreflect.Name = "root"
Mixin_Name_field_fullname protoreflect.FullName = "google.protobuf.Mixin.name"
Mixin_Root_field_fullname protoreflect.FullName = "google.protobuf.Mixin.root"
)
// Field numbers for google.protobuf.Mixin.
const (
Mixin_Name_field_number protoreflect.FieldNumber = 1
Mixin_Root_field_number protoreflect.FieldNumber = 2
)

File diff suppressed because it is too large

@ -1,11 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package genid contains constants for declarations in descriptor.proto
// and the well-known types.
package genid
import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
const GoogleProtobuf_package protoreflect.FullName = "google.protobuf"

@ -1,34 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package genid
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
)
const File_google_protobuf_duration_proto = "google/protobuf/duration.proto"
// Names for google.protobuf.Duration.
const (
Duration_message_name protoreflect.Name = "Duration"
Duration_message_fullname protoreflect.FullName = "google.protobuf.Duration"
)
// Field names for google.protobuf.Duration.
const (
Duration_Seconds_field_name protoreflect.Name = "seconds"
Duration_Nanos_field_name protoreflect.Name = "nanos"
Duration_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Duration.seconds"
Duration_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Duration.nanos"
)
// Field numbers for google.protobuf.Duration.
const (
Duration_Seconds_field_number protoreflect.FieldNumber = 1
Duration_Nanos_field_number protoreflect.FieldNumber = 2
)

@ -1,19 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package genid
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
)
const File_google_protobuf_empty_proto = "google/protobuf/empty.proto"
// Names for google.protobuf.Empty.
const (
Empty_message_name protoreflect.Name = "Empty"
Empty_message_fullname protoreflect.FullName = "google.protobuf.Empty"
)

@ -1,31 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package genid
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
)
const File_google_protobuf_field_mask_proto = "google/protobuf/field_mask.proto"
// Names for google.protobuf.FieldMask.
const (
FieldMask_message_name protoreflect.Name = "FieldMask"
FieldMask_message_fullname protoreflect.FullName = "google.protobuf.FieldMask"
)
// Field names for google.protobuf.FieldMask.
const (
FieldMask_Paths_field_name protoreflect.Name = "paths"
FieldMask_Paths_field_fullname protoreflect.FullName = "google.protobuf.FieldMask.paths"
)
// Field numbers for google.protobuf.FieldMask.
const (
FieldMask_Paths_field_number protoreflect.FieldNumber = 1
)

@ -1,31 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package genid
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
)
const File_reflect_protodesc_proto_go_features_proto = "reflect/protodesc/proto/go_features.proto"
// Names for google.protobuf.GoFeatures.
const (
GoFeatures_message_name protoreflect.Name = "GoFeatures"
GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures"
)
// Field names for google.protobuf.GoFeatures.
const (
GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum"
GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum"
)
// Field numbers for google.protobuf.GoFeatures.
const (
GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1
)

@ -1,25 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package genid
// Go names of implementation-specific struct fields in generated messages.
const (
State_goname = "state"
SizeCache_goname = "sizeCache"
SizeCacheA_goname = "XXX_sizecache"
WeakFields_goname = "weakFields"
WeakFieldsA_goname = "XXX_weak"
UnknownFields_goname = "unknownFields"
UnknownFieldsA_goname = "XXX_unrecognized"
ExtensionFields_goname = "extensionFields"
ExtensionFieldsA_goname = "XXX_InternalExtensions"
ExtensionFieldsB_goname = "XXX_extensions"
WeakFieldPrefix_goname = "XXX_weak_"
)

Some files were not shown because too many files have changed in this diff