Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-01-22 04:39:30 +00:00)
rebase: vendor dependencies for Vault API
Uses github.com/libopenstorage/secrets to communicate with Vault. This removes the need to maintain our own limited Vault APIs. By adding the new dependency, several other packages were updated in the process. Unused indirect dependencies have been removed from go.mod.

Signed-off-by: Niels de Vos <ndevos@redhat.com>
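As an illustration of what the new dependency provides (this is not code from the commit itself), below is a minimal sketch of fetching a secret through the libopenstorage/secrets Vault provider. The address, token, config keys, and secret name are placeholders, and the real ceph-csi integration lives elsewhere in the tree.

```go
package main

import (
	"fmt"
	"log"

	"github.com/libopenstorage/secrets"
	"github.com/libopenstorage/secrets/vault"
)

func main() {
	// Placeholder connection settings; the exact set of supported config
	// keys is defined by the libopenstorage/secrets Vault provider.
	config := map[string]interface{}{
		"VAULT_ADDR":  "https://vault.example.com:8200", // hypothetical address
		"VAULT_TOKEN": "s.example-token",                // hypothetical token
	}

	// vault.New returns an implementation of the generic secrets.Secrets
	// interface, so callers are not tied to Vault-specific APIs.
	var client secrets.Secrets
	client, err := vault.New(config)
	if err != nil {
		log.Fatalf("failed to initialize the Vault secrets provider: %v", err)
	}

	// Look up a hypothetical passphrase; GetSecret returns the stored
	// key/value pairs as a generic map[string]interface{}.
	secret, err := client.GetSecret("csi/example-volume", nil)
	if err != nil {
		log.Fatalf("failed to read secret: %v", err)
	}
	fmt.Printf("retrieved %d value(s) from Vault\n", len(secret))
}
```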
parent: 7824cb5ed7
commit: 91774fc936

go.mod (32 lines changed)

@@ -5,45 +5,37 @@ go 1.13
 require (
 	github.com/ceph/go-ceph v0.6.0
 	github.com/container-storage-interface/spec v1.3.0
-	github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect
 	github.com/go-logr/logr v0.2.1 // indirect
 	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
-	github.com/golang/protobuf v1.3.5
+	github.com/golang/protobuf v1.4.2
 	github.com/googleapis/gnostic v0.4.0 // indirect
-	github.com/grpc-ecosystem/go-grpc-middleware v1.2.0
+	github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
-	github.com/hashicorp/golang-lru v0.5.4 // indirect
+	github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a
 	github.com/kubernetes-csi/csi-lib-utils v0.7.0
-	github.com/kubernetes-csi/external-snapshotter/v2 v2.1.1-0.20200504125226-859696c419ff
+	github.com/kubernetes-csi/external-snapshotter/v2 v2.1.1
+	github.com/libopenstorage/secrets v0.0.0-20201006135900-af310b01fe47
 	github.com/onsi/ginkgo v1.12.0
 	github.com/onsi/gomega v1.9.0
 	github.com/pborman/uuid v1.2.0
 	github.com/prometheus/client_golang v1.5.1
-	github.com/prometheus/procfs v0.0.11 // indirect
-	go.uber.org/zap v1.14.1 // indirect
-	golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
-	golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect
-	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect
-	golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3
-	golang.org/x/text v0.3.3 // indirect
-	golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
-	google.golang.org/appengine v1.6.5 // indirect
-	google.golang.org/genproto v0.0.0-20200413115906-b5235f65be36 // indirect
-	google.golang.org/grpc v1.28.0
-	gopkg.in/square/go-jose.v2 v2.5.0 // indirect
+	golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980
+	google.golang.org/grpc v1.29.1
 	k8s.io/api v0.18.6
 	k8s.io/apimachinery v0.18.6
-	k8s.io/client-go v0.18.6
+	k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible
 	k8s.io/cloud-provider v0.18.6
-	k8s.io/cri-api v0.18.6 // indirect
 	k8s.io/klog/v2 v2.3.0
-	k8s.io/kubectl v0.18.6 // indirect
 	k8s.io/kubernetes v1.18.6
 	k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89
 	sigs.k8s.io/controller-runtime v0.6.0
 )

 replace (
+	github.com/hashicorp/vault/api => github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a
+	github.com/hashicorp/vault/sdk => github.com/hashicorp/vault/sdk v0.1.14-0.20201116234512-b4d4137dfe8b
+	github.com/kubernetes-csi/external-snapshotter/v2 => github.com/kubernetes-csi/external-snapshotter/v2 v2.1.1-0.20200504125226-859696c419ff
+	github.com/kubernetes-incubator/external-storage => github.com/kubernetes-incubator/external-storage v5.5.0+incompatible
 	google.golang.org/grpc => google.golang.org/grpc v1.26.0
 	k8s.io/api => k8s.io/api v0.18.6
 	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.18.6

vendor/github.com/docker/spdystream/connection.go (generated, vendored; 11 lines changed)

@@ -14,7 +14,7 @@ import (
 
 var (
 	ErrInvalidStreamId = errors.New("Invalid stream id")
-	ErrTimeout = errors.New("Timeout occurred")
+	ErrTimeout = errors.New("Timeout occured")
 	ErrReset = errors.New("Stream reset")
 	ErrWriteClosedStream = errors.New("Write on closed stream")
 )
@@ -325,7 +325,7 @@ Loop:
 		readFrame, err := s.framer.ReadFrame()
 		if err != nil {
 			if err != io.EOF {
-				debugMessage("frame read error: %s", err)
+				fmt.Errorf("frame read error: %s", err)
 			} else {
 				debugMessage("(%p) EOF received", s)
 			}
@@ -421,7 +421,7 @@ func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler Str
 		}
 
 		if frameErr != nil {
-			debugMessage("frame handling error: %s", frameErr)
+			fmt.Errorf("frame handling error: %s", frameErr)
 		}
 	}
 }
@@ -451,7 +451,6 @@ func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) {
 		dataChan:   make(chan []byte),
 		headerChan: make(chan http.Header),
 		closeChan:  make(chan bool),
-		priority:   frame.Priority,
 	}
 	if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 {
 		stream.closeRemoteChannels()
@@ -474,7 +473,7 @@ func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool {
 		go func() {
 			resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId)
 			if resetErr != nil {
-				debugMessage("reset error: %s", resetErr)
+				fmt.Errorf("reset error: %s", resetErr)
 			}
 		}()
 		return false
@@ -719,7 +718,7 @@ func (s *Connection) shutdown(closeTimeout time.Duration) {
 	select {
 	case err, ok := <-s.shutdownChan:
 		if ok {
-			debugMessage("Unhandled close error after %s: %s", duration, err)
+			fmt.Errorf("Unhandled close error after %s: %s", duration, err)
 		}
 	default:
 	}

vendor/github.com/docker/spdystream/handlers.go (generated, vendored; 4 lines changed)

@@ -30,7 +30,9 @@ func MirrorStreamHandler(stream *Stream) {
 	}()
 }
 
-// NoopStreamHandler does nothing when stream connects.
+// NoopStreamHandler does nothing when stream connects, most
+// likely used with RejectAuthHandler which will not allow any
+// streams to make it to the stream handler.
 func NoOpStreamHandler(stream *Stream) {
 	stream.SendReply(http.Header{}, false)
 }

vendor/github.com/fatih/color/LICENSE.md (new file, generated, vendored; 20 lines)

@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Fatih Arslan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/fatih/color/README.md (new file, generated, vendored; 182 lines)

@@ -0,0 +1,182 @@
|
|||||||
|
# Archived project. No maintenance.
|
||||||
|
|
||||||
|
This project is not maintained anymore and is archived. Feel free to fork and
|
||||||
|
make your own changes if needed. For more detail read my blog post: [Taking an indefinite sabbatical from my projects](https://arslan.io/2018/10/09/taking-an-indefinite-sabbatical-from-my-projects/)
|
||||||
|
|
||||||
|
Thanks to everyone for their valuable feedback and contributions.
|
||||||
|
|
||||||
|
|
||||||
|
# Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color)
|
||||||
|
|
||||||
|
Color lets you use colorized outputs in terms of [ANSI Escape
|
||||||
|
Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
|
||||||
|
has support for Windows too! The API can be used in several ways, pick one that
|
||||||
|
suits you.
|
||||||
|
|
||||||
|
|
||||||
|
![Color](https://i.imgur.com/c1JI0lA.png)
|
||||||
|
|
||||||
|
|
||||||
|
## Install
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go get github.com/fatih/color
|
||||||
|
```
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### Standard colors
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Print with default helper functions
|
||||||
|
color.Cyan("Prints text in cyan.")
|
||||||
|
|
||||||
|
// A newline will be appended automatically
|
||||||
|
color.Blue("Prints %s in blue.", "text")
|
||||||
|
|
||||||
|
// These are using the default foreground colors
|
||||||
|
color.Red("We have red")
|
||||||
|
color.Magenta("And many others ..")
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
### Mix and reuse colors
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Create a new color object
|
||||||
|
c := color.New(color.FgCyan).Add(color.Underline)
|
||||||
|
c.Println("Prints cyan text with an underline.")
|
||||||
|
|
||||||
|
// Or just add them to New()
|
||||||
|
d := color.New(color.FgCyan, color.Bold)
|
||||||
|
d.Printf("This prints bold cyan %s\n", "too!.")
|
||||||
|
|
||||||
|
// Mix up foreground and background colors, create new mixes!
|
||||||
|
red := color.New(color.FgRed)
|
||||||
|
|
||||||
|
boldRed := red.Add(color.Bold)
|
||||||
|
boldRed.Println("This will print text in bold red.")
|
||||||
|
|
||||||
|
whiteBackground := red.Add(color.BgWhite)
|
||||||
|
whiteBackground.Println("Red text with white background.")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Use your own output (io.Writer)
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Use your own io.Writer output
|
||||||
|
color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
|
||||||
|
|
||||||
|
blue := color.New(color.FgBlue)
|
||||||
|
blue.Fprint(writer, "This will print text in blue.")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Custom print functions (PrintFunc)
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Create a custom print function for convenience
|
||||||
|
red := color.New(color.FgRed).PrintfFunc()
|
||||||
|
red("Warning")
|
||||||
|
red("Error: %s", err)
|
||||||
|
|
||||||
|
// Mix up multiple attributes
|
||||||
|
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
|
||||||
|
notice("Don't forget this...")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Custom fprint functions (FprintFunc)
|
||||||
|
|
||||||
|
```go
|
||||||
|
blue := color.New(FgBlue).FprintfFunc()
|
||||||
|
blue(myWriter, "important notice: %s", stars)
|
||||||
|
|
||||||
|
// Mix up with multiple attributes
|
||||||
|
success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
|
||||||
|
success(myWriter, "Don't forget this...")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Insert into noncolor strings (SprintFunc)
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Create SprintXxx functions to mix strings with other non-colorized strings:
|
||||||
|
yellow := color.New(color.FgYellow).SprintFunc()
|
||||||
|
red := color.New(color.FgRed).SprintFunc()
|
||||||
|
fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error"))
|
||||||
|
|
||||||
|
info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
|
||||||
|
fmt.Printf("This %s rocks!\n", info("package"))
|
||||||
|
|
||||||
|
// Use helper functions
|
||||||
|
fmt.Println("This", color.RedString("warning"), "should be not neglected.")
|
||||||
|
fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.")
|
||||||
|
|
||||||
|
// Windows supported too! Just don't forget to change the output to color.Output
|
||||||
|
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
|
||||||
|
```
|
||||||
|
|
||||||
|
### Plug into existing code
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Use handy standard colors
|
||||||
|
color.Set(color.FgYellow)
|
||||||
|
|
||||||
|
fmt.Println("Existing text will now be in yellow")
|
||||||
|
fmt.Printf("This one %s\n", "too")
|
||||||
|
|
||||||
|
color.Unset() // Don't forget to unset
|
||||||
|
|
||||||
|
// You can mix up parameters
|
||||||
|
color.Set(color.FgMagenta, color.Bold)
|
||||||
|
defer color.Unset() // Use it in your function
|
||||||
|
|
||||||
|
fmt.Println("All text will now be bold magenta.")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Disable/Enable color
|
||||||
|
|
||||||
|
There might be a case where you want to explicitly disable/enable color output. the
|
||||||
|
`go-isatty` package will automatically disable color output for non-tty output streams
|
||||||
|
(for example if the output were piped directly to `less`)
|
||||||
|
|
||||||
|
`Color` has support to disable/enable colors both globally and for single color
|
||||||
|
definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You
|
||||||
|
can easily disable the color output with:
|
||||||
|
|
||||||
|
```go
|
||||||
|
|
||||||
|
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
|
||||||
|
|
||||||
|
if *flagNoColor {
|
||||||
|
color.NoColor = true // disables colorized output
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
It also has support for single color definitions (local). You can
|
||||||
|
disable/enable color output on the fly:
|
||||||
|
|
||||||
|
```go
|
||||||
|
c := color.New(color.FgCyan)
|
||||||
|
c.Println("Prints cyan text")
|
||||||
|
|
||||||
|
c.DisableColor()
|
||||||
|
c.Println("This is printed without any color")
|
||||||
|
|
||||||
|
c.EnableColor()
|
||||||
|
c.Println("This prints again cyan...")
|
||||||
|
```
|
||||||
|
|
||||||
|
## Todo
|
||||||
|
|
||||||
|
* Save/Return previous values
|
||||||
|
* Evaluate fmt.Formatter interface
|
||||||
|
|
||||||
|
|
||||||
|
## Credits
|
||||||
|
|
||||||
|
* [Fatih Arslan](https://github.com/fatih)
|
||||||
|
* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable)
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details
|
||||||
|
|

vendor/github.com/fatih/color/color.go (new file, generated, vendored; 603 lines)

@@ -0,0 +1,603 @@
|
|||||||
|
package color
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/mattn/go-colorable"
|
||||||
|
"github.com/mattn/go-isatty"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// NoColor defines if the output is colorized or not. It's dynamically set to
|
||||||
|
// false or true based on the stdout's file descriptor referring to a terminal
|
||||||
|
// or not. This is a global option and affects all colors. For more control
|
||||||
|
// over each color block use the methods DisableColor() individually.
|
||||||
|
NoColor = os.Getenv("TERM") == "dumb" ||
|
||||||
|
(!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))
|
||||||
|
|
||||||
|
// Output defines the standard output of the print functions. By default
|
||||||
|
// os.Stdout is used.
|
||||||
|
Output = colorable.NewColorableStdout()
|
||||||
|
|
||||||
|
// Error defines a color supporting writer for os.Stderr.
|
||||||
|
Error = colorable.NewColorableStderr()
|
||||||
|
|
||||||
|
// colorsCache is used to reduce the count of created Color objects and
|
||||||
|
// allows to reuse already created objects with required Attribute.
|
||||||
|
colorsCache = make(map[Attribute]*Color)
|
||||||
|
colorsCacheMu sync.Mutex // protects colorsCache
|
||||||
|
)
|
||||||
|
|
||||||
|
// Color defines a custom color object which is defined by SGR parameters.
|
||||||
|
type Color struct {
|
||||||
|
params []Attribute
|
||||||
|
noColor *bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attribute defines a single SGR Code
|
||||||
|
type Attribute int
|
||||||
|
|
||||||
|
const escape = "\x1b"
|
||||||
|
|
||||||
|
// Base attributes
|
||||||
|
const (
|
||||||
|
Reset Attribute = iota
|
||||||
|
Bold
|
||||||
|
Faint
|
||||||
|
Italic
|
||||||
|
Underline
|
||||||
|
BlinkSlow
|
||||||
|
BlinkRapid
|
||||||
|
ReverseVideo
|
||||||
|
Concealed
|
||||||
|
CrossedOut
|
||||||
|
)
|
||||||
|
|
||||||
|
// Foreground text colors
|
||||||
|
const (
|
||||||
|
FgBlack Attribute = iota + 30
|
||||||
|
FgRed
|
||||||
|
FgGreen
|
||||||
|
FgYellow
|
||||||
|
FgBlue
|
||||||
|
FgMagenta
|
||||||
|
FgCyan
|
||||||
|
FgWhite
|
||||||
|
)
|
||||||
|
|
||||||
|
// Foreground Hi-Intensity text colors
|
||||||
|
const (
|
||||||
|
FgHiBlack Attribute = iota + 90
|
||||||
|
FgHiRed
|
||||||
|
FgHiGreen
|
||||||
|
FgHiYellow
|
||||||
|
FgHiBlue
|
||||||
|
FgHiMagenta
|
||||||
|
FgHiCyan
|
||||||
|
FgHiWhite
|
||||||
|
)
|
||||||
|
|
||||||
|
// Background text colors
|
||||||
|
const (
|
||||||
|
BgBlack Attribute = iota + 40
|
||||||
|
BgRed
|
||||||
|
BgGreen
|
||||||
|
BgYellow
|
||||||
|
BgBlue
|
||||||
|
BgMagenta
|
||||||
|
BgCyan
|
||||||
|
BgWhite
|
||||||
|
)
|
||||||
|
|
||||||
|
// Background Hi-Intensity text colors
|
||||||
|
const (
|
||||||
|
BgHiBlack Attribute = iota + 100
|
||||||
|
BgHiRed
|
||||||
|
BgHiGreen
|
||||||
|
BgHiYellow
|
||||||
|
BgHiBlue
|
||||||
|
BgHiMagenta
|
||||||
|
BgHiCyan
|
||||||
|
BgHiWhite
|
||||||
|
)
|
||||||
|
|
||||||
|
// New returns a newly created color object.
|
||||||
|
func New(value ...Attribute) *Color {
|
||||||
|
c := &Color{params: make([]Attribute, 0)}
|
||||||
|
c.Add(value...)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set sets the given parameters immediately. It will change the color of
|
||||||
|
// output with the given SGR parameters until color.Unset() is called.
|
||||||
|
func Set(p ...Attribute) *Color {
|
||||||
|
c := New(p...)
|
||||||
|
c.Set()
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unset resets all escape attributes and clears the output. Usually should
|
||||||
|
// be called after Set().
|
||||||
|
func Unset() {
|
||||||
|
if NoColor {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(Output, "%s[%dm", escape, Reset)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set sets the SGR sequence.
|
||||||
|
func (c *Color) Set() *Color {
|
||||||
|
if c.isNoColorSet() {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(Output, c.format())
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Color) unset() {
|
||||||
|
if c.isNoColorSet() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
Unset()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Color) setWriter(w io.Writer) *Color {
|
||||||
|
if c.isNoColorSet() {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(w, c.format())
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Color) unsetWriter(w io.Writer) {
|
||||||
|
if c.isNoColorSet() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if NoColor {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(w, "%s[%dm", escape, Reset)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add is used to chain SGR parameters. Use as many as parameters to combine
|
||||||
|
// and create custom color objects. Example: Add(color.FgRed, color.Underline).
|
||||||
|
func (c *Color) Add(value ...Attribute) *Color {
|
||||||
|
c.params = append(c.params, value...)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Color) prepend(value Attribute) {
|
||||||
|
c.params = append(c.params, 0)
|
||||||
|
copy(c.params[1:], c.params[0:])
|
||||||
|
c.params[0] = value
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fprint formats using the default formats for its operands and writes to w.
|
||||||
|
// Spaces are added between operands when neither is a string.
|
||||||
|
// It returns the number of bytes written and any write error encountered.
|
||||||
|
// On Windows, users should wrap w with colorable.NewColorable() if w is of
|
||||||
|
// type *os.File.
|
||||||
|
func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||||
|
c.setWriter(w)
|
||||||
|
defer c.unsetWriter(w)
|
||||||
|
|
||||||
|
return fmt.Fprint(w, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print formats using the default formats for its operands and writes to
|
||||||
|
// standard output. Spaces are added between operands when neither is a
|
||||||
|
// string. It returns the number of bytes written and any write error
|
||||||
|
// encountered. This is the standard fmt.Print() method wrapped with the given
|
||||||
|
// color.
|
||||||
|
func (c *Color) Print(a ...interface{}) (n int, err error) {
|
||||||
|
c.Set()
|
||||||
|
defer c.unset()
|
||||||
|
|
||||||
|
return fmt.Fprint(Output, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fprintf formats according to a format specifier and writes to w.
|
||||||
|
// It returns the number of bytes written and any write error encountered.
|
||||||
|
// On Windows, users should wrap w with colorable.NewColorable() if w is of
|
||||||
|
// type *os.File.
|
||||||
|
func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||||
|
c.setWriter(w)
|
||||||
|
defer c.unsetWriter(w)
|
||||||
|
|
||||||
|
return fmt.Fprintf(w, format, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Printf formats according to a format specifier and writes to standard output.
|
||||||
|
// It returns the number of bytes written and any write error encountered.
|
||||||
|
// This is the standard fmt.Printf() method wrapped with the given color.
|
||||||
|
func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
|
||||||
|
c.Set()
|
||||||
|
defer c.unset()
|
||||||
|
|
||||||
|
return fmt.Fprintf(Output, format, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fprintln formats using the default formats for its operands and writes to w.
|
||||||
|
// Spaces are always added between operands and a newline is appended.
|
||||||
|
// On Windows, users should wrap w with colorable.NewColorable() if w is of
|
||||||
|
// type *os.File.
|
||||||
|
func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||||
|
c.setWriter(w)
|
||||||
|
defer c.unsetWriter(w)
|
||||||
|
|
||||||
|
return fmt.Fprintln(w, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Println formats using the default formats for its operands and writes to
|
||||||
|
// standard output. Spaces are always added between operands and a newline is
|
||||||
|
// appended. It returns the number of bytes written and any write error
|
||||||
|
// encountered. This is the standard fmt.Print() method wrapped with the given
|
||||||
|
// color.
|
||||||
|
func (c *Color) Println(a ...interface{}) (n int, err error) {
|
||||||
|
c.Set()
|
||||||
|
defer c.unset()
|
||||||
|
|
||||||
|
return fmt.Fprintln(Output, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprint is just like Print, but returns a string instead of printing it.
|
||||||
|
func (c *Color) Sprint(a ...interface{}) string {
|
||||||
|
return c.wrap(fmt.Sprint(a...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprintln is just like Println, but returns a string instead of printing it.
|
||||||
|
func (c *Color) Sprintln(a ...interface{}) string {
|
||||||
|
return c.wrap(fmt.Sprintln(a...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprintf is just like Printf, but returns a string instead of printing it.
|
||||||
|
func (c *Color) Sprintf(format string, a ...interface{}) string {
|
||||||
|
return c.wrap(fmt.Sprintf(format, a...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// FprintFunc returns a new function that prints the passed arguments as
|
||||||
|
// colorized with color.Fprint().
|
||||||
|
func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) {
|
||||||
|
return func(w io.Writer, a ...interface{}) {
|
||||||
|
c.Fprint(w, a...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrintFunc returns a new function that prints the passed arguments as
|
||||||
|
// colorized with color.Print().
|
||||||
|
func (c *Color) PrintFunc() func(a ...interface{}) {
|
||||||
|
return func(a ...interface{}) {
|
||||||
|
c.Print(a...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FprintfFunc returns a new function that prints the passed arguments as
|
||||||
|
// colorized with color.Fprintf().
|
||||||
|
func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) {
|
||||||
|
return func(w io.Writer, format string, a ...interface{}) {
|
||||||
|
c.Fprintf(w, format, a...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrintfFunc returns a new function that prints the passed arguments as
|
||||||
|
// colorized with color.Printf().
|
||||||
|
func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
|
||||||
|
return func(format string, a ...interface{}) {
|
||||||
|
c.Printf(format, a...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FprintlnFunc returns a new function that prints the passed arguments as
|
||||||
|
// colorized with color.Fprintln().
|
||||||
|
func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) {
|
||||||
|
return func(w io.Writer, a ...interface{}) {
|
||||||
|
c.Fprintln(w, a...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrintlnFunc returns a new function that prints the passed arguments as
|
||||||
|
// colorized with color.Println().
|
||||||
|
func (c *Color) PrintlnFunc() func(a ...interface{}) {
|
||||||
|
return func(a ...interface{}) {
|
||||||
|
c.Println(a...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SprintFunc returns a new function that returns colorized strings for the
|
||||||
|
// given arguments with fmt.Sprint(). Useful to put into or mix into other
|
||||||
|
// string. Windows users should use this in conjunction with color.Output, example:
|
||||||
|
//
|
||||||
|
// put := New(FgYellow).SprintFunc()
|
||||||
|
// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
|
||||||
|
func (c *Color) SprintFunc() func(a ...interface{}) string {
|
||||||
|
return func(a ...interface{}) string {
|
||||||
|
return c.wrap(fmt.Sprint(a...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SprintfFunc returns a new function that returns colorized strings for the
|
||||||
|
// given arguments with fmt.Sprintf(). Useful to put into or mix into other
|
||||||
|
// string. Windows users should use this in conjunction with color.Output.
|
||||||
|
func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
|
||||||
|
return func(format string, a ...interface{}) string {
|
||||||
|
return c.wrap(fmt.Sprintf(format, a...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SprintlnFunc returns a new function that returns colorized strings for the
|
||||||
|
// given arguments with fmt.Sprintln(). Useful to put into or mix into other
|
||||||
|
// string. Windows users should use this in conjunction with color.Output.
|
||||||
|
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
|
||||||
|
return func(a ...interface{}) string {
|
||||||
|
return c.wrap(fmt.Sprintln(a...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
|
||||||
|
// an example output might be: "1;36" -> bold cyan
|
||||||
|
func (c *Color) sequence() string {
|
||||||
|
format := make([]string, len(c.params))
|
||||||
|
for i, v := range c.params {
|
||||||
|
format[i] = strconv.Itoa(int(v))
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join(format, ";")
|
||||||
|
}
|
||||||
|
|
||||||
|
// wrap wraps the s string with the colors attributes. The string is ready to
|
||||||
|
// be printed.
|
||||||
|
func (c *Color) wrap(s string) string {
|
||||||
|
if c.isNoColorSet() {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.format() + s + c.unformat()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Color) format() string {
|
||||||
|
return fmt.Sprintf("%s[%sm", escape, c.sequence())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Color) unformat() string {
|
||||||
|
return fmt.Sprintf("%s[%dm", escape, Reset)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DisableColor disables the color output. Useful to not change any existing
|
||||||
|
// code and still being able to output. Can be used for flags like
|
||||||
|
// "--no-color". To enable back use EnableColor() method.
|
||||||
|
func (c *Color) DisableColor() {
|
||||||
|
c.noColor = boolPtr(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnableColor enables the color output. Use it in conjunction with
|
||||||
|
// DisableColor(). Otherwise this method has no side effects.
|
||||||
|
func (c *Color) EnableColor() {
|
||||||
|
c.noColor = boolPtr(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Color) isNoColorSet() bool {
|
||||||
|
// check first if we have user setted action
|
||||||
|
if c.noColor != nil {
|
||||||
|
return *c.noColor
|
||||||
|
}
|
||||||
|
|
||||||
|
// if not return the global option, which is disabled by default
|
||||||
|
return NoColor
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equals returns a boolean value indicating whether two colors are equal.
|
||||||
|
func (c *Color) Equals(c2 *Color) bool {
|
||||||
|
if len(c.params) != len(c2.params) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, attr := range c.params {
|
||||||
|
if !c2.attrExists(attr) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Color) attrExists(a Attribute) bool {
|
||||||
|
for _, attr := range c.params {
|
||||||
|
if attr == a {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func boolPtr(v bool) *bool {
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
func getCachedColor(p Attribute) *Color {
|
||||||
|
colorsCacheMu.Lock()
|
||||||
|
defer colorsCacheMu.Unlock()
|
||||||
|
|
||||||
|
c, ok := colorsCache[p]
|
||||||
|
if !ok {
|
||||||
|
c = New(p)
|
||||||
|
colorsCache[p] = c
|
||||||
|
}
|
||||||
|
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
func colorPrint(format string, p Attribute, a ...interface{}) {
|
||||||
|
c := getCachedColor(p)
|
||||||
|
|
||||||
|
if !strings.HasSuffix(format, "\n") {
|
||||||
|
format += "\n"
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(a) == 0 {
|
||||||
|
c.Print(format)
|
||||||
|
} else {
|
||||||
|
c.Printf(format, a...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func colorString(format string, p Attribute, a ...interface{}) string {
|
||||||
|
c := getCachedColor(p)
|
||||||
|
|
||||||
|
if len(a) == 0 {
|
||||||
|
return c.SprintFunc()(format)
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.SprintfFunc()(format, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Black is a convenient helper function to print with black foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) }
|
||||||
|
|
||||||
|
// Red is a convenient helper function to print with red foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) }
|
||||||
|
|
||||||
|
// Green is a convenient helper function to print with green foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) }
|
||||||
|
|
||||||
|
// Yellow is a convenient helper function to print with yellow foreground.
|
||||||
|
// A newline is appended to format by default.
|
||||||
|
func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) }
|
||||||
|
|
||||||
|
// Blue is a convenient helper function to print with blue foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) }
|
||||||
|
|
||||||
|
// Magenta is a convenient helper function to print with magenta foreground.
|
||||||
|
// A newline is appended to format by default.
|
||||||
|
func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) }
|
||||||
|
|
||||||
|
// Cyan is a convenient helper function to print with cyan foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) }
|
||||||
|
|
||||||
|
// White is a convenient helper function to print with white foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) }
|
||||||
|
|
||||||
|
// BlackString is a convenient helper function to return a string with black
|
||||||
|
// foreground.
|
||||||
|
func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) }
|
||||||
|
|
||||||
|
// RedString is a convenient helper function to return a string with red
|
||||||
|
// foreground.
|
||||||
|
func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) }
|
||||||
|
|
||||||
|
// GreenString is a convenient helper function to return a string with green
|
||||||
|
// foreground.
|
||||||
|
func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) }
|
||||||
|
|
||||||
|
// YellowString is a convenient helper function to return a string with yellow
|
||||||
|
// foreground.
|
||||||
|
func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) }
|
||||||
|
|
||||||
|
// BlueString is a convenient helper function to return a string with blue
|
||||||
|
// foreground.
|
||||||
|
func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) }
|
||||||
|
|
||||||
|
// MagentaString is a convenient helper function to return a string with magenta
|
||||||
|
// foreground.
|
||||||
|
func MagentaString(format string, a ...interface{}) string {
|
||||||
|
return colorString(format, FgMagenta, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CyanString is a convenient helper function to return a string with cyan
|
||||||
|
// foreground.
|
||||||
|
func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) }
|
||||||
|
|
||||||
|
// WhiteString is a convenient helper function to return a string with white
|
||||||
|
// foreground.
|
||||||
|
func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) }
|
||||||
|
|
||||||
|
// HiBlack is a convenient helper function to print with hi-intensity black foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) }
|
||||||
|
|
||||||
|
// HiRed is a convenient helper function to print with hi-intensity red foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) }
|
||||||
|
|
||||||
|
// HiGreen is a convenient helper function to print with hi-intensity green foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) }
|
||||||
|
|
||||||
|
// HiYellow is a convenient helper function to print with hi-intensity yellow foreground.
|
||||||
|
// A newline is appended to format by default.
|
||||||
|
func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) }
|
||||||
|
|
||||||
|
// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) }
|
||||||
|
|
||||||
|
// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground.
|
||||||
|
// A newline is appended to format by default.
|
||||||
|
func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) }
|
||||||
|
|
||||||
|
// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) }
|
||||||
|
|
||||||
|
// HiWhite is a convenient helper function to print with hi-intensity white foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) }
|
||||||
|
|
||||||
|
// HiBlackString is a convenient helper function to return a string with hi-intensity black
|
||||||
|
// foreground.
|
||||||
|
func HiBlackString(format string, a ...interface{}) string {
|
||||||
|
return colorString(format, FgHiBlack, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HiRedString is a convenient helper function to return a string with hi-intensity red
|
||||||
|
// foreground.
|
||||||
|
func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) }
|
||||||
|
|
||||||
|
// HiGreenString is a convenient helper function to return a string with hi-intensity green
|
||||||
|
// foreground.
|
||||||
|
func HiGreenString(format string, a ...interface{}) string {
|
||||||
|
return colorString(format, FgHiGreen, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HiYellowString is a convenient helper function to return a string with hi-intensity yellow
|
||||||
|
// foreground.
|
||||||
|
func HiYellowString(format string, a ...interface{}) string {
|
||||||
|
return colorString(format, FgHiYellow, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HiBlueString is a convenient helper function to return a string with hi-intensity blue
|
||||||
|
// foreground.
|
||||||
|
func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
|
||||||
|
|
||||||
|
// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
|
||||||
|
// foreground.
|
||||||
|
func HiMagentaString(format string, a ...interface{}) string {
|
||||||
|
return colorString(format, FgHiMagenta, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
|
||||||
|
// foreground.
|
||||||
|
func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
|
||||||
|
|
||||||
|
// HiWhiteString is a convenient helper function to return a string with hi-intensity white
|
||||||
|
// foreground.
|
||||||
|
func HiWhiteString(format string, a ...interface{}) string {
|
||||||
|
return colorString(format, FgHiWhite, a...)
|
||||||
|
}
|

vendor/github.com/fatih/color/doc.go (new file, generated, vendored; 133 lines)

@@ -0,0 +1,133 @@
|
|||||||
|
/*
|
||||||
|
Package color is an ANSI color package to output colorized or SGR defined
|
||||||
|
output to the standard output. The API can be used in several way, pick one
|
||||||
|
that suits you.
|
||||||
|
|
||||||
|
Use simple and default helper functions with predefined foreground colors:
|
||||||
|
|
||||||
|
color.Cyan("Prints text in cyan.")
|
||||||
|
|
||||||
|
// a newline will be appended automatically
|
||||||
|
color.Blue("Prints %s in blue.", "text")
|
||||||
|
|
||||||
|
// More default foreground colors..
|
||||||
|
color.Red("We have red")
|
||||||
|
color.Yellow("Yellow color too!")
|
||||||
|
color.Magenta("And many others ..")
|
||||||
|
|
||||||
|
// Hi-intensity colors
|
||||||
|
color.HiGreen("Bright green color.")
|
||||||
|
color.HiBlack("Bright black means gray..")
|
||||||
|
color.HiWhite("Shiny white color!")
|
||||||
|
|
||||||
|
However there are times where custom color mixes are required. Below are some
|
||||||
|
examples to create custom color objects and use the print functions of each
|
||||||
|
separate color object.
|
||||||
|
|
||||||
|
// Create a new color object
|
||||||
|
c := color.New(color.FgCyan).Add(color.Underline)
|
||||||
|
c.Println("Prints cyan text with an underline.")
|
||||||
|
|
||||||
|
// Or just add them to New()
|
||||||
|
d := color.New(color.FgCyan, color.Bold)
|
||||||
|
d.Printf("This prints bold cyan %s\n", "too!.")
|
||||||
|
|
||||||
|
|
||||||
|
// Mix up foreground and background colors, create new mixes!
|
||||||
|
red := color.New(color.FgRed)
|
||||||
|
|
||||||
|
boldRed := red.Add(color.Bold)
|
||||||
|
boldRed.Println("This will print text in bold red.")
|
||||||
|
|
||||||
|
whiteBackground := red.Add(color.BgWhite)
|
||||||
|
whiteBackground.Println("Red text with White background.")
|
||||||
|
|
||||||
|
// Use your own io.Writer output
|
||||||
|
color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
|
||||||
|
|
||||||
|
blue := color.New(color.FgBlue)
|
||||||
|
blue.Fprint(myWriter, "This will print text in blue.")
|
||||||
|
|
||||||
|
You can create PrintXxx functions to simplify even more:
|
||||||
|
|
||||||
|
// Create a custom print function for convenient
|
||||||
|
red := color.New(color.FgRed).PrintfFunc()
|
||||||
|
red("warning")
|
||||||
|
red("error: %s", err)
|
||||||
|
|
||||||
|
// Mix up multiple attributes
|
||||||
|
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
|
||||||
|
notice("don't forget this...")
|
||||||
|
|
||||||
|
You can also FprintXxx functions to pass your own io.Writer:
|
||||||
|
|
||||||
|
blue := color.New(FgBlue).FprintfFunc()
|
||||||
|
blue(myWriter, "important notice: %s", stars)
|
||||||
|
|
||||||
|
// Mix up with multiple attributes
|
||||||
|
success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
|
||||||
|
success(myWriter, don't forget this...")
|
||||||
|
|
||||||
|
|
||||||
|
Or create SprintXxx functions to mix strings with other non-colorized strings:
|
||||||
|
|
||||||
|
yellow := New(FgYellow).SprintFunc()
|
||||||
|
red := New(FgRed).SprintFunc()
|
||||||
|
|
||||||
|
fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
|
||||||
|
|
||||||
|
info := New(FgWhite, BgGreen).SprintFunc()
|
||||||
|
fmt.Printf("this %s rocks!\n", info("package"))
|
||||||
|
|
||||||
|
Windows support is enabled by default. All Print functions work as intended.
|
||||||
|
However only for color.SprintXXX functions, user should use fmt.FprintXXX and
|
||||||
|
set the output to color.Output:
|
||||||
|
|
||||||
|
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
|
||||||
|
|
||||||
|
info := New(FgWhite, BgGreen).SprintFunc()
|
||||||
|
fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
|
||||||
|
|
||||||
|
Using with existing code is possible. Just use the Set() method to set the
|
||||||
|
standard output to the given parameters. That way a rewrite of an existing
|
||||||
|
code is not required.
|
||||||
|
|
||||||
|
// Use handy standard colors.
|
||||||
|
color.Set(color.FgYellow)
|
||||||
|
|
||||||
|
fmt.Println("Existing text will be now in Yellow")
|
||||||
|
fmt.Printf("This one %s\n", "too")
|
||||||
|
|
||||||
|
color.Unset() // don't forget to unset
|
||||||
|
|
||||||
|
// You can mix up parameters
|
||||||
|
color.Set(color.FgMagenta, color.Bold)
|
||||||
|
defer color.Unset() // use it in your function
|
||||||
|
|
||||||
|
fmt.Println("All text will be now bold magenta.")
|
||||||
|
|
||||||
|
There might be a case where you want to disable color output (for example to
|
||||||
|
pipe the standard output of your app to somewhere else). `Color` has support to
|
||||||
|
disable colors both globally and for single color definition. For example
|
||||||
|
suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
|
||||||
|
the color output with:
|
||||||
|
|
||||||
|
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
|
||||||
|
|
||||||
|
if *flagNoColor {
|
||||||
|
color.NoColor = true // disables colorized output
|
||||||
|
}
|
||||||
|
|
||||||
|
It also has support for single color definitions (local). You can
|
||||||
|
disable/enable color output on the fly:
|
||||||
|
|
||||||
|
c := color.New(color.FgCyan)
|
||||||
|
c.Println("Prints cyan text")
|
||||||
|
|
||||||
|
c.DisableColor()
|
||||||
|
c.Println("This is printed without any color")
|
||||||
|
|
||||||
|
c.EnableColor()
|
||||||
|
c.Println("This prints again cyan...")
|
||||||
|
*/
|
||||||
|
package color
|

vendor/github.com/fatih/color/go.mod (new file, generated, vendored; 8 lines)

@@ -0,0 +1,8 @@
+module github.com/fatih/color
+
+go 1.13
+
+require (
+	github.com/mattn/go-colorable v0.1.4
+	github.com/mattn/go-isatty v0.0.11
+)

vendor/github.com/fatih/color/go.sum (new file, generated, vendored; 8 lines)

@@ -0,0 +1,8 @@
+github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

vendor/github.com/golang/protobuf/descriptor/descriptor.go (generated, vendored; 230 lines changed)

@@ -1,92 +1,184 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
//
|
// Use of this source code is governed by a BSD-style
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
// license that can be found in the LICENSE file.
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
// Package descriptor provides functions for obtaining protocol buffer
|
// Package descriptor provides functions for obtaining the protocol buffer
|
||||||
// descriptors for generated Go types.
|
// descriptors of generated Go types.
|
||||||
//
|
//
|
||||||
// These functions cannot go in package proto because they depend on the
|
// Deprecated: See the "google.golang.org/protobuf/reflect/protoreflect" package
|
||||||
// generated protobuf descriptor messages, which themselves depend on proto.
|
// for how to obtain an EnumDescriptor or MessageDescriptor in order to
|
||||||
|
// programatically interact with the protobuf type system.
|
||||||
package descriptor
|
package descriptor
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"compress/gzip"
|
"compress/gzip"
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"sync"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
|
"google.golang.org/protobuf/reflect/protodesc"
|
||||||
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
"google.golang.org/protobuf/runtime/protoimpl"
|
||||||
|
|
||||||
|
descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
|
||||||
)
|
)
|
||||||
|
|
||||||
// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
|
// Message is proto.Message with a method to return its descriptor.
|
||||||
func extractFile(gz []byte) (*protobuf.FileDescriptorProto, error) {
|
|
||||||
r, err := gzip.NewReader(bytes.NewReader(gz))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to open gzip reader: %v", err)
|
|
||||||
}
|
|
||||||
defer r.Close()
|
|
||||||
|
|
||||||
b, err := ioutil.ReadAll(r)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
fd := new(protobuf.FileDescriptorProto)
|
|
||||||
if err := proto.Unmarshal(b, fd); err != nil {
|
|
||||||
return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return fd, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Message is a proto.Message with a method to return its descriptor.
|
|
||||||
//
|
//
|
||||||
// Message types generated by the protocol compiler always satisfy
|
// Deprecated: The Descriptor method may not be generated by future
|
||||||
// the Message interface.
|
// versions of protoc-gen-go, meaning that this interface may not
|
||||||
|
// be implemented by many concrete message types.
|
||||||
type Message interface {
|
type Message interface {
|
||||||
proto.Message
|
proto.Message
|
||||||
Descriptor() ([]byte, []int)
|
Descriptor() ([]byte, []int)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it
|
// ForMessage returns the file descriptor proto containing
|
||||||
// describing the given message.
|
// the message and the message descriptor proto for the message itself.
|
||||||
func ForMessage(msg Message) (fd *protobuf.FileDescriptorProto, md *protobuf.DescriptorProto) {
|
// The returned proto messages must not be mutated.
|
||||||
gz, path := msg.Descriptor()
|
//
|
||||||
fd, err := extractFile(gz)
|
// Deprecated: Not all concrete message types satisfy the Message interface.
|
||||||
if err != nil {
|
// Use MessageDescriptorProto instead. If possible, the calling code should
|
||||||
panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
|
// be rewritten to use protobuf reflection instead.
|
||||||
|
// See package "google.golang.org/protobuf/reflect/protoreflect" for details.
|
||||||
|
func ForMessage(m Message) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) {
|
||||||
|
return MessageDescriptorProto(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
type rawDesc struct {
|
||||||
|
fileDesc []byte
|
||||||
|
indexes []int
|
||||||
|
}
|
||||||
|
|
||||||
|
var rawDescCache sync.Map // map[protoreflect.Descriptor]*rawDesc
|
||||||
|
|
||||||
|
func deriveRawDescriptor(d protoreflect.Descriptor) ([]byte, []int) {
|
||||||
|
// Fast-path: check whether raw descriptors are already cached.
|
||||||
|
origDesc := d
|
||||||
|
if v, ok := rawDescCache.Load(origDesc); ok {
|
||||||
|
return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes
|
||||||
}
|
}
|
||||||
|
|
||||||
md = fd.MessageType[path[0]]
|
// Slow-path: derive the raw descriptor from the v2 descriptor.
|
||||||
for _, i := range path[1:] {
|
|
||||||
|
// Start with the leaf (a given enum or message declaration) and
|
||||||
|
// ascend upwards until we hit the parent file descriptor.
|
||||||
|
var idxs []int
|
||||||
|
for {
|
||||||
|
idxs = append(idxs, d.Index())
|
||||||
|
d = d.Parent()
|
||||||
|
if d == nil {
|
||||||
|
// TODO: We could construct a FileDescriptor stub for standalone
|
||||||
|
// descriptors to satisfy the API.
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
if _, ok := d.(protoreflect.FileDescriptor); ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Obtain the raw file descriptor.
|
||||||
|
var raw []byte
|
||||||
|
switch fd := d.(type) {
|
||||||
|
case interface{ ProtoLegacyRawDesc() []byte }:
|
||||||
|
raw = fd.ProtoLegacyRawDesc()
|
||||||
|
case protoreflect.FileDescriptor:
|
||||||
|
raw, _ = proto.Marshal(protodesc.ToFileDescriptorProto(fd))
|
||||||
|
}
|
||||||
|
file := protoimpl.X.CompressGZIP(raw)
|
||||||
|
|
||||||
|
// Reverse the indexes, since we populated it in reverse.
|
||||||
|
for i, j := 0, len(idxs)-1; i < j; i, j = i+1, j-1 {
|
||||||
|
idxs[i], idxs[j] = idxs[j], idxs[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := rawDescCache.LoadOrStore(origDesc, &rawDesc{file, idxs}); ok {
|
||||||
|
return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes
|
||||||
|
}
|
||||||
|
return file, idxs
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnumRawDescriptor returns the GZIP'd raw file descriptor representing
|
||||||
|
// the enum and the index path to reach the enum declaration.
|
||||||
|
// The returned slices must not be mutated.
|
||||||
|
func EnumRawDescriptor(e proto.GeneratedEnum) ([]byte, []int) {
|
||||||
|
if ev, ok := e.(interface{ EnumDescriptor() ([]byte, []int) }); ok {
|
||||||
|
return ev.EnumDescriptor()
|
||||||
|
}
|
||||||
|
ed := protoimpl.X.EnumTypeOf(e)
|
||||||
|
return deriveRawDescriptor(ed.Descriptor())
|
||||||
|
}
|
||||||
|
|
||||||
|
// MessageRawDescriptor returns the GZIP'd raw file descriptor representing
|
||||||
|
// the message and the index path to reach the message declaration.
|
||||||
|
// The returned slices must not be mutated.
|
||||||
|
func MessageRawDescriptor(m proto.GeneratedMessage) ([]byte, []int) {
|
||||||
|
if mv, ok := m.(interface{ Descriptor() ([]byte, []int) }); ok {
|
||||||
|
return mv.Descriptor()
|
||||||
|
}
|
||||||
|
md := protoimpl.X.MessageTypeOf(m)
|
||||||
|
return deriveRawDescriptor(md.Descriptor())
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescCache sync.Map // map[*byte]*descriptorpb.FileDescriptorProto
|
||||||
|
|
||||||
|
func deriveFileDescriptor(rawDesc []byte) *descriptorpb.FileDescriptorProto {
|
||||||
|
// Fast-path: check whether descriptor protos are already cached.
|
||||||
|
if v, ok := fileDescCache.Load(&rawDesc[0]); ok {
|
||||||
|
return v.(*descriptorpb.FileDescriptorProto)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Slow-path: derive the descriptor proto from the GZIP'd message.
|
||||||
|
zr, err := gzip.NewReader(bytes.NewReader(rawDesc))
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
b, err := ioutil.ReadAll(zr)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
fd := new(descriptorpb.FileDescriptorProto)
|
||||||
|
if err := proto.Unmarshal(b, fd); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
if v, ok := fileDescCache.LoadOrStore(&rawDesc[0], fd); ok {
|
||||||
|
return v.(*descriptorpb.FileDescriptorProto)
|
||||||
|
}
|
||||||
|
return fd
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnumDescriptorProto returns the file descriptor proto representing
|
||||||
|
// the enum and the enum descriptor proto for the enum itself.
|
||||||
|
// The returned proto messages must not be mutated.
|
||||||
|
func EnumDescriptorProto(e proto.GeneratedEnum) (*descriptorpb.FileDescriptorProto, *descriptorpb.EnumDescriptorProto) {
|
||||||
|
rawDesc, idxs := EnumRawDescriptor(e)
|
||||||
|
if rawDesc == nil || idxs == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
fd := deriveFileDescriptor(rawDesc)
|
||||||
|
if len(idxs) == 1 {
|
||||||
|
return fd, fd.EnumType[idxs[0]]
|
||||||
|
}
|
||||||
|
md := fd.MessageType[idxs[0]]
|
||||||
|
for _, i := range idxs[1 : len(idxs)-1] {
|
||||||
|
md = md.NestedType[i]
|
||||||
|
}
|
||||||
|
ed := md.EnumType[idxs[len(idxs)-1]]
|
||||||
|
return fd, ed
|
||||||
|
}
|
||||||
|
|
||||||
|
// MessageDescriptorProto returns the file descriptor proto representing
|
||||||
|
// the message and the message descriptor proto for the message itself.
|
||||||
|
// The returned proto messages must not be mutated.
|
||||||
|
func MessageDescriptorProto(m proto.GeneratedMessage) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) {
|
||||||
|
rawDesc, idxs := MessageRawDescriptor(m)
|
||||||
|
if rawDesc == nil || idxs == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
fd := deriveFileDescriptor(rawDesc)
|
||||||
|
md := fd.MessageType[idxs[0]]
|
||||||
|
for _, i := range idxs[1:] {
|
||||||
md = md.NestedType[i]
|
md = md.NestedType[i]
|
||||||
}
|
}
|
||||||
return fd, md
|
return fd, md
|
||||||
|
324
vendor/github.com/golang/protobuf/proto/buffer.go
generated
vendored
Normal file
324
vendor/github.com/golang/protobuf/proto/buffer.go
generated
vendored
Normal file
@ -0,0 +1,324 @@
|
|||||||
|
// Copyright 2019 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package proto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/encoding/prototext"
|
||||||
|
"google.golang.org/protobuf/encoding/protowire"
|
||||||
|
"google.golang.org/protobuf/runtime/protoimpl"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
WireVarint = 0
|
||||||
|
WireFixed32 = 5
|
||||||
|
WireFixed64 = 1
|
||||||
|
WireBytes = 2
|
||||||
|
WireStartGroup = 3
|
||||||
|
WireEndGroup = 4
|
||||||
|
)
|
||||||
|
|
||||||
|
// EncodeVarint returns the varint encoded bytes of v.
|
||||||
|
func EncodeVarint(v uint64) []byte {
|
||||||
|
return protowire.AppendVarint(nil, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SizeVarint returns the length of the varint encoded bytes of v.
|
||||||
|
// This is equal to len(EncodeVarint(v)).
|
||||||
|
func SizeVarint(v uint64) int {
|
||||||
|
return protowire.SizeVarint(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeVarint parses a varint encoded integer from b,
|
||||||
|
// returning the integer value and the length of the varint.
|
||||||
|
// It returns (0, 0) if there is a parse error.
|
||||||
|
func DecodeVarint(b []byte) (uint64, int) {
|
||||||
|
v, n := protowire.ConsumeVarint(b)
|
||||||
|
if n < 0 {
|
||||||
|
return 0, 0
|
||||||
|
}
|
||||||
|
return v, n
|
||||||
|
}
|
||||||
|
|
||||||
|
// Buffer is a buffer for encoding and decoding the protobuf wire format.
|
||||||
|
// It may be reused between invocations to reduce memory usage.
|
||||||
|
type Buffer struct {
|
||||||
|
buf []byte
|
||||||
|
idx int
|
||||||
|
deterministic bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBuffer allocates a new Buffer initialized with buf,
|
||||||
|
// where the contents of buf are considered the unread portion of the buffer.
|
||||||
|
func NewBuffer(buf []byte) *Buffer {
|
||||||
|
return &Buffer{buf: buf}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDeterministic specifies whether to use deterministic serialization.
|
||||||
|
//
|
||||||
|
// Deterministic serialization guarantees that for a given binary, equal
|
||||||
|
// messages will always be serialized to the same bytes. This implies:
|
||||||
|
//
|
||||||
|
// - Repeated serialization of a message will return the same bytes.
|
||||||
|
// - Different processes of the same binary (which may be executing on
|
||||||
|
// different machines) will serialize equal messages to the same bytes.
|
||||||
|
//
|
||||||
|
// Note that the deterministic serialization is NOT canonical across
|
||||||
|
// languages. It is not guaranteed to remain stable over time. It is unstable
|
||||||
|
// across different builds with schema changes due to unknown fields.
|
||||||
|
// Users who need canonical serialization (e.g., persistent storage in a
|
||||||
|
// canonical form, fingerprinting, etc.) should define their own
|
||||||
|
// canonicalization specification and implement their own serializer rather
|
||||||
|
// than relying on this API.
|
||||||
|
//
|
||||||
|
// If deterministic serialization is requested, map entries will be sorted
|
||||||
|
// by keys in lexographical order. This is an implementation detail and
|
||||||
|
// subject to change.
|
||||||
|
func (b *Buffer) SetDeterministic(deterministic bool) {
|
||||||
|
b.deterministic = deterministic
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBuf sets buf as the internal buffer,
|
||||||
|
// where the contents of buf are considered the unread portion of the buffer.
|
||||||
|
func (b *Buffer) SetBuf(buf []byte) {
|
||||||
|
b.buf = buf
|
||||||
|
b.idx = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset clears the internal buffer of all written and unread data.
|
||||||
|
func (b *Buffer) Reset() {
|
||||||
|
b.buf = b.buf[:0]
|
||||||
|
b.idx = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bytes returns the internal buffer.
|
||||||
|
func (b *Buffer) Bytes() []byte {
|
||||||
|
return b.buf
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unread returns the unread portion of the buffer.
|
||||||
|
func (b *Buffer) Unread() []byte {
|
||||||
|
return b.buf[b.idx:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal appends the wire-format encoding of m to the buffer.
|
||||||
|
func (b *Buffer) Marshal(m Message) error {
|
||||||
|
var err error
|
||||||
|
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal parses the wire-format message in the buffer and
|
||||||
|
// places the decoded results in m.
|
||||||
|
// It does not reset m before unmarshaling.
|
||||||
|
func (b *Buffer) Unmarshal(m Message) error {
|
||||||
|
err := UnmarshalMerge(b.Unread(), m)
|
||||||
|
b.idx = len(b.buf)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
|
||||||
|
|
||||||
|
func (m *unknownFields) String() string { panic("not implemented") }
|
||||||
|
func (m *unknownFields) Reset() { panic("not implemented") }
|
||||||
|
func (m *unknownFields) ProtoMessage() { panic("not implemented") }
|
||||||
|
|
||||||
|
// DebugPrint dumps the encoded bytes of b with a header and footer including s
|
||||||
|
// to stdout. This is only intended for debugging.
|
||||||
|
func (*Buffer) DebugPrint(s string, b []byte) {
|
||||||
|
m := MessageReflect(new(unknownFields))
|
||||||
|
m.SetUnknown(b)
|
||||||
|
b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
|
||||||
|
fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeVarint appends an unsigned varint encoding to the buffer.
|
||||||
|
func (b *Buffer) EncodeVarint(v uint64) error {
|
||||||
|
b.buf = protowire.AppendVarint(b.buf, v)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
|
||||||
|
func (b *Buffer) EncodeZigzag32(v uint64) error {
|
||||||
|
return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
|
||||||
|
func (b *Buffer) EncodeZigzag64(v uint64) error {
|
||||||
|
return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
|
||||||
|
func (b *Buffer) EncodeFixed32(v uint64) error {
|
||||||
|
b.buf = protowire.AppendFixed32(b.buf, uint32(v))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
|
||||||
|
func (b *Buffer) EncodeFixed64(v uint64) error {
|
||||||
|
b.buf = protowire.AppendFixed64(b.buf, uint64(v))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
|
||||||
|
func (b *Buffer) EncodeRawBytes(v []byte) error {
|
||||||
|
b.buf = protowire.AppendBytes(b.buf, v)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
|
||||||
|
// It does not validate whether v contains valid UTF-8.
|
||||||
|
func (b *Buffer) EncodeStringBytes(v string) error {
|
||||||
|
b.buf = protowire.AppendString(b.buf, v)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeMessage appends a length-prefixed encoded message to the buffer.
|
||||||
|
func (b *Buffer) EncodeMessage(m Message) error {
|
||||||
|
var err error
|
||||||
|
b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
|
||||||
|
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeVarint consumes an encoded unsigned varint from the buffer.
|
||||||
|
func (b *Buffer) DecodeVarint() (uint64, error) {
|
||||||
|
v, n := protowire.ConsumeVarint(b.buf[b.idx:])
|
||||||
|
if n < 0 {
|
||||||
|
return 0, protowire.ParseError(n)
|
||||||
|
}
|
||||||
|
b.idx += n
|
||||||
|
return uint64(v), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
|
||||||
|
func (b *Buffer) DecodeZigzag32() (uint64, error) {
|
||||||
|
v, err := b.DecodeVarint()
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
|
||||||
|
func (b *Buffer) DecodeZigzag64() (uint64, error) {
|
||||||
|
v, err := b.DecodeVarint()
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
|
||||||
|
func (b *Buffer) DecodeFixed32() (uint64, error) {
|
||||||
|
v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
|
||||||
|
if n < 0 {
|
||||||
|
return 0, protowire.ParseError(n)
|
||||||
|
}
|
||||||
|
b.idx += n
|
||||||
|
return uint64(v), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
|
||||||
|
func (b *Buffer) DecodeFixed64() (uint64, error) {
|
||||||
|
v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
|
||||||
|
if n < 0 {
|
||||||
|
return 0, protowire.ParseError(n)
|
||||||
|
}
|
||||||
|
b.idx += n
|
||||||
|
return uint64(v), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
|
||||||
|
// If alloc is specified, it returns a copy the raw bytes
|
||||||
|
// rather than a sub-slice of the buffer.
|
||||||
|
func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
|
||||||
|
v, n := protowire.ConsumeBytes(b.buf[b.idx:])
|
||||||
|
if n < 0 {
|
||||||
|
return nil, protowire.ParseError(n)
|
||||||
|
}
|
||||||
|
b.idx += n
|
||||||
|
if alloc {
|
||||||
|
v = append([]byte(nil), v...)
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
|
||||||
|
// It does not validate whether the raw bytes contain valid UTF-8.
|
||||||
|
func (b *Buffer) DecodeStringBytes() (string, error) {
|
||||||
|
v, n := protowire.ConsumeString(b.buf[b.idx:])
|
||||||
|
if n < 0 {
|
||||||
|
return "", protowire.ParseError(n)
|
||||||
|
}
|
||||||
|
b.idx += n
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeMessage consumes a length-prefixed message from the buffer.
|
||||||
|
// It does not reset m before unmarshaling.
|
||||||
|
func (b *Buffer) DecodeMessage(m Message) error {
|
||||||
|
v, err := b.DecodeRawBytes(false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return UnmarshalMerge(v, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeGroup consumes a message group from the buffer.
|
||||||
|
// It assumes that the start group marker has already been consumed and
|
||||||
|
// consumes all bytes until (and including the end group marker).
|
||||||
|
// It does not reset m before unmarshaling.
|
||||||
|
func (b *Buffer) DecodeGroup(m Message) error {
|
||||||
|
v, n, err := consumeGroup(b.buf[b.idx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
b.idx += n
|
||||||
|
return UnmarshalMerge(v, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// consumeGroup parses b until it finds an end group marker, returning
|
||||||
|
// the raw bytes of the message (excluding the end group marker) and the
|
||||||
|
// the total length of the message (including the end group marker).
|
||||||
|
func consumeGroup(b []byte) ([]byte, int, error) {
|
||||||
|
b0 := b
|
||||||
|
depth := 1 // assume this follows a start group marker
|
||||||
|
for {
|
||||||
|
_, wtyp, tagLen := protowire.ConsumeTag(b)
|
||||||
|
if tagLen < 0 {
|
||||||
|
return nil, 0, protowire.ParseError(tagLen)
|
||||||
|
}
|
||||||
|
b = b[tagLen:]
|
||||||
|
|
||||||
|
var valLen int
|
||||||
|
switch wtyp {
|
||||||
|
case protowire.VarintType:
|
||||||
|
_, valLen = protowire.ConsumeVarint(b)
|
||||||
|
case protowire.Fixed32Type:
|
||||||
|
_, valLen = protowire.ConsumeFixed32(b)
|
||||||
|
case protowire.Fixed64Type:
|
||||||
|
_, valLen = protowire.ConsumeFixed64(b)
|
||||||
|
case protowire.BytesType:
|
||||||
|
_, valLen = protowire.ConsumeBytes(b)
|
||||||
|
case protowire.StartGroupType:
|
||||||
|
depth++
|
||||||
|
case protowire.EndGroupType:
|
||||||
|
depth--
|
||||||
|
default:
|
||||||
|
return nil, 0, errors.New("proto: cannot parse reserved wire type")
|
||||||
|
}
|
||||||
|
if valLen < 0 {
|
||||||
|
return nil, 0, protowire.ParseError(valLen)
|
||||||
|
}
|
||||||
|
b = b[valLen:]
|
||||||
|
|
||||||
|
if depth == 0 {
|
||||||
|
return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
253
vendor/github.com/golang/protobuf/proto/clone.go
generated
vendored
253
vendor/github.com/golang/protobuf/proto/clone.go
generated
vendored
@ -1,253 +0,0 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
// Protocol buffer deep copy and merge.
|
|
||||||
// TODO: RawMessage.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Clone returns a deep copy of a protocol buffer.
|
|
||||||
func Clone(src Message) Message {
|
|
||||||
in := reflect.ValueOf(src)
|
|
||||||
if in.IsNil() {
|
|
||||||
return src
|
|
||||||
}
|
|
||||||
out := reflect.New(in.Type().Elem())
|
|
||||||
dst := out.Interface().(Message)
|
|
||||||
Merge(dst, src)
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merger is the interface representing objects that can merge messages of the same type.
|
|
||||||
type Merger interface {
|
|
||||||
// Merge merges src into this message.
|
|
||||||
// Required and optional fields that are set in src will be set to that value in dst.
|
|
||||||
// Elements of repeated fields will be appended.
|
|
||||||
//
|
|
||||||
// Merge may panic if called with a different argument type than the receiver.
|
|
||||||
Merge(src Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
// generatedMerger is the custom merge method that generated protos will have.
|
|
||||||
// We must add this method since a generate Merge method will conflict with
|
|
||||||
// many existing protos that have a Merge data field already defined.
|
|
||||||
type generatedMerger interface {
|
|
||||||
XXX_Merge(src Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge merges src into dst.
|
|
||||||
// Required and optional fields that are set in src will be set to that value in dst.
|
|
||||||
// Elements of repeated fields will be appended.
|
|
||||||
// Merge panics if src and dst are not the same type, or if dst is nil.
|
|
||||||
func Merge(dst, src Message) {
|
|
||||||
if m, ok := dst.(Merger); ok {
|
|
||||||
m.Merge(src)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
in := reflect.ValueOf(src)
|
|
||||||
out := reflect.ValueOf(dst)
|
|
||||||
if out.IsNil() {
|
|
||||||
panic("proto: nil destination")
|
|
||||||
}
|
|
||||||
if in.Type() != out.Type() {
|
|
||||||
panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
|
|
||||||
}
|
|
||||||
if in.IsNil() {
|
|
||||||
return // Merge from nil src is a noop
|
|
||||||
}
|
|
||||||
if m, ok := dst.(generatedMerger); ok {
|
|
||||||
m.XXX_Merge(src)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
mergeStruct(out.Elem(), in.Elem())
|
|
||||||
}
|
|
||||||
|
|
||||||
func mergeStruct(out, in reflect.Value) {
|
|
||||||
sprop := GetProperties(in.Type())
|
|
||||||
for i := 0; i < in.NumField(); i++ {
|
|
||||||
f := in.Type().Field(i)
|
|
||||||
if strings.HasPrefix(f.Name, "XXX_") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
|
||||||
emOut, _ := extendable(out.Addr().Interface())
|
|
||||||
mIn, muIn := emIn.extensionsRead()
|
|
||||||
if mIn != nil {
|
|
||||||
mOut := emOut.extensionsWrite()
|
|
||||||
muIn.Lock()
|
|
||||||
mergeExtension(mOut, mIn)
|
|
||||||
muIn.Unlock()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
uf := in.FieldByName("XXX_unrecognized")
|
|
||||||
if !uf.IsValid() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
uin := uf.Bytes()
|
|
||||||
if len(uin) > 0 {
|
|
||||||
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// mergeAny performs a merge between two values of the same type.
|
|
||||||
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
|
|
||||||
// prop is set if this is a struct field (it may be nil).
|
|
||||||
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
|
|
||||||
if in.Type() == protoMessageType {
|
|
||||||
if !in.IsNil() {
|
|
||||||
if out.IsNil() {
|
|
||||||
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
|
|
||||||
} else {
|
|
||||||
Merge(out.Interface().(Message), in.Interface().(Message))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch in.Kind() {
|
|
||||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
|
||||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
|
||||||
if !viaPtr && isProto3Zero(in) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
out.Set(in)
|
|
||||||
case reflect.Interface:
|
|
||||||
// Probably a oneof field; copy non-nil values.
|
|
||||||
if in.IsNil() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Allocate destination if it is not set, or set to a different type.
|
|
||||||
// Otherwise we will merge as normal.
|
|
||||||
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
|
|
||||||
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
|
|
||||||
}
|
|
||||||
mergeAny(out.Elem(), in.Elem(), false, nil)
|
|
||||||
case reflect.Map:
|
|
||||||
if in.Len() == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if out.IsNil() {
|
|
||||||
out.Set(reflect.MakeMap(in.Type()))
|
|
||||||
}
|
|
||||||
// For maps with value types of *T or []byte we need to deep copy each value.
|
|
||||||
elemKind := in.Type().Elem().Kind()
|
|
||||||
for _, key := range in.MapKeys() {
|
|
||||||
var val reflect.Value
|
|
||||||
switch elemKind {
|
|
||||||
case reflect.Ptr:
|
|
||||||
val = reflect.New(in.Type().Elem().Elem())
|
|
||||||
mergeAny(val, in.MapIndex(key), false, nil)
|
|
||||||
case reflect.Slice:
|
|
||||||
val = in.MapIndex(key)
|
|
||||||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
|
||||||
default:
|
|
||||||
val = in.MapIndex(key)
|
|
||||||
}
|
|
||||||
out.SetMapIndex(key, val)
|
|
||||||
}
|
|
||||||
case reflect.Ptr:
|
|
||||||
if in.IsNil() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if out.IsNil() {
|
|
||||||
out.Set(reflect.New(in.Elem().Type()))
|
|
||||||
}
|
|
||||||
mergeAny(out.Elem(), in.Elem(), true, nil)
|
|
||||||
case reflect.Slice:
|
|
||||||
if in.IsNil() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if in.Type().Elem().Kind() == reflect.Uint8 {
|
|
||||||
// []byte is a scalar bytes field, not a repeated field.
|
|
||||||
|
|
||||||
// Edge case: if this is in a proto3 message, a zero length
|
|
||||||
// bytes field is considered the zero value, and should not
|
|
||||||
// be merged.
|
|
||||||
if prop != nil && prop.proto3 && in.Len() == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make a deep copy.
|
|
||||||
// Append to []byte{} instead of []byte(nil) so that we never end up
|
|
||||||
// with a nil result.
|
|
||||||
out.SetBytes(append([]byte{}, in.Bytes()...))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
n := in.Len()
|
|
||||||
if out.IsNil() {
|
|
||||||
out.Set(reflect.MakeSlice(in.Type(), 0, n))
|
|
||||||
}
|
|
||||||
switch in.Type().Elem().Kind() {
|
|
||||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
|
||||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
|
||||||
out.Set(reflect.AppendSlice(out, in))
|
|
||||||
default:
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
x := reflect.Indirect(reflect.New(in.Type().Elem()))
|
|
||||||
mergeAny(x, in.Index(i), false, nil)
|
|
||||||
out.Set(reflect.Append(out, x))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Struct:
|
|
||||||
mergeStruct(out, in)
|
|
||||||
default:
|
|
||||||
// unknown type, so not a protocol buffer
|
|
||||||
log.Printf("proto: don't know how to copy %v", in)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func mergeExtension(out, in map[int32]Extension) {
|
|
||||||
for extNum, eIn := range in {
|
|
||||||
eOut := Extension{desc: eIn.desc}
|
|
||||||
if eIn.value != nil {
|
|
||||||
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
|
|
||||||
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
|
|
||||||
eOut.value = v.Interface()
|
|
||||||
}
|
|
||||||
if eIn.enc != nil {
|
|
||||||
eOut.enc = make([]byte, len(eIn.enc))
|
|
||||||
copy(eOut.enc, eIn.enc)
|
|
||||||
}
|
|
||||||
|
|
||||||
out[extNum] = eOut
|
|
||||||
}
|
|
||||||
}
|
|
427
vendor/github.com/golang/protobuf/proto/decode.go
generated
vendored
427
vendor/github.com/golang/protobuf/proto/decode.go
generated
vendored
@ -1,427 +0,0 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Routines for decoding protocol buffer data to construct in-memory representations.
|
|
||||||
*/
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// errOverflow is returned when an integer is too large to be represented.
|
|
||||||
var errOverflow = errors.New("proto: integer overflow")
|
|
||||||
|
|
||||||
// ErrInternalBadWireType is returned by generated code when an incorrect
|
|
||||||
// wire type is encountered. It does not get returned to user code.
|
|
||||||
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
|
||||||
|
|
||||||
// DecodeVarint reads a varint-encoded integer from the slice.
|
|
||||||
// It returns the integer and the number of bytes consumed, or
|
|
||||||
// zero if there is not enough.
|
|
||||||
// This is the format for the
|
|
||||||
// int32, int64, uint32, uint64, bool, and enum
|
|
||||||
// protocol buffer types.
|
|
||||||
func DecodeVarint(buf []byte) (x uint64, n int) {
|
|
||||||
for shift := uint(0); shift < 64; shift += 7 {
|
|
||||||
if n >= len(buf) {
|
|
||||||
return 0, 0
|
|
||||||
}
|
|
||||||
b := uint64(buf[n])
|
|
||||||
n++
|
|
||||||
x |= (b & 0x7F) << shift
|
|
||||||
if (b & 0x80) == 0 {
|
|
||||||
return x, n
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The number is too large to represent in a 64-bit value.
|
|
||||||
return 0, 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
|
|
||||||
i := p.index
|
|
||||||
l := len(p.buf)
|
|
||||||
|
|
||||||
for shift := uint(0); shift < 64; shift += 7 {
|
|
||||||
if i >= l {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
return
|
|
||||||
}
|
|
||||||
b := p.buf[i]
|
|
||||||
i++
|
|
||||||
x |= (uint64(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
p.index = i
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The number is too large to represent in a 64-bit value.
|
|
||||||
err = errOverflow
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
|
||||||
// This is the format for the
|
|
||||||
// int32, int64, uint32, uint64, bool, and enum
|
|
||||||
// protocol buffer types.
|
|
||||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
|
||||||
i := p.index
|
|
||||||
buf := p.buf
|
|
||||||
|
|
||||||
if i >= len(buf) {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
} else if buf[i] < 0x80 {
|
|
||||||
p.index++
|
|
||||||
return uint64(buf[i]), nil
|
|
||||||
} else if len(buf)-i < 10 {
|
|
||||||
return p.decodeVarintSlow()
|
|
||||||
}
|
|
||||||
|
|
||||||
var b uint64
|
|
||||||
// we already checked the first byte
|
|
||||||
x = uint64(buf[i]) - 0x80
|
|
||||||
i++
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 7
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 7
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 14
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 14
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 21
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 21
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 28
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 28
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 35
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 35
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 42
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 42
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 49
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 49
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 56
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 56
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 63
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0, errOverflow
|
|
||||||
|
|
||||||
done:
|
|
||||||
p.index = i
|
|
||||||
return x, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
|
||||||
// This is the format for the
|
|
||||||
// fixed64, sfixed64, and double protocol buffer types.
|
|
||||||
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
|
|
||||||
// x, err already 0
|
|
||||||
i := p.index + 8
|
|
||||||
if i < 0 || i > len(p.buf) {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.index = i
|
|
||||||
|
|
||||||
x = uint64(p.buf[i-8])
|
|
||||||
x |= uint64(p.buf[i-7]) << 8
|
|
||||||
x |= uint64(p.buf[i-6]) << 16
|
|
||||||
x |= uint64(p.buf[i-5]) << 24
|
|
||||||
x |= uint64(p.buf[i-4]) << 32
|
|
||||||
x |= uint64(p.buf[i-3]) << 40
|
|
||||||
x |= uint64(p.buf[i-2]) << 48
|
|
||||||
x |= uint64(p.buf[i-1]) << 56
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeFixed32 reads a 32-bit integer from the Buffer.
|
|
||||||
// This is the format for the
|
|
||||||
// fixed32, sfixed32, and float protocol buffer types.
|
|
||||||
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
|
|
||||||
// x, err already 0
|
|
||||||
i := p.index + 4
|
|
||||||
if i < 0 || i > len(p.buf) {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.index = i
|
|
||||||
|
|
||||||
x = uint64(p.buf[i-4])
|
|
||||||
x |= uint64(p.buf[i-3]) << 8
|
|
||||||
x |= uint64(p.buf[i-2]) << 16
|
|
||||||
x |= uint64(p.buf[i-1]) << 24
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
|
|
||||||
// from the Buffer.
|
|
||||||
// This is the format used for the sint64 protocol buffer type.
|
|
||||||
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
|
|
||||||
x, err = p.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
|
|
||||||
// from the Buffer.
|
|
||||||
// This is the format used for the sint32 protocol buffer type.
|
|
||||||
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
|
|
||||||
x, err = p.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
|
||||||
// This is the format used for the bytes protocol buffer
|
|
||||||
// type and for embedded messages.
|
|
||||||
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
|
|
||||||
n, err := p.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
nb := int(n)
|
|
||||||
if nb < 0 {
|
|
||||||
return nil, fmt.Errorf("proto: bad byte length %d", nb)
|
|
||||||
}
|
|
||||||
end := p.index + nb
|
|
||||||
if end < p.index || end > len(p.buf) {
|
|
||||||
return nil, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
|
|
||||||
if !alloc {
|
|
||||||
// todo: check if can get more uses of alloc=false
|
|
||||||
buf = p.buf[p.index:end]
|
|
||||||
p.index += nb
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
buf = make([]byte, nb)
|
|
||||||
copy(buf, p.buf[p.index:])
|
|
||||||
p.index += nb
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeStringBytes reads an encoded string from the Buffer.
|
|
||||||
// This is the format used for the proto2 string type.
|
|
||||||
func (p *Buffer) DecodeStringBytes() (s string, err error) {
|
|
||||||
buf, err := p.DecodeRawBytes(false)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return string(buf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshaler is the interface representing objects that can
|
|
||||||
// unmarshal themselves. The argument points to data that may be
|
|
||||||
// overwritten, so implementations should not keep references to the
|
|
||||||
// buffer.
|
|
||||||
// Unmarshal implementations should not clear the receiver.
|
|
||||||
// Any unmarshaled data should be merged into the receiver.
|
|
||||||
// Callers of Unmarshal that do not want to retain existing data
|
|
||||||
// should Reset the receiver before calling Unmarshal.
|
|
||||||
type Unmarshaler interface {
|
|
||||||
Unmarshal([]byte) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// newUnmarshaler is the interface representing objects that can
|
|
||||||
// unmarshal themselves. The semantics are identical to Unmarshaler.
|
|
||||||
//
|
|
||||||
// This exists to support protoc-gen-go generated messages.
|
|
||||||
// The proto package will stop type-asserting to this interface in the future.
|
|
||||||
//
|
|
||||||
// DO NOT DEPEND ON THIS.
|
|
||||||
type newUnmarshaler interface {
|
|
||||||
XXX_Unmarshal([]byte) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal parses the protocol buffer representation in buf and places the
|
|
||||||
// decoded result in pb. If the struct underlying pb does not match
|
|
||||||
// the data in buf, the results can be unpredictable.
|
|
||||||
//
|
|
||||||
// Unmarshal resets pb before starting to unmarshal, so any
|
|
||||||
// existing data in pb is always removed. Use UnmarshalMerge
|
|
||||||
// to preserve and append to existing data.
|
|
||||||
func Unmarshal(buf []byte, pb Message) error {
|
|
||||||
pb.Reset()
|
|
||||||
if u, ok := pb.(newUnmarshaler); ok {
|
|
||||||
return u.XXX_Unmarshal(buf)
|
|
||||||
}
|
|
||||||
if u, ok := pb.(Unmarshaler); ok {
|
|
||||||
return u.Unmarshal(buf)
|
|
||||||
}
|
|
||||||
return NewBuffer(buf).Unmarshal(pb)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalMerge parses the protocol buffer representation in buf and
|
|
||||||
// writes the decoded result to pb. If the struct underlying pb does not match
|
|
||||||
// the data in buf, the results can be unpredictable.
|
|
||||||
//
|
|
||||||
// UnmarshalMerge merges into existing data in pb.
|
|
||||||
// Most code should use Unmarshal instead.
|
|
||||||
func UnmarshalMerge(buf []byte, pb Message) error {
|
|
||||||
if u, ok := pb.(newUnmarshaler); ok {
|
|
||||||
return u.XXX_Unmarshal(buf)
|
|
||||||
}
|
|
||||||
if u, ok := pb.(Unmarshaler); ok {
|
|
||||||
// NOTE: The history of proto have unfortunately been inconsistent
|
|
||||||
// whether Unmarshaler should or should not implicitly clear itself.
|
|
||||||
// Some implementations do, most do not.
|
|
||||||
// Thus, calling this here may or may not do what people want.
|
|
||||||
//
|
|
||||||
// See https://github.com/golang/protobuf/issues/424
|
|
||||||
return u.Unmarshal(buf)
|
|
||||||
}
|
|
||||||
return NewBuffer(buf).Unmarshal(pb)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeMessage reads a count-delimited message from the Buffer.
|
|
||||||
func (p *Buffer) DecodeMessage(pb Message) error {
|
|
||||||
enc, err := p.DecodeRawBytes(false)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return NewBuffer(enc).Unmarshal(pb)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeGroup reads a tag-delimited group from the Buffer.
|
|
||||||
// StartGroup tag is already consumed. This function consumes
|
|
||||||
// EndGroup tag.
|
|
||||||
func (p *Buffer) DecodeGroup(pb Message) error {
|
|
||||||
b := p.buf[p.index:]
|
|
||||||
x, y := findEndGroup(b)
|
|
||||||
if x < 0 {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
err := Unmarshal(b[:x], pb)
|
|
||||||
p.index += y
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal parses the protocol buffer representation in the
|
|
||||||
// Buffer and places the decoded result in pb. If the struct
|
|
||||||
// underlying pb does not match the data in the buffer, the results can be
|
|
||||||
// unpredictable.
|
|
||||||
//
|
|
||||||
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
|
|
||||||
func (p *Buffer) Unmarshal(pb Message) error {
|
|
||||||
// If the object can unmarshal itself, let it.
|
|
||||||
if u, ok := pb.(newUnmarshaler); ok {
|
|
||||||
err := u.XXX_Unmarshal(p.buf[p.index:])
|
|
||||||
p.index = len(p.buf)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if u, ok := pb.(Unmarshaler); ok {
|
|
||||||
// NOTE: The history of proto have unfortunately been inconsistent
|
|
||||||
// whether Unmarshaler should or should not implicitly clear itself.
|
|
||||||
// Some implementations do, most do not.
|
|
||||||
// Thus, calling this here may or may not do what people want.
|
|
||||||
//
|
|
||||||
// See https://github.com/golang/protobuf/issues/424
|
|
||||||
err := u.Unmarshal(p.buf[p.index:])
|
|
||||||
p.index = len(p.buf)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Slow workaround for messages that aren't Unmarshalers.
|
|
||||||
// This includes some hand-coded .pb.go files and
|
|
||||||
// bootstrap protos.
|
|
||||||
// TODO: fix all of those and then add Unmarshal to
|
|
||||||
// the Message interface. Then:
|
|
||||||
// The cast above and code below can be deleted.
|
|
||||||
// The old unmarshaler can be deleted.
|
|
||||||
// Clients can call Unmarshal directly (can already do that, actually).
|
|
||||||
var info InternalMessageInfo
|
|
||||||
err := info.Unmarshal(pb, p.buf[p.index:])
|
|
||||||
p.index = len(p.buf)
|
|
||||||
return err
|
|
||||||
}
|
|
63
vendor/github.com/golang/protobuf/proto/defaults.go
generated
vendored
Normal file
63
vendor/github.com/golang/protobuf/proto/defaults.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
// Copyright 2019 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package proto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetDefaults sets unpopulated scalar fields to their default values.
|
||||||
|
// Fields within a oneof are not set even if they have a default value.
|
||||||
|
// SetDefaults is recursively called upon any populated message fields.
|
||||||
|
func SetDefaults(m Message) {
|
||||||
|
if m != nil {
|
||||||
|
setDefaults(MessageReflect(m))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setDefaults(m protoreflect.Message) {
|
||||||
|
fds := m.Descriptor().Fields()
|
||||||
|
for i := 0; i < fds.Len(); i++ {
|
||||||
|
fd := fds.Get(i)
|
||||||
|
if !m.Has(fd) {
|
||||||
|
if fd.HasDefault() && fd.ContainingOneof() == nil {
|
||||||
|
v := fd.Default()
|
||||||
|
if fd.Kind() == protoreflect.BytesKind {
|
||||||
|
v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
|
||||||
|
}
|
||||||
|
m.Set(fd, v)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||||
|
switch {
|
||||||
|
// Handle singular message.
|
||||||
|
case fd.Cardinality() != protoreflect.Repeated:
|
||||||
|
if fd.Message() != nil {
|
||||||
|
setDefaults(m.Get(fd).Message())
|
||||||
|
}
|
||||||
|
// Handle list of messages.
|
||||||
|
case fd.IsList():
|
||||||
|
if fd.Message() != nil {
|
||||||
|
ls := m.Get(fd).List()
|
||||||
|
for i := 0; i < ls.Len(); i++ {
|
||||||
|
setDefaults(ls.Get(i).Message())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Handle map of messages.
|
||||||
|
case fd.IsMap():
|
||||||
|
if fd.MapValue().Message() != nil {
|
||||||
|
ms := m.Get(fd).Map()
|
||||||
|
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
|
||||||
|
setDefaults(v.Message())
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
126
vendor/github.com/golang/protobuf/proto/deprecated.go
generated
vendored
126
vendor/github.com/golang/protobuf/proto/deprecated.go
generated
vendored
@ -1,63 +1,113 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
//
|
// Use of this source code is governed by a BSD-style
|
||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
// license that can be found in the LICENSE file.
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
package proto
|
||||||
|
|
||||||
import "errors"
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
// Deprecated: do not use.
|
protoV2 "google.golang.org/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Deprecated: No longer returned.
|
||||||
|
ErrNil = errors.New("proto: Marshal called with nil")
|
||||||
|
|
||||||
|
// Deprecated: No longer returned.
|
||||||
|
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
|
||||||
|
|
||||||
|
// Deprecated: No longer returned.
|
||||||
|
ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Deprecated: Do not use.
|
||||||
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
|
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
|
||||||
|
|
||||||
// Deprecated: do not use.
|
// Deprecated: Do not use.
|
||||||
func GetStats() Stats { return Stats{} }
|
func GetStats() Stats { return Stats{} }
|
||||||
|
|
||||||
// Deprecated: do not use.
|
// Deprecated: Do not use.
|
||||||
func MarshalMessageSet(interface{}) ([]byte, error) {
|
func MarshalMessageSet(interface{}) ([]byte, error) {
|
||||||
return nil, errors.New("proto: not implemented")
|
return nil, errors.New("proto: not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deprecated: do not use.
|
// Deprecated: Do not use.
|
||||||
func UnmarshalMessageSet([]byte, interface{}) error {
|
func UnmarshalMessageSet([]byte, interface{}) error {
|
||||||
return errors.New("proto: not implemented")
|
return errors.New("proto: not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deprecated: do not use.
|
// Deprecated: Do not use.
|
||||||
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
|
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
|
||||||
return nil, errors.New("proto: not implemented")
|
return nil, errors.New("proto: not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deprecated: do not use.
|
// Deprecated: Do not use.
|
||||||
func UnmarshalMessageSetJSON([]byte, interface{}) error {
|
func UnmarshalMessageSetJSON([]byte, interface{}) error {
|
||||||
return errors.New("proto: not implemented")
|
return errors.New("proto: not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deprecated: do not use.
|
// Deprecated: Do not use.
|
||||||
func RegisterMessageSetType(Message, int32, string) {}
|
func RegisterMessageSetType(Message, int32, string) {}
|
||||||
|
|
||||||
|
// Deprecated: Do not use.
|
||||||
|
func EnumName(m map[int32]string, v int32) string {
|
||||||
|
s, ok := m[v]
|
||||||
|
if ok {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
return strconv.Itoa(int(v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Do not use.
|
||||||
|
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
||||||
|
if data[0] == '"' {
|
||||||
|
// New style: enums are strings.
|
||||||
|
var repr string
|
||||||
|
if err := json.Unmarshal(data, &repr); err != nil {
|
||||||
|
return -1, err
|
||||||
|
}
|
||||||
|
val, ok := m[repr]
|
||||||
|
if !ok {
|
||||||
|
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
||||||
|
}
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
// Old style: enums are ints.
|
||||||
|
var val int32
|
||||||
|
if err := json.Unmarshal(data, &val); err != nil {
|
||||||
|
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
||||||
|
}
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Do not use; this type existed for intenal-use only.
|
||||||
|
type InternalMessageInfo struct{}
|
||||||
|
|
||||||
|
// Deprecated: Do not use; this method existed for intenal-use only.
|
||||||
|
func (*InternalMessageInfo) DiscardUnknown(m Message) {
|
||||||
|
DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Do not use; this method existed for intenal-use only.
|
||||||
|
func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
|
||||||
|
return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Do not use; this method existed for intenal-use only.
|
||||||
|
func (*InternalMessageInfo) Merge(dst, src Message) {
|
||||||
|
protoV2.Merge(MessageV2(dst), MessageV2(src))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Do not use; this method existed for intenal-use only.
|
||||||
|
func (*InternalMessageInfo) Size(m Message) int {
|
||||||
|
return protoV2.Size(MessageV2(m))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Do not use; this method existed for intenal-use only.
|
||||||
|
func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
|
||||||
|
return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
|
||||||
|
}
|
||||||
|
356
vendor/github.com/golang/protobuf/proto/discard.go
generated
vendored
356
vendor/github.com/golang/protobuf/proto/discard.go
generated
vendored
@ -1,48 +1,13 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
// Copyright 2019 The Go Authors. All rights reserved.
|
||||||
//
|
// Use of this source code is governed by a BSD-style
|
||||||
// Copyright 2017 The Go Authors. All rights reserved.
|
// license that can be found in the LICENSE file.
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
package proto
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type generatedDiscarder interface {
|
|
||||||
XXX_DiscardUnknown()
|
|
||||||
}
|
|
||||||
|
|
||||||
// DiscardUnknown recursively discards all unknown fields from this message
|
// DiscardUnknown recursively discards all unknown fields from this message
|
||||||
// and all embedded messages.
|
// and all embedded messages.
|
||||||
//
|
//
|
||||||
@@ -51,300 +16,43 @@ type generatedDiscarder interface {
 // marshal to be able to produce a message that continues to have those
 // unrecognized fields. To avoid this, DiscardUnknown is used to
 // explicitly clear the unknown fields after unmarshaling.
-//
-// For proto2 messages, the unknown fields of message extensions are only
-// discarded from messages that have been accessed via GetExtension.
 func DiscardUnknown(m Message) {
-	if m, ok := m.(generatedDiscarder); ok {
-		m.XXX_DiscardUnknown()
-		return
-	}
-	// TODO: Dynamically populate a InternalMessageInfo for legacy messages,
-	// but the master branch has no implementation for InternalMessageInfo,
-	// so it would be more work to replicate that approach.
-	discardLegacy(m)
+	if m != nil {
+		discardUnknown(MessageReflect(m))
+	}
 }
 
+func discardUnknown(m protoreflect.Message) {
+	m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+		switch {
+		// Handle singular message.
+		case fd.Cardinality() != protoreflect.Repeated:
+			if fd.Message() != nil {
+				discardUnknown(m.Get(fd).Message())
+			}
+		// Handle list of messages.
+		case fd.IsList():
+			if fd.Message() != nil {
+				ls := m.Get(fd).List()
+				for i := 0; i < ls.Len(); i++ {
+					discardUnknown(ls.Get(i).Message())
+				}
+			}
+		// Handle map of messages.
+		case fd.IsMap():
+			if fd.MapValue().Message() != nil {
+				ms := m.Get(fd).Map()
+				ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+					discardUnknown(v.Message())
+					return true
+				})
+			}
+		}
+		return true
+	})
+
+	// Discard unknown fields.
+	if len(m.GetUnknown()) > 0 {
+		m.SetUnknown(nil)
+	}
+}
-
-// DiscardUnknown recursively discards all unknown fields.
-func (a *InternalMessageInfo) DiscardUnknown(m Message) {
-	di := atomicLoadDiscardInfo(&a.discard)
-	if di == nil {
-		di = getDiscardInfo(reflect.TypeOf(m).Elem())
-		atomicStoreDiscardInfo(&a.discard, di)
-	}
-	di.discard(toPointer(&m))
-}
-
-type discardInfo struct {
-	typ reflect.Type
-
-	initialized int32 // 0: only typ is valid, 1: everything is valid
-	lock        sync.Mutex
-
-	fields       []discardFieldInfo
-	unrecognized field
-}
-
-type discardFieldInfo struct {
-	field   field // Offset of field, guaranteed to be valid
-	discard func(src pointer)
-}
-
-var (
-	discardInfoMap  = map[reflect.Type]*discardInfo{}
-	discardInfoLock sync.Mutex
-)
-
-func getDiscardInfo(t reflect.Type) *discardInfo {
-	discardInfoLock.Lock()
-	defer discardInfoLock.Unlock()
-	di := discardInfoMap[t]
-	if di == nil {
-		di = &discardInfo{typ: t}
-		discardInfoMap[t] = di
-	}
-	return di
-}
-
-func (di *discardInfo) discard(src pointer) {
-	if src.isNil() {
-		return // Nothing to do.
-	}
-
-	if atomic.LoadInt32(&di.initialized) == 0 {
-		di.computeDiscardInfo()
-	}
-
-	for _, fi := range di.fields {
-		sfp := src.offset(fi.field)
-		fi.discard(sfp)
-	}
-
-	// For proto2 messages, only discard unknown fields in message extensions
-	// that have been accessed via GetExtension.
-	if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
-		// Ignore lock since DiscardUnknown is not concurrency safe.
-		emm, _ := em.extensionsRead()
-		for _, mx := range emm {
-			if m, ok := mx.value.(Message); ok {
-				DiscardUnknown(m)
-			}
-		}
-	}
-
-	if di.unrecognized.IsValid() {
-		*src.offset(di.unrecognized).toBytes() = nil
-	}
-}
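The doc comment above explains why a caller may want to drop unrecognized fields before re-marshaling. A minimal sketch of such a caller, assuming a hypothetical protoc-gen-go generated type examplepb.Config (not part of this repository):

package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	examplepb "example.invalid/gen/examplepb" // hypothetical generated package
)

func main() {
	var wire []byte // bytes produced by a newer schema, may carry unknown fields
	msg := &examplepb.Config{}
	if err := proto.Unmarshal(wire, msg); err != nil {
		log.Fatal(err)
	}
	proto.DiscardUnknown(msg) // drop unknown fields from msg and all embedded messages
	out, err := proto.Marshal(msg)
	if err != nil {
		log.Fatal(err)
	}
	_ = out // out no longer round-trips the unknown fields
}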
203 vendor/github.com/golang/protobuf/proto/encode.go (generated, vendored)
@@ -1,203 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-package proto
-
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
-import (
-	"errors"
-	"reflect"
-)
-
-var (
-	// errRepeatedHasNil is the error returned if Marshal is called with
-	// a struct with a repeated field containing a nil element.
-	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
-
-	// errOneofHasNil is the error returned if Marshal is called with
-	// a struct with a oneof field containing a nil element.
-	errOneofHasNil = errors.New("proto: oneof field has nil value")
-
-	// ErrNil is the error returned if Marshal is called with nil.
-	ErrNil = errors.New("proto: Marshal called with nil")
-
-	// ErrTooLarge is the error returned if Marshal is called with a
-	// message that encodes to >2GB.
-	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
-)
-
-// The fundamental encoders that put bytes on the wire.
-// Those that take integer types all accept uint64 and are
-// therefore of type valueEncoder.
-
-const maxVarintBytes = 10 // maximum length of a varint
-
-// EncodeVarint returns the varint encoding of x.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-// Not used by the package itself, but helpful to clients
-// wishing to use the same encoding.
-func EncodeVarint(x uint64) []byte {
-	var buf [maxVarintBytes]byte
-	var n int
-	for n = 0; x > 127; n++ {
-		buf[n] = 0x80 | uint8(x&0x7F)
-		x >>= 7
-	}
-	buf[n] = uint8(x)
-	n++
-	return buf[0:n]
-}
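The removed EncodeVarint above documents the base-128 varint wire format. A small sketch using the package-level varint helpers from github.com/golang/protobuf/proto:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.EncodeVarint(300)       // 300 = 0b10_0101100: emit 7 bits at a time, low group first
	fmt.Printf("% x\n", b)             // ac 02
	fmt.Println(proto.SizeVarint(300)) // 2
}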
-// EncodeVarint writes a varint-encoded integer to the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) EncodeVarint(x uint64) error {
-	for x >= 1<<7 {
-		p.buf = append(p.buf, uint8(x&0x7f|0x80))
-		x >>= 7
-	}
-	p.buf = append(p.buf, uint8(x))
-	return nil
-}
-
-// SizeVarint returns the varint encoding size of an integer.
-func SizeVarint(x uint64) int {
-	switch {
-	case x < 1<<7:
-		return 1
-	case x < 1<<14:
-		return 2
-	case x < 1<<21:
-		return 3
-	case x < 1<<28:
-		return 4
-	case x < 1<<35:
-		return 5
-	case x < 1<<42:
-		return 6
-	case x < 1<<49:
-		return 7
-	case x < 1<<56:
-		return 8
-	case x < 1<<63:
-		return 9
-	}
-	return 10
-}
-
-// EncodeFixed64 writes a 64-bit integer to the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) EncodeFixed64(x uint64) error {
-	p.buf = append(p.buf,
-		uint8(x),
-		uint8(x>>8),
-		uint8(x>>16),
-		uint8(x>>24),
-		uint8(x>>32),
-		uint8(x>>40),
-		uint8(x>>48),
-		uint8(x>>56))
-	return nil
-}
-
-// EncodeFixed32 writes a 32-bit integer to the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) EncodeFixed32(x uint64) error {
-	p.buf = append(p.buf,
-		uint8(x),
-		uint8(x>>8),
-		uint8(x>>16),
-		uint8(x>>24))
-	return nil
-}
-
-// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
-// to the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) EncodeZigzag64(x uint64) error {
-	// use signed number to get arithmetic right shift.
-	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-
-// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
-// to the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) EncodeZigzag32(x uint64) error {
-	// use signed number to get arithmetic right shift.
-	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
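The zigzag comments above describe how sint32/sint64 values map onto unsigned varints so that small negative numbers stay short (0 maps to 0, -1 to 1, 1 to 2, -2 to 3, and so on). A brief sketch of that mapping using the Buffer type from this package:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	buf := proto.NewBuffer(nil)
	// (x << 1) ^ (x >> 63) maps -3 to 5, which then fits in a single varint byte.
	_ = buf.EncodeZigzag64(uint64(int64(-3)))
	fmt.Printf("% x\n", buf.Bytes()) // 05
}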
-// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) EncodeRawBytes(b []byte) error {
-	p.EncodeVarint(uint64(len(b)))
-	p.buf = append(p.buf, b...)
-	return nil
-}
-
-// EncodeStringBytes writes an encoded string to the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) EncodeStringBytes(s string) error {
-	p.EncodeVarint(uint64(len(s)))
-	p.buf = append(p.buf, s...)
-	return nil
-}
-
-// Marshaler is the interface representing objects that can marshal themselves.
-type Marshaler interface {
-	Marshal() ([]byte, error)
-}
-
-// EncodeMessage writes the protocol buffer to the Buffer,
-// prefixed by a varint-encoded length.
-func (p *Buffer) EncodeMessage(pb Message) error {
-	siz := Size(pb)
-	p.EncodeVarint(uint64(siz))
-	return p.Marshal(pb)
-}
-
-// All protocol buffer fields are nillable, but be careful.
-func isNil(v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
-		return v.IsNil()
-	}
-	return false
-}
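EncodeRawBytes and EncodeStringBytes above show the length-delimited wire form used for bytes, string, and embedded messages: a varint length followed by the payload. A tiny sketch:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	buf := proto.NewBuffer(nil)
	_ = buf.EncodeStringBytes("hi")
	fmt.Printf("% x\n", buf.Bytes()) // 02 68 69: length 2, then "hi"
}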
301 vendor/github.com/golang/protobuf/proto/equal.go (generated, vendored)
@@ -1,301 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-// Protocol buffer comparison.
-
-package proto
-
-import (
-	"bytes"
-	"log"
-	"reflect"
-	"strings"
-)
-
-/*
-Equal returns true iff protocol buffers a and b are equal.
-The arguments must both be pointers to protocol buffer structs.
-
-Equality is defined in this way:
-  - Two messages are equal iff they are the same type,
-    corresponding fields are equal, unknown field sets
-    are equal, and extensions sets are equal.
-  - Two set scalar fields are equal iff their values are equal.
-    If the fields are of a floating-point type, remember that
-    NaN != x for all x, including NaN. If the message is defined
-    in a proto3 .proto file, fields are not "set"; specifically,
-    zero length proto3 "bytes" fields are equal (nil == {}).
-  - Two repeated fields are equal iff their lengths are the same,
-    and their corresponding elements are equal. Note a "bytes" field,
-    although represented by []byte, is not a repeated field and the
-    rule for the scalar fields described above applies.
-  - Two unset fields are equal.
-  - Two unknown field sets are equal if their current
-    encoded state is equal.
-  - Two extension sets are equal iff they have corresponding
-    elements that are pairwise equal.
-  - Two map fields are equal iff their lengths are the same,
-    and they contain the same set of elements. Zero-length map
-    fields are equal.
-  - Every other combination of things are not equal.
-
-The return value is undefined if a and b are not protocol buffers.
-*/
-func Equal(a, b Message) bool {
-	if a == nil || b == nil {
-		return a == b
-	}
-	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
-	if v1.Type() != v2.Type() {
-		return false
-	}
-	if v1.Kind() == reflect.Ptr {
-		if v1.IsNil() {
-			return v2.IsNil()
-		}
-		if v2.IsNil() {
-			return false
-		}
-		v1, v2 = v1.Elem(), v2.Elem()
-	}
-	if v1.Kind() != reflect.Struct {
-		return false
-	}
-	return equalStruct(v1, v2)
-}
-
-// v1 and v2 are known to have the same type.
-func equalStruct(v1, v2 reflect.Value) bool {
-
-// v1 and v2 are known to have the same type.
-// prop may be nil.
-func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
-
-// base is the struct type that the extensions are based on.
-// x1 and x2 are InternalExtensions.
-func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
-
-func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
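The Equal documentation above spells out the comparison rules, including the proto3 rule that nil and zero-length bytes compare equal. A hedged sketch, using a hypothetical generated proto3 type examplepb.Item:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	examplepb "example.invalid/gen/examplepb" // hypothetical generated package
)

func main() {
	a := &examplepb.Item{Name: "x"}                    // Payload left nil
	b := &examplepb.Item{Name: "x", Payload: []byte{}} // zero-length bytes
	fmt.Println(proto.Equal(a, b)) // true: in proto3, nil and empty bytes are both "unset"
}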
845 vendor/github.com/golang/protobuf/proto/extensions.go (generated, vendored)
@@ -1,607 +1,356 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 package proto
 
-/*
- * Types and routines for supporting protocol buffer extensions.
- */
-
 import (
 	"errors"
 	"fmt"
-	"io"
 	"reflect"
-	"strconv"
-	"sync"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/runtime/protoiface"
+	"google.golang.org/protobuf/runtime/protoimpl"
 )
 
+type (
+	// ExtensionDesc represents an extension descriptor and
+	// is used to interact with an extension field in a message.
+	//
+	// Variables of this type are generated in code by protoc-gen-go.
+	ExtensionDesc = protoimpl.ExtensionInfo
+
+	// ExtensionRange represents a range of message extensions.
+	// Used in code generated by protoc-gen-go.
+	ExtensionRange = protoiface.ExtensionRangeV1
+
+	// Deprecated: Do not use; this is an internal type.
+	Extension = protoimpl.ExtensionFieldV1
+
+	// Deprecated: Do not use; this is an internal type.
+	XXX_InternalExtensions = protoimpl.ExtensionFields
+)
+
+// ErrMissingExtension reports whether the extension was not present.
 var ErrMissingExtension = errors.New("proto: missing extension")
 
-// ExtensionRange represents a range of message extensions for a protocol buffer.
-// Used in code generated by the protocol compiler.
-type ExtensionRange struct {
-	Start, End int32 // both inclusive
-}
-
-// extendableProto is an interface implemented by any protocol buffer generated by the current
-// proto compiler that may be extended.
-type extendableProto interface {
-	Message
-	ExtensionRangeArray() []ExtensionRange
-	extensionsWrite() map[int32]Extension
-	extensionsRead() (map[int32]Extension, sync.Locker)
-}
-
 var errNotExtendable = errors.New("proto: not an extendable proto.Message")
 
-// ExtensionDesc represents an extension specification.
-// Used in generated code from the protocol compiler.
-type ExtensionDesc struct {
-	ExtendedType  Message     // nil pointer to the type that is being extended
-	ExtensionType interface{} // nil pointer to the extension type
-	Field         int32       // field number
-	Name          string      // fully-qualified name of extension, for text formatting
-	Tag           string      // protobuf tag style
-	Filename      string      // name of the file in which the extension is defined
-}
-
-// SetRawExtension is for testing only.
-func SetRawExtension(base Message, id int32, b []byte) {
-
-// isExtensionField returns true iff the given field number is in an extension range.
-func isExtensionField(pb extendableProto, field int32) bool {
-
+// HasExtension reports whether the extension field is present in m
+// either as an explicitly populated field or as an unknown field.
+func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() {
+		return false
+	}
+
+	// Check whether any populated known field matches the field number.
+	xtd := xt.TypeDescriptor()
+	if isValidExtension(mr.Descriptor(), xtd) {
+		has = mr.Has(xtd)
+	} else {
+		mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+			has = int32(fd.Number()) == xt.Field
+			return !has
+		})
+	}
+
+	// Check whether any unknown field matches the field number.
+	for b := mr.GetUnknown(); !has && len(b) > 0; {
+		num, _, n := protowire.ConsumeField(b)
+		has = int32(num) == xt.Field
+		b = b[n:]
+	}
+	return has
+}
+
+// ClearExtension removes the extension field from m
+// either as an explicitly populated field or as an unknown field.
+func ClearExtension(m Message, xt *ExtensionDesc) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() {
+		return
+	}
+
+	xtd := xt.TypeDescriptor()
+	if isValidExtension(mr.Descriptor(), xtd) {
+		mr.Clear(xtd)
+	} else {
+		mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+			if int32(fd.Number()) == xt.Field {
+				mr.Clear(fd)
+				return false
+			}
+			return true
+		})
+	}
+	clearUnknown(mr, fieldNum(xt.Field))
+}
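HasExtension and ClearExtension above, together with GetExtension and SetExtension further down, make up the proto2 extension API that ceph-csi's dependencies pull in through this vendored package. A hedged sketch with hypothetical generated proto2 types examplepb.Base and its extension descriptor examplepb.E_Owner:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	examplepb "example.invalid/gen/examplepb" // hypothetical generated package
)

func main() {
	m := &examplepb.Base{}
	if err := proto.SetExtension(m, examplepb.E_Owner, proto.String("admin")); err != nil {
		log.Fatal(err)
	}
	fmt.Println(proto.HasExtension(m, examplepb.E_Owner)) // true
	v, err := proto.GetExtension(m, examplepb.E_Owner)    // *string, or ErrMissingExtension
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*v.(*string)) // admin
	proto.ClearExtension(m, examplepb.E_Owner)
}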
|
||||||
// checkExtensionTypes checks that the given extension is valid for pb.
|
// ClearAllExtensions clears all extensions from m.
|
||||||
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
|
// This includes populated fields and unknown fields in the extension range.
|
||||||
var pbi interface{} = pb
|
func ClearAllExtensions(m Message) {
|
||||||
// Check the extended type.
|
mr := MessageReflect(m)
|
||||||
if ea, ok := pbi.(extensionAdapter); ok {
|
if mr == nil || !mr.IsValid() {
|
||||||
pbi = ea.extendableProtoV1
|
|
||||||
}
|
|
||||||
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
|
|
||||||
return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
|
|
||||||
}
|
|
||||||
// Check the range.
|
|
||||||
if !isExtensionField(pb, extension.Field) {
|
|
||||||
return errors.New("proto: bad extension number; not in declared ranges")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// extPropKey is sufficient to uniquely identify an extension.
|
|
||||||
type extPropKey struct {
|
|
||||||
base reflect.Type
|
|
||||||
field int32
|
|
||||||
}
|
|
||||||
|
|
||||||
var extProp = struct {
|
|
||||||
sync.RWMutex
|
|
||||||
m map[extPropKey]*Properties
|
|
||||||
}{
|
|
||||||
m: make(map[extPropKey]*Properties),
|
|
||||||
}
|
|
||||||
|
|
||||||
func extensionProperties(ed *ExtensionDesc) *Properties {
|
|
||||||
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
|
|
||||||
|
|
||||||
extProp.RLock()
|
|
||||||
if prop, ok := extProp.m[key]; ok {
|
|
||||||
extProp.RUnlock()
|
|
||||||
return prop
|
|
||||||
}
|
|
||||||
extProp.RUnlock()
|
|
||||||
|
|
||||||
extProp.Lock()
|
|
||||||
defer extProp.Unlock()
|
|
||||||
// Check again.
|
|
||||||
if prop, ok := extProp.m[key]; ok {
|
|
||||||
return prop
|
|
||||||
}
|
|
||||||
|
|
||||||
prop := new(Properties)
|
|
||||||
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
|
|
||||||
extProp.m[key] = prop
|
|
||||||
return prop
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasExtension returns whether the given extension is present in pb.
|
|
||||||
func HasExtension(pb Message, extension *ExtensionDesc) bool {
|
|
||||||
// TODO: Check types, field numbers, etc.?
|
|
||||||
epb, err := extendable(pb)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
extmap, mu := epb.extensionsRead()
|
|
||||||
if extmap == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
mu.Lock()
|
|
||||||
_, ok := extmap[extension.Field]
|
|
||||||
mu.Unlock()
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClearExtension removes the given extension from pb.
|
|
||||||
func ClearExtension(pb Message, extension *ExtensionDesc) {
|
|
||||||
epb, err := extendable(pb)
|
|
||||||
if err != nil {
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// TODO: Check types, field numbers, etc.?
|
|
||||||
extmap := epb.extensionsWrite()
|
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
|
||||||
delete(extmap, extension.Field)
|
if fd.IsExtension() {
|
||||||
|
mr.Clear(fd)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
clearUnknown(mr, mr.Descriptor().ExtensionRanges())
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetExtension retrieves a proto2 extended field from pb.
|
// GetExtension retrieves a proto2 extended field from m.
|
||||||
//
|
//
|
||||||
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
|
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
|
||||||
// then GetExtension parses the encoded field and returns a Go value of the specified type.
|
// then GetExtension parses the encoded field and returns a Go value of the specified type.
|
||||||
// If the field is not present, then the default value is returned (if one is specified),
|
// If the field is not present, then the default value is returned (if one is specified),
|
||||||
// otherwise ErrMissingExtension is reported.
|
// otherwise ErrMissingExtension is reported.
|
||||||
//
|
//
|
||||||
// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
|
// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
|
||||||
// then GetExtension returns the raw encoded bytes of the field extension.
|
// then GetExtension returns the raw encoded bytes for the extension field.
|
||||||
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
|
func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
|
||||||
epb, err := extendable(pb)
|
mr := MessageReflect(m)
|
||||||
if err != nil {
|
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
|
||||||
return nil, err
|
return nil, errNotExtendable
|
||||||
}
|
}
|
||||||
|
|
||||||
if extension.ExtendedType != nil {
|
// Retrieve the unknown fields for this extension field.
|
||||||
// can only check type if this is a complete descriptor
|
var bo protoreflect.RawFields
|
||||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
for bi := mr.GetUnknown(); len(bi) > 0; {
|
||||||
|
num, _, n := protowire.ConsumeField(bi)
|
||||||
|
if int32(num) == xt.Field {
|
||||||
|
bo = append(bo, bi[:n]...)
|
||||||
|
}
|
||||||
|
bi = bi[n:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// For type incomplete descriptors, only retrieve the unknown fields.
|
||||||
|
if xt.ExtensionType == nil {
|
||||||
|
return []byte(bo), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the extension field only exists as unknown fields, unmarshal it.
|
||||||
|
// This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
|
||||||
|
xtd := xt.TypeDescriptor()
|
||||||
|
if !isValidExtension(mr.Descriptor(), xtd) {
|
||||||
|
return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
|
||||||
|
}
|
||||||
|
if !mr.Has(xtd) && len(bo) > 0 {
|
||||||
|
m2 := mr.New()
|
||||||
|
if err := (proto.UnmarshalOptions{
|
||||||
|
Resolver: extensionResolver{xt},
|
||||||
|
}.Unmarshal(bo, m2.Interface())); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
if m2.Has(xtd) {
|
||||||
|
mr.Set(xtd, m2.Get(xtd))
|
||||||
emap, mu := epb.extensionsRead()
|
clearUnknown(mr, fieldNum(xt.Field))
|
||||||
if emap == nil {
|
|
||||||
return defaultExtensionValue(extension)
|
|
||||||
}
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
e, ok := emap[extension.Field]
|
|
||||||
if !ok {
|
|
||||||
// defaultExtensionValue returns the default value or
|
|
||||||
// ErrMissingExtension if there is no default.
|
|
||||||
return defaultExtensionValue(extension)
|
|
||||||
}
|
|
||||||
|
|
||||||
if e.value != nil {
|
|
||||||
// Already decoded. Check the descriptor, though.
|
|
||||||
if e.desc != extension {
|
|
||||||
// This shouldn't happen. If it does, it means that
|
|
||||||
// GetExtension was called twice with two different
|
|
||||||
// descriptors with the same field number.
|
|
||||||
return nil, errors.New("proto: descriptor conflict")
|
|
||||||
}
|
}
|
||||||
return extensionAsLegacyType(e.value), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if extension.ExtensionType == nil {
|
// Check whether the message has the extension field set or a default.
|
||||||
// incomplete descriptor
|
var pv protoreflect.Value
|
||||||
return e.enc, nil
|
switch {
|
||||||
}
|
case mr.Has(xtd):
|
||||||
|
pv = mr.Get(xtd)
|
||||||
v, err := decodeExtension(e.enc, extension)
|
case xtd.HasDefault():
|
||||||
if err != nil {
|
pv = xtd.Default()
|
||||||
return nil, err
|
default:
|
||||||
}
|
|
||||||
|
|
||||||
// Remember the decoded version and drop the encoded version.
|
|
||||||
// That way it is safe to mutate what we return.
|
|
||||||
e.value = extensionAsStorageType(v)
|
|
||||||
e.desc = extension
|
|
||||||
e.enc = nil
|
|
||||||
emap[extension.Field] = e
|
|
||||||
return extensionAsLegacyType(e.value), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// defaultExtensionValue returns the default value for extension.
|
|
||||||
// If no default for an extension is defined ErrMissingExtension is returned.
|
|
||||||
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
|
|
||||||
if extension.ExtensionType == nil {
|
|
||||||
// incomplete descriptor, so no default
|
|
||||||
return nil, ErrMissingExtension
|
return nil, ErrMissingExtension
|
||||||
}
|
}
|
||||||
|
|
||||||
t := reflect.TypeOf(extension.ExtensionType)
|
v := xt.InterfaceOf(pv)
|
||||||
props := extensionProperties(extension)
|
rv := reflect.ValueOf(v)
|
||||||
|
if isScalarKind(rv.Kind()) {
|
||||||
sf, _, err := fieldDefault(t, props)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if sf == nil || sf.value == nil {
|
|
||||||
// There is no default value.
|
|
||||||
return nil, ErrMissingExtension
|
|
||||||
}
|
|
||||||
|
|
||||||
if t.Kind() != reflect.Ptr {
|
|
||||||
// We do not need to return a Ptr, we can directly return sf.value.
|
|
||||||
return sf.value, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// We need to return an interface{} that is a pointer to sf.value.
|
|
||||||
value := reflect.New(t).Elem()
|
|
||||||
value.Set(reflect.New(value.Type().Elem()))
|
|
||||||
if sf.kind == reflect.Int32 {
|
|
||||||
// We may have an int32 or an enum, but the underlying data is int32.
|
|
||||||
// Since we can't set an int32 into a non int32 reflect.value directly
|
|
||||||
// set it as a int32.
|
|
||||||
value.Elem().SetInt(int64(sf.value.(int32)))
|
|
||||||
} else {
|
|
||||||
value.Elem().Set(reflect.ValueOf(sf.value))
|
|
||||||
}
|
|
||||||
return value.Interface(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// decodeExtension decodes an extension encoded in b.
|
|
||||||
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
|
|
||||||
t := reflect.TypeOf(extension.ExtensionType)
|
|
||||||
unmarshal := typeUnmarshaler(t, extension.Tag)
|
|
||||||
|
|
||||||
// t is a pointer to a struct, pointer to basic type or a slice.
|
|
||||||
// Allocate space to store the pointer/slice.
|
|
||||||
value := reflect.New(t).Elem()
|
|
||||||
|
|
||||||
var err error
|
|
||||||
for {
|
|
||||||
x, n := decodeVarint(b)
|
|
||||||
if n == 0 {
|
|
||||||
return nil, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b = b[n:]
|
|
||||||
wire := int(x) & 7
|
|
||||||
|
|
||||||
b, err = unmarshal(b, valToPointer(value.Addr()), wire)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(b) == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return value.Interface(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
|
|
||||||
// The returned slice has the same length as es; missing extensions will appear as nil elements.
|
|
||||||
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
|
|
||||||
epb, err := extendable(pb)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
extensions = make([]interface{}, len(es))
|
|
||||||
for i, e := range es {
|
|
||||||
extensions[i], err = GetExtension(epb, e)
|
|
||||||
if err == ErrMissingExtension {
|
|
||||||
err = nil
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
    epb, err := extendable(pb)
    if err != nil {
        return nil, err
    }
    registeredExtensions := RegisteredExtensions(pb)

    emap, mu := epb.extensionsRead()
    if emap == nil {
        return nil, nil
    }
    mu.Lock()
    defer mu.Unlock()
    extensions := make([]*ExtensionDesc, 0, len(emap))
    for extid, e := range emap {
        desc := e.desc
        if desc == nil {
            desc = registeredExtensions[extid]
            if desc == nil {
                desc = &ExtensionDesc{Field: extid}
            }
        }

        extensions = append(extensions, desc)
    }
    return extensions, nil
}

// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
    epb, err := extendable(pb)
    if err != nil {
        return err
    }
    if err := checkExtensionTypes(epb, extension); err != nil {
        return err
    }
    typ := reflect.TypeOf(extension.ExtensionType)
    if typ != reflect.TypeOf(value) {
        return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
    }
    // nil extension values need to be caught early, because the
    // encoder can't distinguish an ErrNil due to a nil extension
    // from an ErrNil due to a missing field. Extensions are
    // always optional, so the encoder would just swallow the error
    // and drop all the extensions from the encoded message.
    if reflect.ValueOf(value).IsNil() {
        return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
    }

    extmap := epb.extensionsWrite()
    extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
    return nil
}

// ClearAllExtensions clears all extensions from pb.
func ClearAllExtensions(pb Message) {
    epb, err := extendable(pb)
    if err != nil {
        return
    }
    m := epb.extensionsWrite()
    for k := range m {
        delete(m, k)
    }
}

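For orientation, here is a minimal usage sketch of how this v1 extension API fits together. It is not part of the vendored file; pb.MyMessage and pb.E_Ext are hypothetical stand-ins for a generated message with an extension range and its generated *proto.ExtensionDesc.

package main

import (
    "fmt"
    "log"

    "github.com/golang/protobuf/proto"

    pb "example.com/hypothetical/pb" // hypothetical generated package
)

func main() {
    msg := &pb.MyMessage{}

    // SetExtension type-checks the value against the descriptor's ExtensionType.
    if err := proto.SetExtension(msg, pb.E_Ext, proto.String("value")); err != nil {
        log.Fatal(err)
    }

    // GetExtension returns interface{}; the caller asserts the concrete type.
    v, err := proto.GetExtension(msg, pb.E_Ext)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(*v.(*string))

    // ClearAllExtensions removes every extension stored on msg.
    proto.ClearAllExtensions(msg)
}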
// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.

var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)

// RegisterExtension is called from the generated code.
func RegisterExtension(desc *ExtensionDesc) {
    st := reflect.TypeOf(desc.ExtendedType).Elem()
    m := extensionMaps[st]
    if m == nil {
        m = make(map[int32]*ExtensionDesc)
        extensionMaps[st] = m
    }
    if _, ok := m[desc.Field]; ok {
        panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
    }
    m[desc.Field] = desc
}

// RegisteredExtensions returns a map of the registered extensions of a
// protocol buffer struct, indexed by the extension number.
// The argument pb should be a nil pointer to the struct type.
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
    return extensionMaps[reflect.TypeOf(pb).Elem()]
}

// extensionAsLegacyType converts an value in the storage type as the API type.
// See Extension.value.
func extensionAsLegacyType(v interface{}) interface{} {
    switch rv := reflect.ValueOf(v); rv.Kind() {
    case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
        // Represent primitive types as a pointer to the value.
        rv2 := reflect.New(rv.Type())
        rv2.Elem().Set(rv)
        v = rv2.Interface()
    case reflect.Ptr:
        // Represent slice types as the value itself.
        switch rv.Type().Elem().Kind() {
        case reflect.Slice:
            if rv.IsNil() {
                v = reflect.Zero(rv.Type().Elem()).Interface()
            } else {
                v = rv.Elem().Interface()
            }
        }
    }
    return v
}

// extensionAsStorageType converts an value in the API type as the storage type.
// See Extension.value.
func extensionAsStorageType(v interface{}) interface{} {
    switch rv := reflect.ValueOf(v); rv.Kind() {
    case reflect.Ptr:
        // Represent slice types as the value itself.
        switch rv.Type().Elem().Kind() {
        case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
            if rv.IsNil() {
                v = reflect.Zero(rv.Type().Elem()).Interface()
            } else {
                v = rv.Elem().Interface()
            }
        }
    case reflect.Slice:
        // Represent slice types as a pointer to the value.
        if rv.Type().Elem().Kind() != reflect.Uint8 {
            rv2 := reflect.New(rv.Type())
            rv2.Elem().Set(rv)
            v = rv2.Interface()
        }
    }
    return v
}

(The lines above are the remainder of the removed helpers; the lines below are the added implementation from the updated package. The added block begins with the tail of the new GetExtension.)

        rv2 := reflect.New(rv.Type())
        rv2.Elem().Set(rv)
        v = rv2.Interface()
    }
    return v, nil
}

// extensionResolver is a custom extension resolver that stores a single
// extension type that takes precedence over the global registry.
type extensionResolver struct{ xt protoreflect.ExtensionType }

func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
    if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
        return r.xt, nil
    }
    return protoregistry.GlobalTypes.FindExtensionByName(field)
}

func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
    if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
        return r.xt, nil
    }
    return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}

// GetExtensions returns a list of the extensions values present in m,
// corresponding with the provided list of extension descriptors, xts.
// If an extension is missing in m, the corresponding value is nil.
func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
    mr := MessageReflect(m)
    if mr == nil || !mr.IsValid() {
        return nil, errNotExtendable
    }

    vs := make([]interface{}, len(xts))
    for i, xt := range xts {
        v, err := GetExtension(m, xt)
        if err != nil {
            if err == ErrMissingExtension {
                continue
            }
            return vs, err
        }
        vs[i] = v
    }
    return vs, nil
}

// SetExtension sets an extension field in m to the provided value.
func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
    mr := MessageReflect(m)
    if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
        return errNotExtendable
    }

    rv := reflect.ValueOf(v)
    if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
        return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
    }
    if rv.Kind() == reflect.Ptr {
        if rv.IsNil() {
            return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
        }
        if isScalarKind(rv.Elem().Kind()) {
            v = rv.Elem().Interface()
        }
    }

    xtd := xt.TypeDescriptor()
    if !isValidExtension(mr.Descriptor(), xtd) {
        return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
    }
    mr.Set(xtd, xt.ValueOf(v))
    clearUnknown(mr, fieldNum(xt.Field))
    return nil
}

// SetRawExtension inserts b into the unknown fields of m.
//
// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
func SetRawExtension(m Message, fnum int32, b []byte) {
    mr := MessageReflect(m)
    if mr == nil || !mr.IsValid() {
        return
    }

    // Verify that the raw field is valid.
    for b0 := b; len(b0) > 0; {
        num, _, n := protowire.ConsumeField(b0)
        if int32(num) != fnum {
            panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
        }
        b0 = b0[n:]
    }

    ClearExtension(m, &ExtensionDesc{Field: fnum})
    mr.SetUnknown(append(mr.GetUnknown(), b...))
}

// ExtensionDescs returns a list of extension descriptors found in m,
// containing descriptors for both populated extension fields in m and
// also unknown fields of m that are in the extension range.
// For the later case, an type incomplete descriptor is provided where only
// the ExtensionDesc.Field field is populated.
// The order of the extension descriptors is undefined.
func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
    mr := MessageReflect(m)
    if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
        return nil, errNotExtendable
    }

    // Collect a set of known extension descriptors.
    extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
    mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
        if fd.IsExtension() {
            xt := fd.(protoreflect.ExtensionTypeDescriptor)
            if xd, ok := xt.Type().(*ExtensionDesc); ok {
                extDescs[fd.Number()] = xd
            }
        }
        return true
    })

    // Collect a set of unknown extension descriptors.
    extRanges := mr.Descriptor().ExtensionRanges()
    for b := mr.GetUnknown(); len(b) > 0; {
        num, _, n := protowire.ConsumeField(b)
        if extRanges.Has(num) && extDescs[num] == nil {
            extDescs[num] = nil
        }
        b = b[n:]
    }

    // Transpose the set of descriptors into a list.
    var xts []*ExtensionDesc
    for num, xt := range extDescs {
        if xt == nil {
            xt = &ExtensionDesc{Field: int32(num)}
        }
        xts = append(xts, xt)
    }
    return xts, nil
}

// isValidExtension reports whether xtd is a valid extension descriptor for md.
func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
    return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
}

// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
// This function exists for historical reasons since the representation of
// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
func isScalarKind(k reflect.Kind) bool {
    switch k {
    case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
        return true
    default:
        return false
    }
}

// clearUnknown removes unknown fields from m where remover.Has reports true.
func clearUnknown(m protoreflect.Message, remover interface {
    Has(protoreflect.FieldNumber) bool
}) {
    var bo protoreflect.RawFields
    for bi := m.GetUnknown(); len(bi) > 0; {
        num, _, n := protowire.ConsumeField(bi)
        if !remover.Has(num) {
            bo = append(bo, bi[:n]...)
        }
        bi = bi[n:]
    }
    if bi := m.GetUnknown(); len(bi) != len(bo) {
        m.SetUnknown(bo)
    }
}

type fieldNum protoreflect.FieldNumber

func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
    return protoreflect.FieldNumber(n1) == n2
}

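The loops in SetRawExtension, ExtensionDescs, and clearUnknown all walk raw unknown-field bytes with protowire.ConsumeField. A small self-contained sketch of that pattern, using hand-built field data rather than a real message:

package main

import (
    "fmt"

    "google.golang.org/protobuf/encoding/protowire"
)

func main() {
    // Two fields: field 5 as varint 150, field 99 as the bytes "hi".
    raw := protowire.AppendTag(nil, 5, protowire.VarintType)
    raw = protowire.AppendVarint(raw, 150)
    raw = protowire.AppendTag(raw, 99, protowire.BytesType)
    raw = protowire.AppendBytes(raw, []byte("hi"))

    // Walk the buffer one field at a time, the same way the code above does.
    for b := raw; len(b) > 0; {
        num, typ, n := protowire.ConsumeField(b)
        fmt.Println("field", num, "wire type", typ, "consumed", n, "bytes")
        b = b[n:]
    }
}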
965  vendor/github.com/golang/protobuf/proto/lib.go (generated, vendored)
@@ -1,965 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package proto converts data structures to and from the wire format of
|
|
||||||
protocol buffers. It works in concert with the Go source code generated
|
|
||||||
for .proto files by the protocol compiler.
|
|
||||||
|
|
||||||
A summary of the properties of the protocol buffer interface
|
|
||||||
for a protocol buffer variable v:
|
|
||||||
|
|
||||||
- Names are turned from camel_case to CamelCase for export.
|
|
||||||
- There are no methods on v to set fields; just treat
|
|
||||||
them as structure fields.
|
|
||||||
- There are getters that return a field's value if set,
|
|
||||||
and return the field's default value if unset.
|
|
||||||
The getters work even if the receiver is a nil message.
|
|
||||||
- The zero value for a struct is its correct initialization state.
|
|
||||||
All desired fields must be set before marshaling.
|
|
||||||
- A Reset() method will restore a protobuf struct to its zero state.
|
|
||||||
- Non-repeated fields are pointers to the values; nil means unset.
|
|
||||||
That is, optional or required field int32 f becomes F *int32.
|
|
||||||
- Repeated fields are slices.
|
|
||||||
- Helper functions are available to aid the setting of fields.
|
|
||||||
msg.Foo = proto.String("hello") // set field
|
|
||||||
- Constants are defined to hold the default values of all fields that
|
|
||||||
have them. They have the form Default_StructName_FieldName.
|
|
||||||
Because the getter methods handle defaulted values,
|
|
||||||
direct use of these constants should be rare.
|
|
||||||
- Enums are given type names and maps from names to values.
|
|
||||||
Enum values are prefixed by the enclosing message's name, or by the
|
|
||||||
enum's type name if it is a top-level enum. Enum types have a String
|
|
||||||
method, and a Enum method to assist in message construction.
|
|
||||||
- Nested messages, groups and enums have type names prefixed with the name of
|
|
||||||
the surrounding message type.
|
|
||||||
- Extensions are given descriptor names that start with E_,
|
|
||||||
followed by an underscore-delimited list of the nested messages
|
|
||||||
that contain it (if any) followed by the CamelCased name of the
|
|
||||||
extension field itself. HasExtension, ClearExtension, GetExtension
|
|
||||||
and SetExtension are functions for manipulating extensions.
|
|
||||||
- Oneof field sets are given a single field in their message,
|
|
||||||
with distinguished wrapper types for each possible field value.
|
|
||||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
|
||||||
|
|
||||||
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
|
||||||
|
|
||||||
- Non-repeated fields of non-message type are values instead of pointers.
|
|
||||||
- Enum types do not get an Enum method.
|
|
||||||
|
|
||||||
The simplest way to describe this is to see an example.
|
|
||||||
Given file test.proto, containing
|
|
||||||
|
|
||||||
package example;
|
|
||||||
|
|
||||||
enum FOO { X = 17; }
|
|
||||||
|
|
||||||
message Test {
|
|
||||||
required string label = 1;
|
|
||||||
optional int32 type = 2 [default=77];
|
|
||||||
repeated int64 reps = 3;
|
|
||||||
optional group OptionalGroup = 4 {
|
|
||||||
required string RequiredField = 5;
|
|
||||||
}
|
|
||||||
oneof union {
|
|
||||||
int32 number = 6;
|
|
||||||
string name = 7;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
The resulting file, test.pb.go, is:
|
|
||||||
|
|
||||||
package example
|
|
||||||
|
|
||||||
import proto "github.com/golang/protobuf/proto"
|
|
||||||
import math "math"
|
|
||||||
|
|
||||||
type FOO int32
|
|
||||||
const (
|
|
||||||
FOO_X FOO = 17
|
|
||||||
)
|
|
||||||
var FOO_name = map[int32]string{
|
|
||||||
17: "X",
|
|
||||||
}
|
|
||||||
var FOO_value = map[string]int32{
|
|
||||||
"X": 17,
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x FOO) Enum() *FOO {
|
|
||||||
p := new(FOO)
|
|
||||||
*p = x
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
func (x FOO) String() string {
|
|
||||||
return proto.EnumName(FOO_name, int32(x))
|
|
||||||
}
|
|
||||||
func (x *FOO) UnmarshalJSON(data []byte) error {
|
|
||||||
value, err := proto.UnmarshalJSONEnum(FOO_value, data)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*x = FOO(value)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type Test struct {
|
|
||||||
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
|
|
||||||
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
|
|
||||||
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
|
|
||||||
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
|
|
||||||
// Types that are valid to be assigned to Union:
|
|
||||||
// *Test_Number
|
|
||||||
// *Test_Name
|
|
||||||
Union isTest_Union `protobuf_oneof:"union"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
func (m *Test) Reset() { *m = Test{} }
|
|
||||||
func (m *Test) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Test) ProtoMessage() {}
|
|
||||||
|
|
||||||
type isTest_Union interface {
|
|
||||||
isTest_Union()
|
|
||||||
}
|
|
||||||
|
|
||||||
type Test_Number struct {
|
|
||||||
Number int32 `protobuf:"varint,6,opt,name=number"`
|
|
||||||
}
|
|
||||||
type Test_Name struct {
|
|
||||||
Name string `protobuf:"bytes,7,opt,name=name"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*Test_Number) isTest_Union() {}
|
|
||||||
func (*Test_Name) isTest_Union() {}
|
|
||||||
|
|
||||||
func (m *Test) GetUnion() isTest_Union {
|
|
||||||
if m != nil {
|
|
||||||
return m.Union
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
const Default_Test_Type int32 = 77
|
|
||||||
|
|
||||||
func (m *Test) GetLabel() string {
|
|
||||||
if m != nil && m.Label != nil {
|
|
||||||
return *m.Label
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Test) GetType() int32 {
|
|
||||||
if m != nil && m.Type != nil {
|
|
||||||
return *m.Type
|
|
||||||
}
|
|
||||||
return Default_Test_Type
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
|
|
||||||
if m != nil {
|
|
||||||
return m.Optionalgroup
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type Test_OptionalGroup struct {
|
|
||||||
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
|
|
||||||
}
|
|
||||||
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
|
|
||||||
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
|
|
||||||
|
|
||||||
func (m *Test_OptionalGroup) GetRequiredField() string {
|
|
||||||
if m != nil && m.RequiredField != nil {
|
|
||||||
return *m.RequiredField
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Test) GetNumber() int32 {
|
|
||||||
if x, ok := m.GetUnion().(*Test_Number); ok {
|
|
||||||
return x.Number
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Test) GetName() string {
|
|
||||||
if x, ok := m.GetUnion().(*Test_Name); ok {
|
|
||||||
return x.Name
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
|
|
||||||
}
|
|
||||||
|
|
||||||
To create and play with a Test object:
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
pb "./example.pb"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
test := &pb.Test{
|
|
||||||
Label: proto.String("hello"),
|
|
||||||
Type: proto.Int32(17),
|
|
||||||
Reps: []int64{1, 2, 3},
|
|
||||||
Optionalgroup: &pb.Test_OptionalGroup{
|
|
||||||
RequiredField: proto.String("good bye"),
|
|
||||||
},
|
|
||||||
Union: &pb.Test_Name{"fred"},
|
|
||||||
}
|
|
||||||
data, err := proto.Marshal(test)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal("marshaling error: ", err)
|
|
||||||
}
|
|
||||||
newTest := &pb.Test{}
|
|
||||||
err = proto.Unmarshal(data, newTest)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal("unmarshaling error: ", err)
|
|
||||||
}
|
|
||||||
// Now test and newTest contain the same data.
|
|
||||||
if test.GetLabel() != newTest.GetLabel() {
|
|
||||||
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
|
|
||||||
}
|
|
||||||
// Use a type switch to determine which oneof was set.
|
|
||||||
switch u := test.Union.(type) {
|
|
||||||
case *pb.Test_Number: // u.Number contains the number.
|
|
||||||
case *pb.Test_Name: // u.Name contains the string.
|
|
||||||
}
|
|
||||||
// etc.
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
|
|
||||||
// Marshal reports this when a required field is not initialized.
|
|
||||||
// Unmarshal reports this when a required field is missing from the wire data.
|
|
||||||
type RequiredNotSetError struct{ field string }
|
|
||||||
|
|
||||||
func (e *RequiredNotSetError) Error() string {
|
|
||||||
if e.field == "" {
|
|
||||||
return fmt.Sprintf("proto: required field not set")
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("proto: required field %q not set", e.field)
|
|
||||||
}
|
|
||||||
func (e *RequiredNotSetError) RequiredNotSet() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
type invalidUTF8Error struct{ field string }
|
|
||||||
|
|
||||||
func (e *invalidUTF8Error) Error() string {
|
|
||||||
if e.field == "" {
|
|
||||||
return "proto: invalid UTF-8 detected"
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
|
|
||||||
}
|
|
||||||
func (e *invalidUTF8Error) InvalidUTF8() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
|
|
||||||
// This error should not be exposed to the external API as such errors should
|
|
||||||
// be recreated with the field information.
|
|
||||||
var errInvalidUTF8 = &invalidUTF8Error{}
|
|
||||||
|
|
||||||
// isNonFatal reports whether the error is either a RequiredNotSet error
|
|
||||||
// or a InvalidUTF8 error.
|
|
||||||
func isNonFatal(err error) bool {
|
|
||||||
if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
type nonFatal struct{ E error }
|
|
||||||
|
|
||||||
// Merge merges err into nf and reports whether it was successful.
|
|
||||||
// Otherwise it returns false for any fatal non-nil errors.
|
|
||||||
func (nf *nonFatal) Merge(err error) (ok bool) {
|
|
||||||
if err == nil {
|
|
||||||
return true // not an error
|
|
||||||
}
|
|
||||||
if !isNonFatal(err) {
|
|
||||||
return false // fatal error
|
|
||||||
}
|
|
||||||
if nf.E == nil {
|
|
||||||
nf.E = err // store first instance of non-fatal error
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Message is implemented by generated protocol buffer messages.
|
|
||||||
type Message interface {
|
|
||||||
Reset()
|
|
||||||
String() string
|
|
||||||
ProtoMessage()
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
// the global functions Marshal and Unmarshal create a
// temporary Buffer and are fine for most applications.
type Buffer struct {
    buf   []byte // encode/decode byte stream
    index int    // read point

    deterministic bool
}

// NewBuffer allocates a new Buffer and initializes its internal data to
// the contents of the argument slice.
func NewBuffer(e []byte) *Buffer {
    return &Buffer{buf: e}
}

// Reset resets the Buffer, ready for marshaling a new protocol buffer.
func (p *Buffer) Reset() {
    p.buf = p.buf[0:0] // for reading/writing
    p.index = 0        // for reading
}

// SetBuf replaces the internal buffer with the slice,
// ready for unmarshaling the contents of the slice.
func (p *Buffer) SetBuf(s []byte) {
    p.buf = s
    p.index = 0
}

// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }

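A small self-contained sketch of reusing Buffers for low-level varint work; the higher-level Marshal and Unmarshal paths use the same machinery internally.

package main

import (
    "fmt"
    "log"

    "github.com/golang/protobuf/proto"
)

func main() {
    // Encode a single varint into a fresh Buffer.
    enc := proto.NewBuffer(nil)
    if err := enc.EncodeVarint(300); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("% x\n", enc.Bytes()) // ac 02

    // Decode it back from a Buffer initialized with those bytes.
    dec := proto.NewBuffer(enc.Bytes())
    v, err := dec.DecodeVarint()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(v) // 300

    dec.Reset() // clears the byte stream and read point for reuse
}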
// SetDeterministic sets whether to use deterministic serialization.
|
|
||||||
//
|
|
||||||
// Deterministic serialization guarantees that for a given binary, equal
|
|
||||||
// messages will always be serialized to the same bytes. This implies:
|
|
||||||
//
|
|
||||||
// - Repeated serialization of a message will return the same bytes.
|
|
||||||
// - Different processes of the same binary (which may be executing on
|
|
||||||
// different machines) will serialize equal messages to the same bytes.
|
|
||||||
//
|
|
||||||
// Note that the deterministic serialization is NOT canonical across
|
|
||||||
// languages. It is not guaranteed to remain stable over time. It is unstable
|
|
||||||
// across different builds with schema changes due to unknown fields.
|
|
||||||
// Users who need canonical serialization (e.g., persistent storage in a
|
|
||||||
// canonical form, fingerprinting, etc.) should define their own
|
|
||||||
// canonicalization specification and implement their own serializer rather
|
|
||||||
// than relying on this API.
|
|
||||||
//
|
|
||||||
// If deterministic serialization is requested, map entries will be sorted
|
|
||||||
// by keys in lexicographical order. This is an implementation detail and
|
|
||||||
// subject to change.
|
|
||||||
func (p *Buffer) SetDeterministic(deterministic bool) {
|
|
||||||
p.deterministic = deterministic
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Helper routines for simplifying the creation of optional fields of basic type.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// Bool is a helper routine that allocates a new bool value
|
|
||||||
// to store v and returns a pointer to it.
|
|
||||||
func Bool(v bool) *bool {
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int32 is a helper routine that allocates a new int32 value
|
|
||||||
// to store v and returns a pointer to it.
|
|
||||||
func Int32(v int32) *int32 {
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int is a helper routine that allocates a new int32 value
|
|
||||||
// to store v and returns a pointer to it, but unlike Int32
|
|
||||||
// its argument value is an int.
|
|
||||||
func Int(v int) *int32 {
|
|
||||||
p := new(int32)
|
|
||||||
*p = int32(v)
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int64 is a helper routine that allocates a new int64 value
|
|
||||||
// to store v and returns a pointer to it.
|
|
||||||
func Int64(v int64) *int64 {
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float32 is a helper routine that allocates a new float32 value
|
|
||||||
// to store v and returns a pointer to it.
|
|
||||||
func Float32(v float32) *float32 {
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float64 is a helper routine that allocates a new float64 value
|
|
||||||
// to store v and returns a pointer to it.
|
|
||||||
func Float64(v float64) *float64 {
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Uint32 is a helper routine that allocates a new uint32 value
|
|
||||||
// to store v and returns a pointer to it.
|
|
||||||
func Uint32(v uint32) *uint32 {
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Uint64 is a helper routine that allocates a new uint64 value
|
|
||||||
// to store v and returns a pointer to it.
|
|
||||||
func Uint64(v uint64) *uint64 {
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
// String is a helper routine that allocates a new string value
|
|
||||||
// to store v and returns a pointer to it.
|
|
||||||
func String(v string) *string {
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
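A short sketch of where these helpers are used: proto2 optional scalars are generated as pointer fields, so literal values have to be boxed before assignment.

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
)

func main() {
    // With a generated proto2 message these would be assigned directly to its
    // pointer-typed fields, e.g. msg.Label = proto.String("hello").
    name := proto.String("hello")
    count := proto.Int32(3)
    enabled := proto.Bool(true)
    fmt.Println(*name, *count, *enabled)
}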
// EnumName is a helper function to simplify printing protocol buffer enums
|
|
||||||
// by name. Given an enum map and a value, it returns a useful string.
|
|
||||||
func EnumName(m map[int32]string, v int32) string {
|
|
||||||
s, ok := m[v]
|
|
||||||
if ok {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
return strconv.Itoa(int(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
|
|
||||||
// from their JSON-encoded representation. Given a map from the enum's symbolic
|
|
||||||
// names to its int values, and a byte buffer containing the JSON-encoded
|
|
||||||
// value, it returns an int32 that can be cast to the enum type by the caller.
|
|
||||||
//
|
|
||||||
// The function can deal with both JSON representations, numeric and symbolic.
|
|
||||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
|
||||||
if data[0] == '"' {
|
|
||||||
// New style: enums are strings.
|
|
||||||
var repr string
|
|
||||||
if err := json.Unmarshal(data, &repr); err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
val, ok := m[repr]
|
|
||||||
if !ok {
|
|
||||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
|
||||||
}
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
// Old style: enums are ints.
|
|
||||||
var val int32
|
|
||||||
if err := json.Unmarshal(data, &val); err != nil {
|
|
||||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
|
||||||
}
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
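A self-contained sketch of both accepted JSON representations, reusing the FOO enum maps from the package documentation above:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
)

func main() {
    fooValue := map[string]int32{"X": 17}

    v1, _ := proto.UnmarshalJSONEnum(fooValue, []byte(`"X"`), "FOO") // symbolic form
    v2, _ := proto.UnmarshalJSONEnum(fooValue, []byte(`17`), "FOO")  // numeric form
    fmt.Println(v1, v2) // 17 17

    // EnumName goes the other way, from value to symbolic name.
    fmt.Println(proto.EnumName(map[int32]string{17: "X"}, 17)) // X
}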
// DebugPrint dumps the encoded data in b in a debugging format with a header
|
|
||||||
// including the string s. Used in testing but made available for general debugging.
|
|
||||||
func (p *Buffer) DebugPrint(s string, b []byte) {
|
|
||||||
var u uint64
|
|
||||||
|
|
||||||
obuf := p.buf
|
|
||||||
index := p.index
|
|
||||||
p.buf = b
|
|
||||||
p.index = 0
|
|
||||||
depth := 0
|
|
||||||
|
|
||||||
fmt.Printf("\n--- %s ---\n", s)
|
|
||||||
|
|
||||||
out:
|
|
||||||
for {
|
|
||||||
for i := 0; i < depth; i++ {
|
|
||||||
fmt.Print(" ")
|
|
||||||
}
|
|
||||||
|
|
||||||
index := p.index
|
|
||||||
if index == len(p.buf) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
op, err := p.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("%3d: fetching op err %v\n", index, err)
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
tag := op >> 3
|
|
||||||
wire := op & 7
|
|
||||||
|
|
||||||
switch wire {
|
|
||||||
default:
|
|
||||||
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
|
|
||||||
index, tag, wire)
|
|
||||||
break out
|
|
||||||
|
|
||||||
case WireBytes:
|
|
||||||
var r []byte
|
|
||||||
|
|
||||||
r, err = p.DecodeRawBytes(false)
|
|
||||||
if err != nil {
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
|
|
||||||
if len(r) <= 6 {
|
|
||||||
for i := 0; i < len(r); i++ {
|
|
||||||
fmt.Printf(" %.2x", r[i])
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for i := 0; i < 3; i++ {
|
|
||||||
fmt.Printf(" %.2x", r[i])
|
|
||||||
}
|
|
||||||
fmt.Printf(" ..")
|
|
||||||
for i := len(r) - 3; i < len(r); i++ {
|
|
||||||
fmt.Printf(" %.2x", r[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fmt.Printf("\n")
|
|
||||||
|
|
||||||
case WireFixed32:
|
|
||||||
u, err = p.DecodeFixed32()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
|
|
||||||
|
|
||||||
case WireFixed64:
|
|
||||||
u, err = p.DecodeFixed64()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
|
|
||||||
|
|
||||||
case WireVarint:
|
|
||||||
u, err = p.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
|
|
||||||
|
|
||||||
case WireStartGroup:
|
|
||||||
fmt.Printf("%3d: t=%3d start\n", index, tag)
|
|
||||||
depth++
|
|
||||||
|
|
||||||
case WireEndGroup:
|
|
||||||
depth--
|
|
||||||
fmt.Printf("%3d: t=%3d end\n", index, tag)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if depth != 0 {
|
|
||||||
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
|
|
||||||
}
|
|
||||||
fmt.Printf("\n")
|
|
||||||
|
|
||||||
p.buf = obuf
|
|
||||||
p.index = index
|
|
||||||
}
|
|
||||||
|
|
||||||
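A self-contained sketch of DebugPrint on hand-encoded bytes (field 1 as varint 150, field 2 as the string "testing"):

package main

import "github.com/golang/protobuf/proto"

func main() {
    // 0x08 = field 1, varint; 0x96 0x01 = 150.
    // 0x12 = field 2, length-delimited; 0x07 = length; then "testing".
    data := []byte{0x08, 0x96, 0x01, 0x12, 0x07, 't', 'e', 's', 't', 'i', 'n', 'g'}
    new(proto.Buffer).DebugPrint("example message", data)
}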
// SetDefaults sets unset protocol buffer fields to their default values.
|
|
||||||
// It only modifies fields that are both unset and have defined defaults.
|
|
||||||
// It recursively sets default values in any non-nil sub-messages.
|
|
||||||
func SetDefaults(pb Message) {
|
|
||||||
setDefaults(reflect.ValueOf(pb), true, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// v is a pointer to a struct.
|
|
||||||
func setDefaults(v reflect.Value, recur, zeros bool) {
|
|
||||||
v = v.Elem()
|
|
||||||
|
|
||||||
defaultMu.RLock()
|
|
||||||
dm, ok := defaults[v.Type()]
|
|
||||||
defaultMu.RUnlock()
|
|
||||||
if !ok {
|
|
||||||
dm = buildDefaultMessage(v.Type())
|
|
||||||
defaultMu.Lock()
|
|
||||||
defaults[v.Type()] = dm
|
|
||||||
defaultMu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, sf := range dm.scalars {
|
|
||||||
f := v.Field(sf.index)
|
|
||||||
if !f.IsNil() {
|
|
||||||
// field already set
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
dv := sf.value
|
|
||||||
if dv == nil && !zeros {
|
|
||||||
// no explicit default, and don't want to set zeros
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
fptr := f.Addr().Interface() // **T
|
|
||||||
// TODO: Consider batching the allocations we do here.
|
|
||||||
switch sf.kind {
|
|
||||||
case reflect.Bool:
|
|
||||||
b := new(bool)
|
|
||||||
if dv != nil {
|
|
||||||
*b = dv.(bool)
|
|
||||||
}
|
|
||||||
*(fptr.(**bool)) = b
|
|
||||||
case reflect.Float32:
|
|
||||||
f := new(float32)
|
|
||||||
if dv != nil {
|
|
||||||
*f = dv.(float32)
|
|
||||||
}
|
|
||||||
*(fptr.(**float32)) = f
|
|
||||||
case reflect.Float64:
|
|
||||||
f := new(float64)
|
|
||||||
if dv != nil {
|
|
||||||
*f = dv.(float64)
|
|
||||||
}
|
|
||||||
*(fptr.(**float64)) = f
|
|
||||||
case reflect.Int32:
|
|
||||||
// might be an enum
|
|
||||||
if ft := f.Type(); ft != int32PtrType {
|
|
||||||
// enum
|
|
||||||
f.Set(reflect.New(ft.Elem()))
|
|
||||||
if dv != nil {
|
|
||||||
f.Elem().SetInt(int64(dv.(int32)))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// int32 field
|
|
||||||
i := new(int32)
|
|
||||||
if dv != nil {
|
|
||||||
*i = dv.(int32)
|
|
||||||
}
|
|
||||||
*(fptr.(**int32)) = i
|
|
||||||
}
|
|
||||||
case reflect.Int64:
|
|
||||||
i := new(int64)
|
|
||||||
if dv != nil {
|
|
||||||
*i = dv.(int64)
|
|
||||||
}
|
|
||||||
*(fptr.(**int64)) = i
|
|
||||||
case reflect.String:
|
|
||||||
s := new(string)
|
|
||||||
if dv != nil {
|
|
||||||
*s = dv.(string)
|
|
||||||
}
|
|
||||||
*(fptr.(**string)) = s
|
|
||||||
case reflect.Uint8:
|
|
||||||
// exceptional case: []byte
|
|
||||||
var b []byte
|
|
||||||
if dv != nil {
|
|
||||||
db := dv.([]byte)
|
|
||||||
b = make([]byte, len(db))
|
|
||||||
copy(b, db)
|
|
||||||
} else {
|
|
||||||
b = []byte{}
|
|
||||||
}
|
|
||||||
*(fptr.(*[]byte)) = b
|
|
||||||
case reflect.Uint32:
|
|
||||||
u := new(uint32)
|
|
||||||
if dv != nil {
|
|
||||||
*u = dv.(uint32)
|
|
||||||
}
|
|
||||||
*(fptr.(**uint32)) = u
|
|
||||||
case reflect.Uint64:
|
|
||||||
u := new(uint64)
|
|
||||||
if dv != nil {
|
|
||||||
*u = dv.(uint64)
|
|
||||||
}
|
|
||||||
*(fptr.(**uint64)) = u
|
|
||||||
default:
|
|
||||||
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, ni := range dm.nested {
|
|
||||||
f := v.Field(ni)
|
|
||||||
// f is *T or []*T or map[T]*T
|
|
||||||
switch f.Kind() {
|
|
||||||
case reflect.Ptr:
|
|
||||||
if f.IsNil() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
setDefaults(f, recur, zeros)
|
|
||||||
|
|
||||||
case reflect.Slice:
|
|
||||||
for i := 0; i < f.Len(); i++ {
|
|
||||||
e := f.Index(i)
|
|
||||||
if e.IsNil() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
setDefaults(e, recur, zeros)
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Map:
|
|
||||||
for _, k := range f.MapKeys() {
|
|
||||||
e := f.MapIndex(k)
|
|
||||||
if e.IsNil() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
setDefaults(e, recur, zeros)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
// defaults maps a protocol buffer struct type to a slice of the fields,
|
|
||||||
// with its scalar fields set to their proto-declared non-zero default values.
|
|
||||||
defaultMu sync.RWMutex
|
|
||||||
defaults = make(map[reflect.Type]defaultMessage)
|
|
||||||
|
|
||||||
int32PtrType = reflect.TypeOf((*int32)(nil))
|
|
||||||
)
|
|
||||||
|
|
||||||
// defaultMessage represents information about the default values of a message.
|
|
||||||
type defaultMessage struct {
|
|
||||||
scalars []scalarField
|
|
||||||
nested []int // struct field index of nested messages
|
|
||||||
}
|
|
||||||
|
|
||||||
type scalarField struct {
|
|
||||||
index int // struct field index
|
|
||||||
kind reflect.Kind // element type (the T in *T or []T)
|
|
||||||
value interface{} // the proto-declared default value, or nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// t is a struct type.
|
|
||||||
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
|
|
||||||
sprop := GetProperties(t)
|
|
||||||
for _, prop := range sprop.Prop {
|
|
||||||
fi, ok := sprop.decoderTags.get(prop.Tag)
|
|
||||||
if !ok {
|
|
||||||
// XXX_unrecognized
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
ft := t.Field(fi).Type
|
|
||||||
|
|
||||||
sf, nested, err := fieldDefault(ft, prop)
|
|
||||||
switch {
|
|
||||||
case err != nil:
|
|
||||||
log.Print(err)
|
|
||||||
case nested:
|
|
||||||
dm.nested = append(dm.nested, fi)
|
|
||||||
case sf != nil:
|
|
||||||
sf.index = fi
|
|
||||||
dm.scalars = append(dm.scalars, *sf)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return dm
|
|
||||||
}
|
|
||||||
|
|
||||||
// fieldDefault returns the scalarField for field type ft.
|
|
||||||
// sf will be nil if the field can not have a default.
|
|
||||||
// nestedMessage will be true if this is a nested message.
|
|
||||||
// Note that sf.index is not set on return.
|
|
||||||
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
|
|
||||||
var canHaveDefault bool
|
|
||||||
switch ft.Kind() {
|
|
||||||
case reflect.Ptr:
|
|
||||||
if ft.Elem().Kind() == reflect.Struct {
|
|
||||||
nestedMessage = true
|
|
||||||
} else {
|
|
||||||
canHaveDefault = true // proto2 scalar field
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Slice:
|
|
||||||
switch ft.Elem().Kind() {
|
|
||||||
case reflect.Ptr:
|
|
||||||
nestedMessage = true // repeated message
|
|
||||||
case reflect.Uint8:
|
|
||||||
canHaveDefault = true // bytes field
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Map:
|
|
||||||
if ft.Elem().Kind() == reflect.Ptr {
|
|
||||||
nestedMessage = true // map with message values
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !canHaveDefault {
|
|
||||||
if nestedMessage {
|
|
||||||
return nil, true, nil
|
|
||||||
}
|
|
||||||
return nil, false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// We now know that ft is a pointer or slice.
|
|
||||||
sf = &scalarField{kind: ft.Elem().Kind()}
|
|
||||||
|
|
||||||
// scalar fields without defaults
|
|
||||||
if !prop.HasDefault {
|
|
||||||
return sf, false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// a scalar field: either *T or []byte
|
|
||||||
switch ft.Elem().Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
x, err := strconv.ParseBool(prop.Default)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
|
|
||||||
}
|
|
||||||
sf.value = x
|
|
||||||
case reflect.Float32:
|
|
||||||
x, err := strconv.ParseFloat(prop.Default, 32)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
|
|
||||||
}
|
|
||||||
sf.value = float32(x)
|
|
||||||
case reflect.Float64:
|
|
||||||
x, err := strconv.ParseFloat(prop.Default, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
|
|
||||||
}
|
|
||||||
sf.value = x
|
|
||||||
case reflect.Int32:
|
|
||||||
x, err := strconv.ParseInt(prop.Default, 10, 32)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
|
|
||||||
}
|
|
||||||
sf.value = int32(x)
|
|
||||||
case reflect.Int64:
|
|
||||||
x, err := strconv.ParseInt(prop.Default, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
|
|
||||||
}
|
|
||||||
sf.value = x
|
|
||||||
case reflect.String:
|
|
||||||
sf.value = prop.Default
|
|
||||||
case reflect.Uint8:
|
|
||||||
// []byte (not *uint8)
|
|
||||||
sf.value = []byte(prop.Default)
|
|
||||||
case reflect.Uint32:
|
|
||||||
x, err := strconv.ParseUint(prop.Default, 10, 32)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
|
|
||||||
}
|
|
||||||
sf.value = uint32(x)
|
|
||||||
case reflect.Uint64:
|
|
||||||
x, err := strconv.ParseUint(prop.Default, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
|
|
||||||
}
|
|
||||||
sf.value = x
|
|
||||||
default:
|
|
||||||
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
|
|
||||||
}
|
|
||||||
|
|
||||||
return sf, false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// mapKeys returns a sort.Interface to be used for sorting the map keys.
|
|
||||||
// Map fields may have key types of non-float scalars, strings and enums.
|
|
||||||
func mapKeys(vs []reflect.Value) sort.Interface {
|
|
||||||
s := mapKeySorter{vs: vs}
|
|
||||||
|
|
||||||
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
|
|
||||||
if len(vs) == 0 {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
switch vs[0].Kind() {
|
|
||||||
case reflect.Int32, reflect.Int64:
|
|
||||||
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
|
|
||||||
case reflect.Uint32, reflect.Uint64:
|
|
||||||
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
|
|
||||||
case reflect.Bool:
|
|
||||||
s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
|
|
||||||
case reflect.String:
|
|
||||||
s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
|
|
||||||
}
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
type mapKeySorter struct {
|
|
||||||
vs []reflect.Value
|
|
||||||
less func(a, b reflect.Value) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s mapKeySorter) Len() int { return len(s.vs) }
|
|
||||||
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
|
|
||||||
func (s mapKeySorter) Less(i, j int) bool {
|
|
||||||
return s.less(s.vs[i], s.vs[j])
|
|
||||||
}
|
|
||||||
|
|
||||||
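The same key-ordering idea, written with the standard library's sort.Slice instead of the package-internal mapKeys helper; this is a sketch of how deterministic marshaling orders map entries by key before encoding.

package main

import (
    "fmt"
    "reflect"
    "sort"
)

func main() {
    m := map[int32]string{3: "c", 1: "a", 2: "b"}

    // Collect the keys via reflection, as the marshaler does, then sort them.
    keys := reflect.ValueOf(m).MapKeys()
    sort.Slice(keys, func(i, j int) bool { return keys[i].Int() < keys[j].Int() })

    for _, k := range keys {
        fmt.Println(k.Int(), m[int32(k.Int())])
    }
}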
// isProto3Zero reports whether v is a zero proto3 value.
|
|
||||||
func isProto3Zero(v reflect.Value) bool {
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
return !v.Bool()
|
|
||||||
case reflect.Int32, reflect.Int64:
|
|
||||||
return v.Int() == 0
|
|
||||||
case reflect.Uint32, reflect.Uint64:
|
|
||||||
return v.Uint() == 0
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return v.Float() == 0
|
|
||||||
case reflect.String:
|
|
||||||
return v.String() == ""
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
|
|
||||||
// to assert that that code is compatible with this version of the proto package.
|
|
||||||
ProtoPackageIsVersion3 = true
|
|
||||||
|
|
||||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
|
||||||
// to assert that that code is compatible with this version of the proto package.
|
|
||||||
ProtoPackageIsVersion2 = true
|
|
||||||
|
|
||||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
|
||||||
// to assert that that code is compatible with this version of the proto package.
|
|
||||||
ProtoPackageIsVersion1 = true
|
|
||||||
)
|
|
||||||
|
|
||||||
// InternalMessageInfo is a type used internally by generated .pb.go files.
|
|
||||||
// This type is not intended to be used by non-generated code.
|
|
||||||
// This type is not subject to any compatibility guarantee.
|
|
||||||
type InternalMessageInfo struct {
|
|
||||||
marshal *marshalInfo
|
|
||||||
unmarshal *unmarshalInfo
|
|
||||||
merge *mergeInfo
|
|
||||||
discard *discardInfo
|
|
||||||
}
181  vendor/github.com/golang/protobuf/proto/message_set.go (generated, vendored)
@@ -1,181 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Support for message sets.
|
|
||||||
*/
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
|
|
||||||
// A message type ID is required for storing a protocol buffer in a message set.
|
|
||||||
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
|
|
||||||
|
|
||||||
// The first two types (_MessageSet_Item and messageSet)
|
|
||||||
// model what the protocol compiler produces for the following protocol message:
|
|
||||||
// message MessageSet {
|
|
||||||
// repeated group Item = 1 {
|
|
||||||
// required int32 type_id = 2;
|
|
||||||
// required string message = 3;
|
|
||||||
// };
|
|
||||||
// }
|
|
||||||
// That is the MessageSet wire format. We can't use a proto to generate these
|
|
||||||
// because that would introduce a circular dependency between it and this package.
|
|
||||||
|
|
||||||
type _MessageSet_Item struct {
|
|
||||||
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
|
|
||||||
Message []byte `protobuf:"bytes,3,req,name=message"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type messageSet struct {
|
|
||||||
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
|
|
||||||
XXX_unrecognized []byte
|
|
||||||
// TODO: caching?
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure messageSet is a Message.
|
|
||||||
var _ Message = (*messageSet)(nil)
|
|
||||||
|
|
||||||
// messageTypeIder is an interface satisfied by a protocol buffer type
|
|
||||||
// that may be stored in a MessageSet.
|
|
||||||
type messageTypeIder interface {
|
|
||||||
MessageTypeId() int32
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
|
|
||||||
mti, ok := pb.(messageTypeIder)
|
|
||||||
if !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
id := mti.MessageTypeId()
|
|
||||||
for _, item := range ms.Item {
|
|
||||||
if *item.TypeId == id {
|
|
||||||
return item
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *messageSet) Has(pb Message) bool {
|
|
||||||
return ms.find(pb) != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *messageSet) Unmarshal(pb Message) error {
|
|
||||||
if item := ms.find(pb); item != nil {
|
|
||||||
return Unmarshal(item.Message, pb)
|
|
||||||
}
|
|
||||||
if _, ok := pb.(messageTypeIder); !ok {
|
|
||||||
return errNoMessageTypeID
|
|
||||||
}
|
|
||||||
return nil // TODO: return error instead?
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *messageSet) Marshal(pb Message) error {
|
|
||||||
msg, err := Marshal(pb)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if item := ms.find(pb); item != nil {
|
|
||||||
// reuse existing item
|
|
||||||
item.Message = msg
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
mti, ok := pb.(messageTypeIder)
|
|
||||||
if !ok {
|
|
||||||
return errNoMessageTypeID
|
|
||||||
}
|
|
||||||
|
|
||||||
mtid := mti.MessageTypeId()
|
|
||||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
|
||||||
TypeId: &mtid,
|
|
||||||
Message: msg,
|
|
||||||
})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *messageSet) Reset() { *ms = messageSet{} }
|
|
||||||
func (ms *messageSet) String() string { return CompactTextString(ms) }
|
|
||||||
func (*messageSet) ProtoMessage() {}
|
|
||||||
|
|
||||||
// Support for the message_set_wire_format message option.
|
|
||||||
|
|
||||||
func skipVarint(buf []byte) []byte {
|
|
||||||
i := 0
|
|
||||||
for ; buf[i]&0x80 != 0; i++ {
|
|
||||||
}
|
|
||||||
return buf[i+1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
|
||||||
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
|
||||||
func unmarshalMessageSet(buf []byte, exts interface{}) error {
|
|
||||||
var m map[int32]Extension
|
|
||||||
switch exts := exts.(type) {
|
|
||||||
case *XXX_InternalExtensions:
|
|
||||||
m = exts.extensionsWrite()
|
|
||||||
case map[int32]Extension:
|
|
||||||
m = exts
|
|
||||||
default:
|
|
||||||
return errors.New("proto: not an extension map")
|
|
||||||
}
|
|
||||||
|
|
||||||
ms := new(messageSet)
|
|
||||||
if err := Unmarshal(buf, ms); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, item := range ms.Item {
|
|
||||||
id := *item.TypeId
|
|
||||||
msg := item.Message
|
|
||||||
|
|
||||||
// Restore wire type and field number varint, plus length varint.
|
|
||||||
// Be careful to preserve duplicate items.
|
|
||||||
b := EncodeVarint(uint64(id)<<3 | WireBytes)
|
|
||||||
if ext, ok := m[id]; ok {
|
|
||||||
// Existing data; rip off the tag and length varint
|
|
||||||
// so we join the new data correctly.
|
|
||||||
// We can assume that ext.enc is set because we are unmarshaling.
|
|
||||||
o := ext.enc[len(b):] // skip wire type and field number
|
|
||||||
_, n := DecodeVarint(o) // calculate length of length varint
|
|
||||||
o = o[n:] // skip length varint
|
|
||||||
msg = append(o, msg...) // join old data and new data
|
|
||||||
}
|
|
||||||
b = append(b, EncodeVarint(uint64(len(msg)))...)
|
|
||||||
b = append(b, msg...)
|
|
||||||
|
|
||||||
m[id] = Extension{enc: b}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
360  vendor/github.com/golang/protobuf/proto/pointer_reflect.go (generated, vendored)
@@ -1,360 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
// +build purego appengine js
|
|
||||||
|
|
||||||
// This file contains an implementation of proto field accesses using package reflect.
|
|
||||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
|
||||||
// be used on App Engine.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
const unsafeAllowed = false
|
|
||||||
|
|
||||||
// A field identifies a field in a struct, accessible from a pointer.
|
|
||||||
// In this implementation, a field is identified by the sequence of field indices
|
|
||||||
// passed to reflect's FieldByIndex.
|
|
||||||
type field []int
|
|
||||||
|
|
||||||
// toField returns a field equivalent to the given reflect field.
|
|
||||||
func toField(f *reflect.StructField) field {
|
|
||||||
return f.Index
|
|
||||||
}
|
|
||||||
|
|
||||||
// invalidField is an invalid field identifier.
|
|
||||||
var invalidField = field(nil)
|
|
||||||
|
|
||||||
// zeroField is a noop when calling pointer.offset.
|
|
||||||
var zeroField = field([]int{})
|
|
||||||
|
|
||||||
// IsValid reports whether the field identifier is valid.
|
|
||||||
func (f field) IsValid() bool { return f != nil }
|
|
||||||
|
|
||||||
// The pointer type is for the table-driven decoder.
|
|
||||||
// The implementation here uses a reflect.Value of pointer type to
|
|
||||||
// create a generic pointer. In pointer_unsafe.go we use unsafe
|
|
||||||
// instead of reflect to implement the same (but faster) interface.
|
|
||||||
type pointer struct {
|
|
||||||
v reflect.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
// toPointer converts an interface of pointer type to a pointer
|
|
||||||
// that points to the same target.
|
|
||||||
func toPointer(i *Message) pointer {
|
|
||||||
return pointer{v: reflect.ValueOf(*i)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// toAddrPointer converts an interface to a pointer that points to
|
|
||||||
// the interface data.
|
|
||||||
func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
|
|
||||||
v := reflect.ValueOf(*i)
|
|
||||||
u := reflect.New(v.Type())
|
|
||||||
u.Elem().Set(v)
|
|
||||||
if deref {
|
|
||||||
u = u.Elem()
|
|
||||||
}
|
|
||||||
return pointer{v: u}
|
|
||||||
}
|
|
||||||
|
|
||||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
|
||||||
func valToPointer(v reflect.Value) pointer {
|
|
||||||
return pointer{v: v}
|
|
||||||
}
|
|
||||||
|
|
||||||
// offset converts from a pointer to a structure to a pointer to
|
|
||||||
// one of its fields.
|
|
||||||
func (p pointer) offset(f field) pointer {
|
|
||||||
return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p pointer) isNil() bool {
|
|
||||||
return p.v.IsNil()
|
|
||||||
}
|
|
||||||
|
|
||||||
// grow updates the slice s in place to make it one element longer.
|
|
||||||
// s must be addressable.
|
|
||||||
// Returns the (addressable) new element.
|
|
||||||
func grow(s reflect.Value) reflect.Value {
|
|
||||||
n, m := s.Len(), s.Cap()
|
|
||||||
if n < m {
|
|
||||||
s.SetLen(n + 1)
|
|
||||||
} else {
|
|
||||||
s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
|
|
||||||
}
|
|
||||||
return s.Index(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p pointer) toInt64() *int64 {
|
|
||||||
return p.v.Interface().(*int64)
|
|
||||||
}
|
|
||||||
func (p pointer) toInt64Ptr() **int64 {
|
|
||||||
return p.v.Interface().(**int64)
|
|
||||||
}
|
|
||||||
func (p pointer) toInt64Slice() *[]int64 {
|
|
||||||
return p.v.Interface().(*[]int64)
|
|
||||||
}
|
|
||||||
|
|
||||||
var int32ptr = reflect.TypeOf((*int32)(nil))
|
|
||||||
|
|
||||||
func (p pointer) toInt32() *int32 {
|
|
||||||
return p.v.Convert(int32ptr).Interface().(*int32)
|
|
||||||
}
|
|
||||||
|
|
||||||
// The toInt32Ptr/Slice methods don't work because of enums.
|
|
||||||
// Instead, we must use set/get methods for the int32ptr/slice case.
|
|
||||||
/*
|
|
||||||
func (p pointer) toInt32Ptr() **int32 {
|
|
||||||
return p.v.Interface().(**int32)
|
|
||||||
}
|
|
||||||
func (p pointer) toInt32Slice() *[]int32 {
|
|
||||||
return p.v.Interface().(*[]int32)
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
func (p pointer) getInt32Ptr() *int32 {
|
|
||||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
|
||||||
// raw int32 type
|
|
||||||
return p.v.Elem().Interface().(*int32)
|
|
||||||
}
|
|
||||||
// an enum
|
|
||||||
return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
|
|
||||||
}
|
|
||||||
func (p pointer) setInt32Ptr(v int32) {
|
|
||||||
// Allocate value in a *int32. Possibly convert that to a *enum.
|
|
||||||
// Then assign it to a **int32 or **enum.
|
|
||||||
// Note: we can convert *int32 to *enum, but we can't convert
|
|
||||||
// **int32 to **enum!
|
|
||||||
p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
|
|
||||||
}
|
|
||||||
|
|
||||||
// getInt32Slice copies []int32 from p as a new slice.
|
|
||||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
|
||||||
func (p pointer) getInt32Slice() []int32 {
|
|
||||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
|
||||||
// raw int32 type
|
|
||||||
return p.v.Elem().Interface().([]int32)
|
|
||||||
}
|
|
||||||
// an enum
|
|
||||||
// Allocate a []int32, then assign []enum's values into it.
|
|
||||||
// Note: we can't convert []enum to []int32.
|
|
||||||
slice := p.v.Elem()
|
|
||||||
s := make([]int32, slice.Len())
|
|
||||||
for i := 0; i < slice.Len(); i++ {
|
|
||||||
s[i] = int32(slice.Index(i).Int())
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// setInt32Slice copies []int32 into p as a new slice.
|
|
||||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
|
||||||
func (p pointer) setInt32Slice(v []int32) {
|
|
||||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
|
||||||
// raw int32 type
|
|
||||||
p.v.Elem().Set(reflect.ValueOf(v))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// an enum
|
|
||||||
// Allocate a []enum, then assign []int32's values into it.
|
|
||||||
// Note: we can't convert []enum to []int32.
|
|
||||||
slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
|
|
||||||
for i, x := range v {
|
|
||||||
slice.Index(i).SetInt(int64(x))
|
|
||||||
}
|
|
||||||
p.v.Elem().Set(slice)
|
|
||||||
}
|
|
||||||
func (p pointer) appendInt32Slice(v int32) {
|
|
||||||
grow(p.v.Elem()).SetInt(int64(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p pointer) toUint64() *uint64 {
|
|
||||||
return p.v.Interface().(*uint64)
|
|
||||||
}
|
|
||||||
func (p pointer) toUint64Ptr() **uint64 {
|
|
||||||
return p.v.Interface().(**uint64)
|
|
||||||
}
|
|
||||||
func (p pointer) toUint64Slice() *[]uint64 {
|
|
||||||
return p.v.Interface().(*[]uint64)
|
|
||||||
}
|
|
||||||
func (p pointer) toUint32() *uint32 {
|
|
||||||
return p.v.Interface().(*uint32)
|
|
||||||
}
|
|
||||||
func (p pointer) toUint32Ptr() **uint32 {
|
|
||||||
return p.v.Interface().(**uint32)
|
|
||||||
}
|
|
||||||
func (p pointer) toUint32Slice() *[]uint32 {
|
|
||||||
return p.v.Interface().(*[]uint32)
|
|
||||||
}
|
|
||||||
func (p pointer) toBool() *bool {
|
|
||||||
return p.v.Interface().(*bool)
|
|
||||||
}
|
|
||||||
func (p pointer) toBoolPtr() **bool {
|
|
||||||
return p.v.Interface().(**bool)
|
|
||||||
}
|
|
||||||
func (p pointer) toBoolSlice() *[]bool {
|
|
||||||
return p.v.Interface().(*[]bool)
|
|
||||||
}
|
|
||||||
func (p pointer) toFloat64() *float64 {
|
|
||||||
return p.v.Interface().(*float64)
|
|
||||||
}
|
|
||||||
func (p pointer) toFloat64Ptr() **float64 {
|
|
||||||
return p.v.Interface().(**float64)
|
|
||||||
}
|
|
||||||
func (p pointer) toFloat64Slice() *[]float64 {
|
|
||||||
return p.v.Interface().(*[]float64)
|
|
||||||
}
|
|
||||||
func (p pointer) toFloat32() *float32 {
|
|
||||||
return p.v.Interface().(*float32)
|
|
||||||
}
|
|
||||||
func (p pointer) toFloat32Ptr() **float32 {
|
|
||||||
return p.v.Interface().(**float32)
|
|
||||||
}
|
|
||||||
func (p pointer) toFloat32Slice() *[]float32 {
|
|
||||||
return p.v.Interface().(*[]float32)
|
|
||||||
}
|
|
||||||
func (p pointer) toString() *string {
|
|
||||||
return p.v.Interface().(*string)
|
|
||||||
}
|
|
||||||
func (p pointer) toStringPtr() **string {
|
|
||||||
return p.v.Interface().(**string)
|
|
||||||
}
|
|
||||||
func (p pointer) toStringSlice() *[]string {
|
|
||||||
return p.v.Interface().(*[]string)
|
|
||||||
}
|
|
||||||
func (p pointer) toBytes() *[]byte {
|
|
||||||
return p.v.Interface().(*[]byte)
|
|
||||||
}
|
|
||||||
func (p pointer) toBytesSlice() *[][]byte {
|
|
||||||
return p.v.Interface().(*[][]byte)
|
|
||||||
}
|
|
||||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
|
||||||
return p.v.Interface().(*XXX_InternalExtensions)
|
|
||||||
}
|
|
||||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
|
||||||
return p.v.Interface().(*map[int32]Extension)
|
|
||||||
}
|
|
||||||
func (p pointer) getPointer() pointer {
|
|
||||||
return pointer{v: p.v.Elem()}
|
|
||||||
}
|
|
||||||
func (p pointer) setPointer(q pointer) {
|
|
||||||
p.v.Elem().Set(q.v)
|
|
||||||
}
|
|
||||||
func (p pointer) appendPointer(q pointer) {
|
|
||||||
grow(p.v.Elem()).Set(q.v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getPointerSlice copies []*T from p as a new []pointer.
|
|
||||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
|
||||||
func (p pointer) getPointerSlice() []pointer {
|
|
||||||
if p.v.IsNil() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
n := p.v.Elem().Len()
|
|
||||||
s := make([]pointer, n)
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
s[i] = pointer{v: p.v.Elem().Index(i)}
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// setPointerSlice copies []pointer into p as a new []*T.
|
|
||||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
|
||||||
func (p pointer) setPointerSlice(v []pointer) {
|
|
||||||
if v == nil {
|
|
||||||
p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
|
|
||||||
for _, p := range v {
|
|
||||||
s = reflect.Append(s, p.v)
|
|
||||||
}
|
|
||||||
p.v.Elem().Set(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getInterfacePointer returns a pointer that points to the
|
|
||||||
// interface data of the interface pointed by p.
|
|
||||||
func (p pointer) getInterfacePointer() pointer {
|
|
||||||
if p.v.Elem().IsNil() {
|
|
||||||
return pointer{v: p.v.Elem()}
|
|
||||||
}
|
|
||||||
return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
|
||||||
// TODO: check that p.v.Type().Elem() == t?
|
|
||||||
return p.v
|
|
||||||
}
|
|
||||||
|
|
||||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
|
||||||
atomicLock.Lock()
|
|
||||||
defer atomicLock.Unlock()
|
|
||||||
return *p
|
|
||||||
}
|
|
||||||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
|
||||||
atomicLock.Lock()
|
|
||||||
defer atomicLock.Unlock()
|
|
||||||
*p = v
|
|
||||||
}
|
|
||||||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
|
||||||
atomicLock.Lock()
|
|
||||||
defer atomicLock.Unlock()
|
|
||||||
return *p
|
|
||||||
}
|
|
||||||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
|
||||||
atomicLock.Lock()
|
|
||||||
defer atomicLock.Unlock()
|
|
||||||
*p = v
|
|
||||||
}
|
|
||||||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
|
||||||
atomicLock.Lock()
|
|
||||||
defer atomicLock.Unlock()
|
|
||||||
return *p
|
|
||||||
}
|
|
||||||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
|
||||||
atomicLock.Lock()
|
|
||||||
defer atomicLock.Unlock()
|
|
||||||
*p = v
|
|
||||||
}
|
|
||||||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
|
||||||
atomicLock.Lock()
|
|
||||||
defer atomicLock.Unlock()
|
|
||||||
return *p
|
|
||||||
}
|
|
||||||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
|
||||||
atomicLock.Lock()
|
|
||||||
defer atomicLock.Unlock()
|
|
||||||
*p = v
|
|
||||||
}
|
|
||||||
|
|
||||||
var atomicLock sync.Mutex
|
|
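Not part of the commit diff: a minimal standalone sketch of the reflect-based field access that the deleted pointer_reflect.go above implements — a field is identified by its index path and reached through Elem().FieldByIndex().Addr(). The example struct and field names are hypothetical.

package main

import (
	"fmt"
	"reflect"
)

type example struct {
	A int64
	B string
}

func main() {
	v := &example{A: 1, B: "x"}
	p := reflect.ValueOf(v) // generic "pointer", held as a reflect.Value

	// Field identifier, analogous to the `field []int` type above.
	bField, _ := reflect.TypeOf(example{}).FieldByName("B")
	idx := bField.Index

	// offset(): pointer-to-struct -> pointer-to-field.
	fp := p.Elem().FieldByIndex(idx).Addr()
	*fp.Interface().(*string) = "updated"

	fmt.Println(v.B) // updated
}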
313  vendor/github.com/golang/protobuf/proto/pointer_unsafe.go  generated  vendored
@@ -1,313 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// +build !purego,!appengine,!js

// This file contains the implementation of the proto field accesses using package unsafe.

package proto

import (
	"reflect"
	"sync/atomic"
	"unsafe"
)

const unsafeAllowed = true

// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr

// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
	return field(f.Offset)
}

// invalidField is an invalid field identifier.
const invalidField = ^field(0)

// zeroField is a noop when calling pointer.offset.
const zeroField = field(0)

// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
	return f != invalidField
}

// The pointer type below is for the new table-driven encoder/decoder.
// The implementation here uses unsafe.Pointer to create a generic pointer.
// In pointer_reflect.go we use reflect instead of unsafe to implement
// the same (but slower) interface.
type pointer struct {
	p unsafe.Pointer
}

// size of pointer
var ptrSize = unsafe.Sizeof(uintptr(0))

// toPointer converts an interface of pointer type to a pointer
// that points to the same target.
func toPointer(i *Message) pointer {
	// Super-tricky - read pointer out of data word of interface value.
	// Saves ~25ns over the equivalent:
	// return valToPointer(reflect.ValueOf(*i))
	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}

// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
	// Super-tricky - read or get the address of data word of interface value.
	if isptr {
		// The interface is of pointer type, thus it is a direct interface.
		// The data word is the pointer data itself. We take its address.
		p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
	} else {
		// The interface is not of pointer type. The data word is the pointer
		// to the data.
		p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
	}
	if deref {
		p.p = *(*unsafe.Pointer)(p.p)
	}
	return p
}

// valToPointer converts v to a pointer. v must be of pointer type.
func valToPointer(v reflect.Value) pointer {
	return pointer{p: unsafe.Pointer(v.Pointer())}
}

// offset converts from a pointer to a structure to a pointer to
// one of its fields.
func (p pointer) offset(f field) pointer {
	// For safety, we should panic if !f.IsValid, however calling panic causes
	// this to no longer be inlineable, which is a serious performance cost.
	/*
		if !f.IsValid() {
			panic("invalid field")
		}
	*/
	return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
}

func (p pointer) isNil() bool {
	return p.p == nil
}

func (p pointer) toInt64() *int64 {
	return (*int64)(p.p)
}
func (p pointer) toInt64Ptr() **int64 {
	return (**int64)(p.p)
}
func (p pointer) toInt64Slice() *[]int64 {
	return (*[]int64)(p.p)
}
func (p pointer) toInt32() *int32 {
	return (*int32)(p.p)
}

// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
/*
	func (p pointer) toInt32Ptr() **int32 {
		return (**int32)(p.p)
	}
	func (p pointer) toInt32Slice() *[]int32 {
		return (*[]int32)(p.p)
	}
*/
func (p pointer) getInt32Ptr() *int32 {
	return *(**int32)(p.p)
}
func (p pointer) setInt32Ptr(v int32) {
	*(**int32)(p.p) = &v
}

// getInt32Slice loads a []int32 from p.
// The value returned is aliased with the original slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) getInt32Slice() []int32 {
	return *(*[]int32)(p.p)
}

// setInt32Slice stores a []int32 to p.
// The value set is aliased with the input slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) setInt32Slice(v []int32) {
	*(*[]int32)(p.p) = v
}

// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
func (p pointer) appendInt32Slice(v int32) {
	s := (*[]int32)(p.p)
	*s = append(*s, v)
}

func (p pointer) toUint64() *uint64 {
	return (*uint64)(p.p)
}
func (p pointer) toUint64Ptr() **uint64 {
	return (**uint64)(p.p)
}
func (p pointer) toUint64Slice() *[]uint64 {
	return (*[]uint64)(p.p)
}
func (p pointer) toUint32() *uint32 {
	return (*uint32)(p.p)
}
func (p pointer) toUint32Ptr() **uint32 {
	return (**uint32)(p.p)
}
func (p pointer) toUint32Slice() *[]uint32 {
	return (*[]uint32)(p.p)
}
func (p pointer) toBool() *bool {
	return (*bool)(p.p)
}
func (p pointer) toBoolPtr() **bool {
	return (**bool)(p.p)
}
func (p pointer) toBoolSlice() *[]bool {
	return (*[]bool)(p.p)
}
func (p pointer) toFloat64() *float64 {
	return (*float64)(p.p)
}
func (p pointer) toFloat64Ptr() **float64 {
	return (**float64)(p.p)
}
func (p pointer) toFloat64Slice() *[]float64 {
	return (*[]float64)(p.p)
}
func (p pointer) toFloat32() *float32 {
	return (*float32)(p.p)
}
func (p pointer) toFloat32Ptr() **float32 {
	return (**float32)(p.p)
}
func (p pointer) toFloat32Slice() *[]float32 {
	return (*[]float32)(p.p)
}
func (p pointer) toString() *string {
	return (*string)(p.p)
}
func (p pointer) toStringPtr() **string {
	return (**string)(p.p)
}
func (p pointer) toStringSlice() *[]string {
	return (*[]string)(p.p)
}
func (p pointer) toBytes() *[]byte {
	return (*[]byte)(p.p)
}
func (p pointer) toBytesSlice() *[][]byte {
	return (*[][]byte)(p.p)
}
func (p pointer) toExtensions() *XXX_InternalExtensions {
	return (*XXX_InternalExtensions)(p.p)
}
func (p pointer) toOldExtensions() *map[int32]Extension {
	return (*map[int32]Extension)(p.p)
}

// getPointerSlice loads []*T from p as a []pointer.
// The value returned is aliased with the original slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) getPointerSlice() []pointer {
	// Super-tricky - p should point to a []*T where T is a
	// message type. We load it as []pointer.
	return *(*[]pointer)(p.p)
}

// setPointerSlice stores []pointer into p as a []*T.
// The value set is aliased with the input slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) setPointerSlice(v []pointer) {
	// Super-tricky - p should point to a []*T where T is a
	// message type. We store it as []pointer.
	*(*[]pointer)(p.p) = v
}

// getPointer loads the pointer at p and returns it.
func (p pointer) getPointer() pointer {
	return pointer{p: *(*unsafe.Pointer)(p.p)}
}

// setPointer stores the pointer q at p.
func (p pointer) setPointer(q pointer) {
	*(*unsafe.Pointer)(p.p) = q.p
}

// append q to the slice pointed to by p.
func (p pointer) appendPointer(q pointer) {
	s := (*[]unsafe.Pointer)(p.p)
	*s = append(*s, q.p)
}

// getInterfacePointer returns a pointer that points to the
// interface data of the interface pointed by p.
func (p pointer) getInterfacePointer() pointer {
	// Super-tricky - read pointer out of data word of interface value.
	return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
}

// asPointerTo returns a reflect.Value that is a pointer to an
// object of type t stored at p.
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
	return reflect.NewAt(t, p.p)
}

func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
	return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
	return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
	return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
	return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
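Not part of the commit diff: a minimal standalone sketch of the unsafe-based field access that the deleted pointer_unsafe.go above implements — a field is simply its byte offset, and offset() is plain pointer arithmetic. The example struct and field names are hypothetical.

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type example struct {
	A int64
	B string
}

func main() {
	v := &example{A: 1, B: "x"}
	base := unsafe.Pointer(v)

	// Field identifier, analogous to the `field uintptr` type above.
	bField, _ := reflect.TypeOf(example{}).FieldByName("B")
	off := bField.Offset

	// offset(): pointer-to-struct -> pointer-to-field, via pointer arithmetic.
	bp := (*string)(unsafe.Pointer(uintptr(base) + off))
	*bp = "updated"

	fmt.Println(v.B) // updated
}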
652  vendor/github.com/golang/protobuf/proto/properties.go  generated  vendored
@@ -1,162 +1,104 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
//
|
// Use of this source code is governed by a BSD-style
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
// license that can be found in the LICENSE file.
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
package proto
|
||||||
|
|
||||||
/*
|
|
||||||
* Routines for encoding data into the wire format for protocol buffers.
|
|
||||||
*/
|
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
"sort"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
"google.golang.org/protobuf/runtime/protoimpl"
|
||||||
)
|
)
|
||||||
|
|
||||||
const debug bool = false
|
// StructProperties represents protocol buffer type information for a
|
||||||
|
// generated protobuf message in the open-struct API.
|
||||||
// Constants that identify the encoding of a value on the wire.
|
//
|
||||||
const (
|
// Deprecated: Do not use.
|
||||||
WireVarint = 0
|
|
||||||
WireFixed64 = 1
|
|
||||||
WireBytes = 2
|
|
||||||
WireStartGroup = 3
|
|
||||||
WireEndGroup = 4
|
|
||||||
WireFixed32 = 5
|
|
||||||
)
|
|
||||||
|
|
||||||
// tagMap is an optimization over map[int]int for typical protocol buffer
|
|
||||||
// use-cases. Encoded protocol buffers are often in tag order with small tag
|
|
||||||
// numbers.
|
|
||||||
type tagMap struct {
|
|
||||||
fastTags []int
|
|
||||||
slowTags map[int]int
|
|
||||||
}
|
|
||||||
|
|
||||||
// tagMapFastLimit is the upper bound on the tag number that will be stored in
|
|
||||||
// the tagMap slice rather than its map.
|
|
||||||
const tagMapFastLimit = 1024
|
|
||||||
|
|
||||||
func (p *tagMap) get(t int) (int, bool) {
|
|
||||||
if t > 0 && t < tagMapFastLimit {
|
|
||||||
if t >= len(p.fastTags) {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
fi := p.fastTags[t]
|
|
||||||
return fi, fi >= 0
|
|
||||||
}
|
|
||||||
fi, ok := p.slowTags[t]
|
|
||||||
return fi, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *tagMap) put(t int, fi int) {
|
|
||||||
if t > 0 && t < tagMapFastLimit {
|
|
||||||
for len(p.fastTags) < t+1 {
|
|
||||||
p.fastTags = append(p.fastTags, -1)
|
|
||||||
}
|
|
||||||
p.fastTags[t] = fi
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if p.slowTags == nil {
|
|
||||||
p.slowTags = make(map[int]int)
|
|
||||||
}
|
|
||||||
p.slowTags[t] = fi
|
|
||||||
}
|
|
||||||
|
|
||||||
// StructProperties represents properties for all the fields of a struct.
|
|
||||||
// decoderTags and decoderOrigNames should only be used by the decoder.
|
|
||||||
type StructProperties struct {
|
type StructProperties struct {
|
||||||
Prop []*Properties // properties for each field
|
// Prop are the properties for each field.
|
||||||
reqCount int // required count
|
//
|
||||||
decoderTags tagMap // map from proto tag to struct field number
|
// Fields belonging to a oneof are stored in OneofTypes instead, with a
|
||||||
decoderOrigNames map[string]int // map from original name to struct field number
|
// single Properties representing the parent oneof held here.
|
||||||
order []int // list of struct field numbers in tag order
|
//
|
||||||
|
// The order of Prop matches the order of fields in the Go struct.
|
||||||
|
// Struct fields that are not related to protobufs have a "XXX_" prefix
|
||||||
|
// in the Properties.Name and must be ignored by the user.
|
||||||
|
Prop []*Properties
|
||||||
|
|
||||||
// OneofTypes contains information about the oneof fields in this message.
|
// OneofTypes contains information about the oneof fields in this message.
|
||||||
// It is keyed by the original name of a field.
|
// It is keyed by the protobuf field name.
|
||||||
OneofTypes map[string]*OneofProperties
|
OneofTypes map[string]*OneofProperties
|
||||||
}
|
}
|
||||||
|
|
||||||
// OneofProperties represents information about a specific field in a oneof.
|
// Properties represents the type information for a protobuf message field.
|
||||||
type OneofProperties struct {
|
//
|
||||||
Type reflect.Type // pointer to generated struct type for this oneof field
|
// Deprecated: Do not use.
|
||||||
Field int // struct field number of the containing oneof in the message
|
|
||||||
Prop *Properties
|
|
||||||
}
|
|
||||||
|
|
||||||
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
|
|
||||||
// See encode.go, (*Buffer).enc_struct.
|
|
||||||
|
|
||||||
func (sp *StructProperties) Len() int { return len(sp.order) }
|
|
||||||
func (sp *StructProperties) Less(i, j int) bool {
|
|
||||||
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
|
|
||||||
}
|
|
||||||
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
|
|
||||||
|
|
||||||
// Properties represents the protocol-specific behavior of a single struct field.
|
|
||||||
type Properties struct {
|
type Properties struct {
|
||||||
Name string // name of the field, for error messages
|
// Name is a placeholder name with little meaningful semantic value.
|
||||||
OrigName string // original name before protocol compiler (always set)
|
// If the name has an "XXX_" prefix, the entire Properties must be ignored.
|
||||||
JSONName string // name to use for JSON; determined by protoc
|
Name string
|
||||||
Wire string
|
// OrigName is the protobuf field name or oneof name.
|
||||||
|
OrigName string
|
||||||
|
// JSONName is the JSON name for the protobuf field.
|
||||||
|
JSONName string
|
||||||
|
// Enum is a placeholder name for enums.
|
||||||
|
// For historical reasons, this is neither the Go name for the enum,
|
||||||
|
// nor the protobuf name for the enum.
|
||||||
|
Enum string // Deprecated: Do not use.
|
||||||
|
// Weak contains the full name of the weakly referenced message.
|
||||||
|
Weak string
|
||||||
|
// Wire is a string representation of the wire type.
|
||||||
|
Wire string
|
||||||
|
// WireType is the protobuf wire type for the field.
|
||||||
WireType int
|
WireType int
|
||||||
Tag int
|
// Tag is the protobuf field number.
|
||||||
|
Tag int
|
||||||
|
// Required reports whether this is a required field.
|
||||||
Required bool
|
Required bool
|
||||||
|
// Optional reports whether this is a optional field.
|
||||||
Optional bool
|
Optional bool
|
||||||
|
// Repeated reports whether this is a repeated field.
|
||||||
Repeated bool
|
Repeated bool
|
||||||
Packed bool // relevant for repeated primitives only
|
// Packed reports whether this is a packed repeated field of scalars.
|
||||||
Enum string // set for enum types only
|
Packed bool
|
||||||
proto3 bool // whether this is known to be a proto3 field
|
// Proto3 reports whether this field operates under the proto3 syntax.
|
||||||
oneof bool // whether this is a oneof field
|
Proto3 bool
|
||||||
|
// Oneof reports whether this field belongs within a oneof.
|
||||||
|
Oneof bool
|
||||||
|
|
||||||
Default string // default value
|
// Default is the default value in string form.
|
||||||
HasDefault bool // whether an explicit default was provided
|
Default string
|
||||||
|
// HasDefault reports whether the field has a default value.
|
||||||
|
HasDefault bool
|
||||||
|
|
||||||
stype reflect.Type // set for struct types only
|
// MapKeyProp is the properties for the key field for a map field.
|
||||||
sprop *StructProperties // set for struct types only
|
MapKeyProp *Properties
|
||||||
|
// MapValProp is the properties for the value field for a map field.
|
||||||
|
MapValProp *Properties
|
||||||
|
}
|
||||||
|
|
||||||
mtype reflect.Type // set for map types only
|
// OneofProperties represents the type information for a protobuf oneof.
|
||||||
MapKeyProp *Properties // set for map types only
|
//
|
||||||
MapValProp *Properties // set for map types only
|
// Deprecated: Do not use.
|
||||||
|
type OneofProperties struct {
|
||||||
|
// Type is a pointer to the generated wrapper type for the field value.
|
||||||
|
// This is nil for messages that are not in the open-struct API.
|
||||||
|
Type reflect.Type
|
||||||
|
// Field is the index into StructProperties.Prop for the containing oneof.
|
||||||
|
Field int
|
||||||
|
// Prop is the properties for the field.
|
||||||
|
Prop *Properties
|
||||||
}
|
}
|
||||||
|
|
||||||
// String formats the properties in the protobuf struct field tag style.
|
// String formats the properties in the protobuf struct field tag style.
|
||||||
func (p *Properties) String() string {
|
func (p *Properties) String() string {
|
||||||
s := p.Wire
|
s := p.Wire
|
||||||
s += ","
|
s += "," + strconv.Itoa(p.Tag)
|
||||||
s += strconv.Itoa(p.Tag)
|
|
||||||
if p.Required {
|
if p.Required {
|
||||||
s += ",req"
|
s += ",req"
|
||||||
}
|
}
|
||||||
@ -170,18 +112,21 @@ func (p *Properties) String() string {
|
|||||||
s += ",packed"
|
s += ",packed"
|
||||||
}
|
}
|
||||||
s += ",name=" + p.OrigName
|
s += ",name=" + p.OrigName
|
||||||
if p.JSONName != p.OrigName {
|
if p.JSONName != "" {
|
||||||
s += ",json=" + p.JSONName
|
s += ",json=" + p.JSONName
|
||||||
}
|
}
|
||||||
if p.proto3 {
|
|
||||||
s += ",proto3"
|
|
||||||
}
|
|
||||||
if p.oneof {
|
|
||||||
s += ",oneof"
|
|
||||||
}
|
|
||||||
if len(p.Enum) > 0 {
|
if len(p.Enum) > 0 {
|
||||||
s += ",enum=" + p.Enum
|
s += ",enum=" + p.Enum
|
||||||
}
|
}
|
||||||
|
if len(p.Weak) > 0 {
|
||||||
|
s += ",weak=" + p.Weak
|
||||||
|
}
|
||||||
|
if p.Proto3 {
|
||||||
|
s += ",proto3"
|
||||||
|
}
|
||||||
|
if p.Oneof {
|
||||||
|
s += ",oneof"
|
||||||
|
}
|
||||||
if p.HasDefault {
|
if p.HasDefault {
|
||||||
s += ",def=" + p.Default
|
s += ",def=" + p.Default
|
||||||
}
|
}
|
||||||
@ -189,356 +134,173 @@ func (p *Properties) String() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Parse populates p by parsing a string in the protobuf struct field tag style.
|
// Parse populates p by parsing a string in the protobuf struct field tag style.
|
||||||
func (p *Properties) Parse(s string) {
|
func (p *Properties) Parse(tag string) {
|
||||||
// "bytes,49,opt,name=foo,def=hello!"
|
// For example: "bytes,49,opt,name=foo,def=hello!"
|
||||||
fields := strings.Split(s, ",") // breaks def=, but handled below.
|
for len(tag) > 0 {
|
||||||
if len(fields) < 2 {
|
i := strings.IndexByte(tag, ',')
|
||||||
log.Printf("proto: tag has too few fields: %q", s)
|
if i < 0 {
|
||||||
return
|
i = len(tag)
|
||||||
}
|
}
|
||||||
|
switch s := tag[:i]; {
|
||||||
p.Wire = fields[0]
|
case strings.HasPrefix(s, "name="):
|
||||||
switch p.Wire {
|
p.OrigName = s[len("name="):]
|
||||||
case "varint":
|
case strings.HasPrefix(s, "json="):
|
||||||
p.WireType = WireVarint
|
p.JSONName = s[len("json="):]
|
||||||
case "fixed32":
|
case strings.HasPrefix(s, "enum="):
|
||||||
p.WireType = WireFixed32
|
p.Enum = s[len("enum="):]
|
||||||
case "fixed64":
|
case strings.HasPrefix(s, "weak="):
|
||||||
p.WireType = WireFixed64
|
p.Weak = s[len("weak="):]
|
||||||
case "zigzag32":
|
case strings.Trim(s, "0123456789") == "":
|
||||||
p.WireType = WireVarint
|
n, _ := strconv.ParseUint(s, 10, 32)
|
||||||
case "zigzag64":
|
p.Tag = int(n)
|
||||||
p.WireType = WireVarint
|
case s == "opt":
|
||||||
case "bytes", "group":
|
|
||||||
p.WireType = WireBytes
|
|
||||||
// no numeric converter for non-numeric types
|
|
||||||
default:
|
|
||||||
log.Printf("proto: tag has unknown wire type: %q", s)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var err error
|
|
||||||
p.Tag, err = strconv.Atoi(fields[1])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
outer:
|
|
||||||
for i := 2; i < len(fields); i++ {
|
|
||||||
f := fields[i]
|
|
||||||
switch {
|
|
||||||
case f == "req":
|
|
||||||
p.Required = true
|
|
||||||
case f == "opt":
|
|
||||||
p.Optional = true
|
p.Optional = true
|
||||||
case f == "rep":
|
case s == "req":
|
||||||
|
p.Required = true
|
||||||
|
case s == "rep":
|
||||||
p.Repeated = true
|
p.Repeated = true
|
||||||
case f == "packed":
|
case s == "varint" || s == "zigzag32" || s == "zigzag64":
|
||||||
|
p.Wire = s
|
||||||
|
p.WireType = WireVarint
|
||||||
|
case s == "fixed32":
|
||||||
|
p.Wire = s
|
||||||
|
p.WireType = WireFixed32
|
||||||
|
case s == "fixed64":
|
||||||
|
p.Wire = s
|
||||||
|
p.WireType = WireFixed64
|
||||||
|
case s == "bytes":
|
||||||
|
p.Wire = s
|
||||||
|
p.WireType = WireBytes
|
||||||
|
case s == "group":
|
||||||
|
p.Wire = s
|
||||||
|
p.WireType = WireStartGroup
|
||||||
|
case s == "packed":
|
||||||
p.Packed = true
|
p.Packed = true
|
||||||
case strings.HasPrefix(f, "name="):
|
case s == "proto3":
|
||||||
p.OrigName = f[5:]
|
p.Proto3 = true
|
||||||
case strings.HasPrefix(f, "json="):
|
case s == "oneof":
|
||||||
p.JSONName = f[5:]
|
p.Oneof = true
|
||||||
case strings.HasPrefix(f, "enum="):
|
case strings.HasPrefix(s, "def="):
|
||||||
p.Enum = f[5:]
|
// The default tag is special in that everything afterwards is the
|
||||||
case f == "proto3":
|
// default regardless of the presence of commas.
|
||||||
p.proto3 = true
|
|
||||||
case f == "oneof":
|
|
||||||
p.oneof = true
|
|
||||||
case strings.HasPrefix(f, "def="):
|
|
||||||
p.HasDefault = true
|
p.HasDefault = true
|
||||||
p.Default = f[4:] // rest of string
|
p.Default, i = tag[len("def="):], len(tag)
|
||||||
if i+1 < len(fields) {
|
|
||||||
// Commas aren't escaped, and def is always last.
|
|
||||||
p.Default += "," + strings.Join(fields[i+1:], ",")
|
|
||||||
break outer
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
tag = strings.TrimPrefix(tag[i:], ",")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
|
|
||||||
|
|
||||||
// setFieldProps initializes the field properties for submessages and maps.
|
|
||||||
func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
|
|
||||||
switch t1 := typ; t1.Kind() {
|
|
||||||
case reflect.Ptr:
|
|
||||||
if t1.Elem().Kind() == reflect.Struct {
|
|
||||||
p.stype = t1.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Slice:
|
|
||||||
if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
|
|
||||||
p.stype = t2.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Map:
|
|
||||||
p.mtype = t1
|
|
||||||
p.MapKeyProp = &Properties{}
|
|
||||||
p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
|
||||||
p.MapValProp = &Properties{}
|
|
||||||
vtype := p.mtype.Elem()
|
|
||||||
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
|
|
||||||
// The value type is not a message (*T) or bytes ([]byte),
|
|
||||||
// so we need encoders for the pointer to this type.
|
|
||||||
vtype = reflect.PtrTo(vtype)
|
|
||||||
}
|
|
||||||
p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.stype != nil {
|
|
||||||
if lockGetProp {
|
|
||||||
p.sprop = GetProperties(p.stype)
|
|
||||||
} else {
|
|
||||||
p.sprop = getPropertiesLocked(p.stype)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
|
||||||
)
|
|
||||||
|
|
||||||
// Init populates the properties from a protocol buffer struct tag.
|
// Init populates the properties from a protocol buffer struct tag.
|
||||||
|
//
|
||||||
|
// Deprecated: Do not use.
|
||||||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
||||||
p.init(typ, name, tag, f, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
|
|
||||||
// "bytes,49,opt,def=hello!"
|
|
||||||
p.Name = name
|
p.Name = name
|
||||||
p.OrigName = name
|
p.OrigName = name
|
||||||
if tag == "" {
|
if tag == "" {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
p.Parse(tag)
|
p.Parse(tag)
|
||||||
p.setFieldProps(typ, f, lockGetProp)
|
|
||||||
|
if typ != nil && typ.Kind() == reflect.Map {
|
||||||
|
p.MapKeyProp = new(Properties)
|
||||||
|
p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
|
||||||
|
p.MapValProp = new(Properties)
|
||||||
|
p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var propertiesCache sync.Map // map[reflect.Type]*StructProperties
|
||||||
propertiesMu sync.RWMutex
|
|
||||||
propertiesMap = make(map[reflect.Type]*StructProperties)
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetProperties returns the list of properties for the type represented by t.
|
// GetProperties returns the list of properties for the type represented by t,
|
||||||
// t must represent a generated struct type of a protocol message.
|
// which must be a generated protocol buffer message in the open-struct API,
|
||||||
|
// where protobuf message fields are represented by exported Go struct fields.
|
||||||
|
//
|
||||||
|
// Deprecated: Use protobuf reflection instead.
|
||||||
func GetProperties(t reflect.Type) *StructProperties {
|
func GetProperties(t reflect.Type) *StructProperties {
|
||||||
if t.Kind() != reflect.Struct {
|
if p, ok := propertiesCache.Load(t); ok {
|
||||||
panic("proto: type must have kind struct")
|
return p.(*StructProperties)
|
||||||
}
|
}
|
||||||
|
p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
|
||||||
// Most calls to GetProperties in a long-running program will be
|
return p.(*StructProperties)
|
||||||
// retrieving details for types we have seen before.
|
|
||||||
propertiesMu.RLock()
|
|
||||||
sprop, ok := propertiesMap[t]
|
|
||||||
propertiesMu.RUnlock()
|
|
||||||
if ok {
|
|
||||||
return sprop
|
|
||||||
}
|
|
||||||
|
|
||||||
propertiesMu.Lock()
|
|
||||||
sprop = getPropertiesLocked(t)
|
|
||||||
propertiesMu.Unlock()
|
|
||||||
return sprop
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type (
|
func newProperties(t reflect.Type) *StructProperties {
|
||||||
oneofFuncsIface interface {
|
if t.Kind() != reflect.Struct {
|
||||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
|
||||||
}
|
|
||||||
oneofWrappersIface interface {
|
|
||||||
XXX_OneofWrappers() []interface{}
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// getPropertiesLocked requires that propertiesMu is held.
|
|
||||||
func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|
||||||
if prop, ok := propertiesMap[t]; ok {
|
|
||||||
return prop
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var hasOneof bool
|
||||||
prop := new(StructProperties)
|
prop := new(StructProperties)
|
||||||
// in case of recursive protos, fill this in now.
|
|
||||||
propertiesMap[t] = prop
|
|
||||||
|
|
||||||
// build properties
|
|
||||||
prop.Prop = make([]*Properties, t.NumField())
|
|
||||||
prop.order = make([]int, t.NumField())
|
|
||||||
|
|
||||||
|
// Construct a list of properties for each field in the struct.
|
||||||
for i := 0; i < t.NumField(); i++ {
|
for i := 0; i < t.NumField(); i++ {
|
||||||
f := t.Field(i)
|
|
||||||
p := new(Properties)
|
p := new(Properties)
|
||||||
name := f.Name
|
f := t.Field(i)
|
||||||
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
tagField := f.Tag.Get("protobuf")
|
||||||
|
p.Init(f.Type, f.Name, tagField, &f)
|
||||||
|
|
||||||
oneof := f.Tag.Get("protobuf_oneof") // special case
|
tagOneof := f.Tag.Get("protobuf_oneof")
|
||||||
if oneof != "" {
|
if tagOneof != "" {
|
||||||
// Oneof fields don't use the traditional protobuf tag.
|
hasOneof = true
|
||||||
p.OrigName = oneof
|
p.OrigName = tagOneof
|
||||||
}
|
}
|
||||||
prop.Prop[i] = p
|
|
||||||
prop.order[i] = i
|
// Rename unrelated struct fields with the "XXX_" prefix since so much
|
||||||
if debug {
|
// user code simply checks for this to exclude special fields.
|
||||||
print(i, " ", f.Name, " ", t.String(), " ")
|
if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
|
||||||
if p.Tag > 0 {
|
p.Name = "XXX_" + p.Name
|
||||||
print(p.String())
|
p.OrigName = "XXX_" + p.OrigName
|
||||||
|
} else if p.Weak != "" {
|
||||||
|
p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
|
||||||
|
}
|
||||||
|
|
||||||
|
prop.Prop = append(prop.Prop, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Construct a mapping of oneof field names to properties.
|
||||||
|
if hasOneof {
|
||||||
|
var oneofWrappers []interface{}
|
||||||
|
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
|
||||||
|
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
|
||||||
|
}
|
||||||
|
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
|
||||||
|
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
|
||||||
|
}
|
||||||
|
if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
|
||||||
|
if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
|
||||||
|
oneofWrappers = m.ProtoMessageInfo().OneofWrappers
|
||||||
}
|
}
|
||||||
print("\n")
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// Re-order prop.order.
|
|
||||||
sort.Sort(prop)
|
|
||||||
|
|
||||||
var oots []interface{}
|
|
||||||
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
|
|
||||||
case oneofFuncsIface:
|
|
||||||
_, _, _, oots = m.XXX_OneofFuncs()
|
|
||||||
case oneofWrappersIface:
|
|
||||||
oots = m.XXX_OneofWrappers()
|
|
||||||
}
|
|
||||||
if len(oots) > 0 {
|
|
||||||
// Interpret oneof metadata.
|
|
||||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||||
for _, oot := range oots {
|
for _, wrapper := range oneofWrappers {
|
||||||
oop := &OneofProperties{
|
p := &OneofProperties{
|
||||||
Type: reflect.ValueOf(oot).Type(), // *T
|
Type: reflect.ValueOf(wrapper).Type(), // *T
|
||||||
Prop: new(Properties),
|
Prop: new(Properties),
|
||||||
}
|
}
|
||||||
sft := oop.Type.Elem().Field(0)
|
f := p.Type.Elem().Field(0)
|
||||||
oop.Prop.Name = sft.Name
|
p.Prop.Name = f.Name
|
||||||
oop.Prop.Parse(sft.Tag.Get("protobuf"))
|
p.Prop.Parse(f.Tag.Get("protobuf"))
|
||||||
// There will be exactly one interface field that
|
|
||||||
// this new value is assignable to.
|
|
||||||
for i := 0; i < t.NumField(); i++ {
|
|
||||||
f := t.Field(i)
|
|
||||||
if f.Type.Kind() != reflect.Interface {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if !oop.Type.AssignableTo(f.Type) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
oop.Field = i
|
|
||||||
break
|
|
||||||
}
|
|
||||||
prop.OneofTypes[oop.Prop.OrigName] = oop
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// build required counts
|
// Determine the struct field that contains this oneof.
|
||||||
// build tags
|
// Each wrapper is assignable to exactly one parent field.
|
||||||
reqCount := 0
|
var foundOneof bool
|
||||||
prop.decoderOrigNames = make(map[string]int)
|
for i := 0; i < t.NumField() && !foundOneof; i++ {
|
||||||
for i, p := range prop.Prop {
|
if p.Type.AssignableTo(t.Field(i).Type) {
|
||||||
if strings.HasPrefix(p.Name, "XXX_") {
|
p.Field = i
|
||||||
// Internal fields should not appear in tags/origNames maps.
|
foundOneof = true
|
||||||
// They are handled specially when encoding and decoding.
|
}
|
||||||
continue
|
}
|
||||||
|
if !foundOneof {
|
||||||
|
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
|
||||||
|
}
|
||||||
|
prop.OneofTypes[p.Prop.OrigName] = p
|
||||||
}
|
}
|
||||||
if p.Required {
|
|
||||||
reqCount++
|
|
||||||
}
|
|
||||||
prop.decoderTags.put(p.Tag, i)
|
|
||||||
prop.decoderOrigNames[p.OrigName] = i
|
|
||||||
}
|
}
|
||||||
prop.reqCount = reqCount
|
|
||||||
|
|
||||||
return prop
|
return prop
|
||||||
}
|
}
|
||||||
|
|
||||||
// A global registry of enum types.
|
func (sp *StructProperties) Len() int { return len(sp.Prop) }
|
||||||
// The generated code will register the generated maps by calling RegisterEnum.

func (sp *StructProperties) Less(i, j int) bool { return false }
func (sp *StructProperties) Swap(i, j int)      { return }

var enumValueMaps = make(map[string]map[string]int32)

// RegisterEnum is called from the generated code to install the enum descriptor
// maps into the global table to aid parsing text format protocol buffers.
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
    if _, ok := enumValueMaps[typeName]; ok {
        panic("proto: duplicate enum registered: " + typeName)
    }
    enumValueMaps[typeName] = valueMap
}

// EnumValueMap returns the mapping from names to integers of the
// enum type enumType, or a nil if not found.
func EnumValueMap(enumType string) map[string]int32 {
    return enumValueMaps[enumType]
}

// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
    protoTypedNils = make(map[string]Message)      // a map from proto names to typed nil pointers
    protoMapTypes  = make(map[string]reflect.Type) // a map from proto names to map types
    revProtoTypes  = make(map[reflect.Type]string)
)

// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) {
    if _, ok := protoTypedNils[name]; ok {
        // TODO: Some day, make this a panic.
        log.Printf("proto: duplicate proto type registered: %s", name)
        return
    }
    t := reflect.TypeOf(x)
    if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
        // Generated code always calls RegisterType with nil x.
        // This check is just for extra safety.
        protoTypedNils[name] = x
    } else {
        protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
    }
    revProtoTypes[t] = name
}

// RegisterMapType is called from generated code and maps from the fully qualified
// proto name to the native map type of the proto map definition.
func RegisterMapType(x interface{}, name string) {
    if reflect.TypeOf(x).Kind() != reflect.Map {
        panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
    }
    if _, ok := protoMapTypes[name]; ok {
        log.Printf("proto: duplicate proto type registered: %s", name)
        return
    }
    t := reflect.TypeOf(x)
    protoMapTypes[name] = t
    revProtoTypes[t] = name
}

// MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string {
    type xname interface {
        XXX_MessageName() string
    }
    if m, ok := x.(xname); ok {
        return m.XXX_MessageName()
    }
    return revProtoTypes[reflect.TypeOf(x)]
}

// MessageType returns the message type (pointer to struct) for a named message.
// The type is not guaranteed to implement proto.Message if the name refers to a
// map entry.
func MessageType(name string) reflect.Type {
    if t, ok := protoTypedNils[name]; ok {
        return reflect.TypeOf(t)
    }
    return protoMapTypes[name]
}

// A registry of all linked proto files.
var (
    protoFiles = make(map[string][]byte) // file name => fileDescriptor
)

// RegisterFile is called from generated code and maps from the
// full file name of a .proto file to its compressed FileDescriptorProto.
func RegisterFile(filename string, fileDescriptor []byte) {
    protoFiles[filename] = fileDescriptor
}

// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
func FileDescriptor(filename string) []byte { return protoFiles[filename] }
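The global tables removed above are the legacy registration API that generated code drives at init time; the replacement files added below keep the same entry points. A minimal sketch of that flow, using a hypothetical enum name and value maps that are not part of this diff:

package main

import (
    "fmt"

    proto "github.com/golang/protobuf/proto"
)

func init() {
    // Roughly what protoc-gen-go emits for a hypothetical enum
    // "my.pkg.Example_Status"; the maps here are made up for illustration.
    proto.RegisterEnum("my.pkg.Example_Status",
        map[int32]string{0: "UNKNOWN", 1: "READY"},
        map[string]int32{"UNKNOWN": 0, "READY": 1})
}

func main() {
    // Text-format parsing resolves enum value names through the same table.
    m := proto.EnumValueMap("my.pkg.Example_Status")
    fmt.Println(m["READY"]) // 1
}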
vendor/github.com/golang/protobuf/proto/proto.go (generated, vendored, new file, 167 lines)
@@ -0,0 +1,167 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package proto provides functionality for handling protocol buffer messages.
// In particular, it provides marshaling and unmarshaling between a protobuf
// message and the binary wire format.
//
// See https://developers.google.com/protocol-buffers/docs/gotutorial for
// more information.
//
// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
package proto

import (
    protoV2 "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/reflect/protoreflect"
    "google.golang.org/protobuf/runtime/protoiface"
    "google.golang.org/protobuf/runtime/protoimpl"
)

const (
    ProtoPackageIsVersion1 = true
    ProtoPackageIsVersion2 = true
    ProtoPackageIsVersion3 = true
    ProtoPackageIsVersion4 = true
)

// GeneratedEnum is any enum type generated by protoc-gen-go
// which is a named int32 kind.
// This type exists for documentation purposes.
type GeneratedEnum interface{}

// GeneratedMessage is any message type generated by protoc-gen-go
// which is a pointer to a named struct kind.
// This type exists for documentation purposes.
type GeneratedMessage interface{}

// Message is a protocol buffer message.
//
// This is the v1 version of the message interface and is marginally better
// than an empty interface as it lacks any method to programatically interact
// with the contents of the message.
//
// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
// exposes protobuf reflection as a first-class feature of the interface.
//
// To convert a v1 message to a v2 message, use the MessageV2 function.
// To convert a v2 message to a v1 message, use the MessageV1 function.
type Message = protoiface.MessageV1

// MessageV1 converts either a v1 or v2 message to a v1 message.
// It returns nil if m is nil.
func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
    return protoimpl.X.ProtoMessageV1Of(m)
}

// MessageV2 converts either a v1 or v2 message to a v2 message.
// It returns nil if m is nil.
func MessageV2(m GeneratedMessage) protoV2.Message {
    return protoimpl.X.ProtoMessageV2Of(m)
}

// MessageReflect returns a reflective view for a message.
// It returns nil if m is nil.
func MessageReflect(m Message) protoreflect.Message {
    return protoimpl.X.MessageOf(m)
}

// Marshaler is implemented by messages that can marshal themselves.
// This interface is used by the following functions: Size, Marshal,
// Buffer.Marshal, and Buffer.EncodeMessage.
//
// Deprecated: Do not implement.
type Marshaler interface {
    // Marshal formats the encoded bytes of the message.
    // It should be deterministic and emit valid protobuf wire data.
    // The caller takes ownership of the returned buffer.
    Marshal() ([]byte, error)
}

// Unmarshaler is implemented by messages that can unmarshal themselves.
// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
//
// Deprecated: Do not implement.
type Unmarshaler interface {
    // Unmarshal parses the encoded bytes of the protobuf wire input.
    // The provided buffer is only valid for during method call.
    // It should not reset the receiver message.
    Unmarshal([]byte) error
}

// Merger is implemented by messages that can merge themselves.
// This interface is used by the following functions: Clone and Merge.
//
// Deprecated: Do not implement.
type Merger interface {
    // Merge merges the contents of src into the receiver message.
    // It clones all data structures in src such that it aliases no mutable
    // memory referenced by src.
    Merge(src Message)
}

// RequiredNotSetError is an error type returned when
// marshaling or unmarshaling a message with missing required fields.
type RequiredNotSetError struct {
    err error
}

func (e *RequiredNotSetError) Error() string {
    if e.err != nil {
        return e.err.Error()
    }
    return "proto: required field not set"
}
func (e *RequiredNotSetError) RequiredNotSet() bool {
    return true
}

func checkRequiredNotSet(m protoV2.Message) error {
    if err := protoV2.CheckInitialized(m); err != nil {
        return &RequiredNotSetError{err: err}
    }
    return nil
}

// Clone returns a deep copy of src.
func Clone(src Message) Message {
    return MessageV1(protoV2.Clone(MessageV2(src)))
}

// Merge merges src into dst, which must be messages of the same type.
//
// Populated scalar fields in src are copied to dst, while populated
// singular messages in src are merged into dst by recursively calling Merge.
// The elements of every list field in src is appended to the corresponded
// list fields in dst. The entries of every map field in src is copied into
// the corresponding map field in dst, possibly replacing existing entries.
// The unknown fields of src are appended to the unknown fields of dst.
func Merge(dst, src Message) {
    protoV2.Merge(MessageV2(dst), MessageV2(src))
}

// Equal reports whether two messages are equal.
// If two messages marshal to the same bytes under deterministic serialization,
// then Equal is guaranteed to report true.
//
// Two messages are equal if they are the same protobuf message type,
// have the same set of populated known and extension field values,
// and the same set of unknown fields values.
//
// Scalar values are compared with the equivalent of the == operator in Go,
// except bytes values which are compared using bytes.Equal and
// floating point values which specially treat NaNs as equal.
// Message values are compared by recursively calling Equal.
// Lists are equal if each element value is also equal.
// Maps are equal if they have the same set of keys, where the pair of values
// for each key is also equal.
func Equal(x, y Message) bool {
    return protoV2.Equal(MessageV2(x), MessageV2(y))
}

func isMessageSet(md protoreflect.MessageDescriptor) bool {
    ms, ok := md.(interface{ IsMessageSet() bool })
    return ok && ms.IsMessageSet()
}
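The new proto.go above is a thin forwarding layer over the v2 API: Clone, Merge, and Equal round-trip through MessageV1/MessageV2. A minimal sketch of what that looks like to a caller, using a well-known wrapper type rather than anything from this diff (the string value is arbitrary):

package main

import (
    "fmt"

    proto "github.com/golang/protobuf/proto"
    protoV2 "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
    src := wrapperspb.String("csi-vol-0001")

    // Clone and Equal now forward to the v2 implementation.
    dup := proto.Clone(src).(*wrapperspb.StringValue)
    fmt.Println(proto.Equal(src, dup)) // true

    // A v1 message can be handed to v2-only APIs and converted back.
    var m2 protoV2.Message = proto.MessageV2(src)
    fmt.Println(proto.MessageV1(m2).(*wrapperspb.StringValue).GetValue()) // csi-vol-0001
}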
vendor/github.com/golang/protobuf/proto/registry.go (generated, vendored, new file, 323 lines)
@@ -0,0 +1,323 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io/ioutil"
    "reflect"
    "strings"
    "sync"

    "google.golang.org/protobuf/reflect/protoreflect"
    "google.golang.org/protobuf/reflect/protoregistry"
    "google.golang.org/protobuf/runtime/protoimpl"
)

// filePath is the path to the proto source file.
type filePath = string // e.g., "google/protobuf/descriptor.proto"

// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
type fileDescGZIP = []byte

var fileCache sync.Map // map[filePath]fileDescGZIP

// RegisterFile is called from generated code to register the compressed
// FileDescriptorProto with the file path for a proto source file.
//
// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
func RegisterFile(s filePath, d fileDescGZIP) {
    // Decompress the descriptor.
    zr, err := gzip.NewReader(bytes.NewReader(d))
    if err != nil {
        panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
    }
    b, err := ioutil.ReadAll(zr)
    if err != nil {
        panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
    }

    // Construct a protoreflect.FileDescriptor from the raw descriptor.
    // Note that DescBuilder.Build automatically registers the constructed
    // file descriptor with the v2 registry.
    protoimpl.DescBuilder{RawDescriptor: b}.Build()

    // Locally cache the raw descriptor form for the file.
    fileCache.Store(s, d)
}

// FileDescriptor returns the compressed FileDescriptorProto given the file path
// for a proto source file. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
func FileDescriptor(s filePath) fileDescGZIP {
    if v, ok := fileCache.Load(s); ok {
        return v.(fileDescGZIP)
    }

    // Find the descriptor in the v2 registry.
    var b []byte
    if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
        if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok {
            b = fd.ProtoLegacyRawDesc()
        } else {
            // TODO: Use protodesc.ToFileDescriptorProto to construct
            // a descriptorpb.FileDescriptorProto and marshal it.
            // However, doing so causes the proto package to have a dependency
            // on descriptorpb, leading to cyclic dependency issues.
        }
    }

    // Locally cache the raw descriptor form for the file.
    if len(b) > 0 {
        v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
        return v.(fileDescGZIP)
    }
    return nil
}

// enumName is the name of an enum. For historical reasons, the enum name is
// neither the full Go name nor the full protobuf name of the enum.
// The name is the dot-separated combination of just the proto package that the
// enum is declared within followed by the Go type name of the generated enum.
type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"

// enumsByName maps enum values by name to their numeric counterpart.
type enumsByName = map[string]int32

// enumsByNumber maps enum values by number to their name counterpart.
type enumsByNumber = map[int32]string

var enumCache sync.Map     // map[enumName]enumsByName
var numFilesCache sync.Map // map[protoreflect.FullName]int

// RegisterEnum is called from the generated code to register the mapping of
// enum value names to enum numbers for the enum identified by s.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
    if _, ok := enumCache.Load(s); ok {
        panic("proto: duplicate enum registered: " + s)
    }
    enumCache.Store(s, m)

    // This does not forward registration to the v2 registry since this API
    // lacks sufficient information to construct a complete v2 enum descriptor.
}

// EnumValueMap returns the mapping from enum value names to enum numbers for
// the enum of the given name. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
func EnumValueMap(s enumName) enumsByName {
    if v, ok := enumCache.Load(s); ok {
        return v.(enumsByName)
    }

    // Check whether the cache is stale. If the number of files in the current
    // package differs, then it means that some enums may have been recently
    // registered upstream that we do not know about.
    var protoPkg protoreflect.FullName
    if i := strings.LastIndexByte(s, '.'); i >= 0 {
        protoPkg = protoreflect.FullName(s[:i])
    }
    v, _ := numFilesCache.Load(protoPkg)
    numFiles, _ := v.(int)
    if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
        return nil // cache is up-to-date; was not found earlier
    }

    // Update the enum cache for all enums declared in the given proto package.
    numFiles = 0
    protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
        walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
            name := protoimpl.X.LegacyEnumName(ed)
            if _, ok := enumCache.Load(name); !ok {
                m := make(enumsByName)
                evs := ed.Values()
                for i := evs.Len() - 1; i >= 0; i-- {
                    ev := evs.Get(i)
                    m[string(ev.Name())] = int32(ev.Number())
                }
                enumCache.LoadOrStore(name, m)
            }
        })
        numFiles++
        return true
    })
    numFilesCache.Store(protoPkg, numFiles)

    // Check cache again for enum map.
    if v, ok := enumCache.Load(s); ok {
        return v.(enumsByName)
    }
    return nil
}

// walkEnums recursively walks all enums declared in d.
func walkEnums(d interface {
    Enums() protoreflect.EnumDescriptors
    Messages() protoreflect.MessageDescriptors
}, f func(protoreflect.EnumDescriptor)) {
    eds := d.Enums()
    for i := eds.Len() - 1; i >= 0; i-- {
        f(eds.Get(i))
    }
    mds := d.Messages()
    for i := mds.Len() - 1; i >= 0; i-- {
        walkEnums(mds.Get(i), f)
    }
}

// messageName is the full name of protobuf message.
type messageName = string

var messageTypeCache sync.Map // map[messageName]reflect.Type

// RegisterType is called from generated code to register the message Go type
// for a message of the given name.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
func RegisterType(m Message, s messageName) {
    mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
    if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
        panic(err)
    }
    messageTypeCache.Store(s, reflect.TypeOf(m))
}

// RegisterMapType is called from generated code to register the Go map type
// for a protobuf message representing a map entry.
//
// Deprecated: Do not use.
func RegisterMapType(m interface{}, s messageName) {
    t := reflect.TypeOf(m)
    if t.Kind() != reflect.Map {
        panic(fmt.Sprintf("invalid map kind: %v", t))
    }
    if _, ok := messageTypeCache.Load(s); ok {
        panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
    }
    messageTypeCache.Store(s, t)
}

// MessageType returns the message type for a named message.
// It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
func MessageType(s messageName) reflect.Type {
    if v, ok := messageTypeCache.Load(s); ok {
        return v.(reflect.Type)
    }

    // Derive the message type from the v2 registry.
    var t reflect.Type
    if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
        t = messageGoType(mt)
    }

    // If we could not get a concrete type, it is possible that it is a
    // pseudo-message for a map entry.
    if t == nil {
        d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
        if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
            kt := goTypeForField(md.Fields().ByNumber(1))
            vt := goTypeForField(md.Fields().ByNumber(2))
            t = reflect.MapOf(kt, vt)
        }
    }

    // Locally cache the message type for the given name.
    if t != nil {
        v, _ := messageTypeCache.LoadOrStore(s, t)
        return v.(reflect.Type)
    }
    return nil
}

func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
    switch k := fd.Kind(); k {
    case protoreflect.EnumKind:
        if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
            return enumGoType(et)
        }
        return reflect.TypeOf(protoreflect.EnumNumber(0))
    case protoreflect.MessageKind, protoreflect.GroupKind:
        if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
            return messageGoType(mt)
        }
        return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
    default:
        return reflect.TypeOf(fd.Default().Interface())
    }
}

func enumGoType(et protoreflect.EnumType) reflect.Type {
    return reflect.TypeOf(et.New(0))
}

func messageGoType(mt protoreflect.MessageType) reflect.Type {
    return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
}

// MessageName returns the full protobuf name for the given message type.
//
// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
func MessageName(m Message) messageName {
    if m == nil {
        return ""
    }
    if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
        return m.XXX_MessageName()
    }
    return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
}

// RegisterExtension is called from the generated code to register
// the extension descriptor.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
func RegisterExtension(d *ExtensionDesc) {
    if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
        panic(err)
    }
}

type extensionsByNumber = map[int32]*ExtensionDesc

var extensionCache sync.Map // map[messageName]extensionsByNumber

// RegisteredExtensions returns a map of the registered extensions for the
// provided protobuf message, indexed by the extension field number.
//
// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
func RegisteredExtensions(m Message) extensionsByNumber {
    // Check whether the cache is stale. If the number of extensions for
    // the given message differs, then it means that some extensions were
    // recently registered upstream that we do not know about.
    s := MessageName(m)
    v, _ := extensionCache.Load(s)
    xs, _ := v.(extensionsByNumber)
    if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
        return xs // cache is up-to-date
    }

    // Cache is stale, re-compute the extensions map.
    xs = make(extensionsByNumber)
    protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
        if xd, ok := xt.(*ExtensionDesc); ok {
            xs[int32(xt.TypeDescriptor().Number())] = xd
        } else {
            // TODO: This implies that the protoreflect.ExtensionType is a
            // custom type not generated by protoc-gen-go. We could try and
            // convert the type to an ExtensionDesc.
        }
        return true
    })
    extensionCache.Store(s, xs)
    return xs
}
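The new registry.go keeps the old package-level helpers but backs them with the v2 protoregistry, caching lookups locally. A minimal sketch of the lookup side, using a well-known type instead of anything from this diff:

package main

import (
    "fmt"

    proto "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
    ts := timestamppb.Now()

    // MessageName now resolves through the message's v2 descriptor.
    fmt.Println(proto.MessageName(ts)) // google.protobuf.Timestamp

    // MessageType checks the local cache, then falls back to the v2
    // protoregistry.GlobalTypes registry and caches the result.
    fmt.Println(proto.MessageType("google.protobuf.Timestamp")) // *timestamppb.Timestamp
}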
vendor/github.com/golang/protobuf/proto/table_marshal.go (generated, vendored, 2776 lines changed)
File diff suppressed because it is too large
vendor/github.com/golang/protobuf/proto/table_merge.go (generated, vendored, file deleted, 654 lines)
@@ -1,654 +0,0 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Merge merges the src message into dst.
|
|
||||||
// This assumes that dst and src of the same type and are non-nil.
|
|
||||||
func (a *InternalMessageInfo) Merge(dst, src Message) {
|
|
||||||
mi := atomicLoadMergeInfo(&a.merge)
|
|
||||||
if mi == nil {
|
|
||||||
mi = getMergeInfo(reflect.TypeOf(dst).Elem())
|
|
||||||
atomicStoreMergeInfo(&a.merge, mi)
|
|
||||||
}
|
|
||||||
mi.merge(toPointer(&dst), toPointer(&src))
|
|
||||||
}
|
|
||||||
|
|
||||||
type mergeInfo struct {
|
|
||||||
typ reflect.Type
|
|
||||||
|
|
||||||
initialized int32 // 0: only typ is valid, 1: everything is valid
|
|
||||||
lock sync.Mutex
|
|
||||||
|
|
||||||
fields []mergeFieldInfo
|
|
||||||
unrecognized field // Offset of XXX_unrecognized
|
|
||||||
}
|
|
||||||
|
|
||||||
type mergeFieldInfo struct {
|
|
||||||
field field // Offset of field, guaranteed to be valid
|
|
||||||
|
|
||||||
// isPointer reports whether the value in the field is a pointer.
|
|
||||||
// This is true for the following situations:
|
|
||||||
// * Pointer to struct
|
|
||||||
// * Pointer to basic type (proto2 only)
|
|
||||||
// * Slice (first value in slice header is a pointer)
|
|
||||||
// * String (first value in string header is a pointer)
|
|
||||||
isPointer bool
|
|
||||||
|
|
||||||
// basicWidth reports the width of the field assuming that it is directly
|
|
||||||
// embedded in the struct (as is the case for basic types in proto3).
|
|
||||||
// The possible values are:
|
|
||||||
// 0: invalid
|
|
||||||
// 1: bool
|
|
||||||
// 4: int32, uint32, float32
|
|
||||||
// 8: int64, uint64, float64
|
|
||||||
basicWidth int
|
|
||||||
|
|
||||||
// Where dst and src are pointers to the types being merged.
|
|
||||||
merge func(dst, src pointer)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
mergeInfoMap = map[reflect.Type]*mergeInfo{}
|
|
||||||
mergeInfoLock sync.Mutex
|
|
||||||
)
|
|
||||||
|
|
||||||
func getMergeInfo(t reflect.Type) *mergeInfo {
|
|
||||||
mergeInfoLock.Lock()
|
|
||||||
defer mergeInfoLock.Unlock()
|
|
||||||
mi := mergeInfoMap[t]
|
|
||||||
if mi == nil {
|
|
||||||
mi = &mergeInfo{typ: t}
|
|
||||||
mergeInfoMap[t] = mi
|
|
||||||
}
|
|
||||||
return mi
|
|
||||||
}
|
|
||||||
|
|
||||||
// merge merges src into dst assuming they are both of type *mi.typ.
|
|
||||||
func (mi *mergeInfo) merge(dst, src pointer) {
|
|
||||||
if dst.isNil() {
|
|
||||||
panic("proto: nil destination")
|
|
||||||
}
|
|
||||||
if src.isNil() {
|
|
||||||
return // Nothing to do.
|
|
||||||
}
|
|
||||||
|
|
||||||
if atomic.LoadInt32(&mi.initialized) == 0 {
|
|
||||||
mi.computeMergeInfo()
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, fi := range mi.fields {
|
|
||||||
sfp := src.offset(fi.field)
|
|
||||||
|
|
||||||
// As an optimization, we can avoid the merge function call cost
|
|
||||||
// if we know for sure that the source will have no effect
|
|
||||||
// by checking if it is the zero value.
|
|
||||||
if unsafeAllowed {
|
|
||||||
if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if fi.basicWidth > 0 {
|
|
||||||
switch {
|
|
||||||
case fi.basicWidth == 1 && !*sfp.toBool():
|
|
||||||
continue
|
|
||||||
case fi.basicWidth == 4 && *sfp.toUint32() == 0:
|
|
||||||
continue
|
|
||||||
case fi.basicWidth == 8 && *sfp.toUint64() == 0:
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
dfp := dst.offset(fi.field)
|
|
||||||
fi.merge(dfp, sfp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: Make this faster?
|
|
||||||
out := dst.asPointerTo(mi.typ).Elem()
|
|
||||||
in := src.asPointerTo(mi.typ).Elem()
|
|
||||||
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
|
||||||
emOut, _ := extendable(out.Addr().Interface())
|
|
||||||
mIn, muIn := emIn.extensionsRead()
|
|
||||||
if mIn != nil {
|
|
||||||
mOut := emOut.extensionsWrite()
|
|
||||||
muIn.Lock()
|
|
||||||
mergeExtension(mOut, mIn)
|
|
||||||
muIn.Unlock()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if mi.unrecognized.IsValid() {
|
|
||||||
if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
|
|
||||||
*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mi *mergeInfo) computeMergeInfo() {
|
|
||||||
mi.lock.Lock()
|
|
||||||
defer mi.lock.Unlock()
|
|
||||||
if mi.initialized != 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t := mi.typ
|
|
||||||
n := t.NumField()
|
|
||||||
|
|
||||||
props := GetProperties(t)
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
f := t.Field(i)
|
|
||||||
if strings.HasPrefix(f.Name, "XXX_") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
mfi := mergeFieldInfo{field: toField(&f)}
|
|
||||||
tf := f.Type
|
|
||||||
|
|
||||||
// As an optimization, we can avoid the merge function call cost
|
|
||||||
// if we know for sure that the source will have no effect
|
|
||||||
// by checking if it is the zero value.
|
|
||||||
if unsafeAllowed {
|
|
||||||
switch tf.Kind() {
|
|
||||||
case reflect.Ptr, reflect.Slice, reflect.String:
|
|
||||||
// As a special case, we assume slices and strings are pointers
|
|
||||||
// since we know that the first field in the SliceSlice or
|
|
||||||
// StringHeader is a data pointer.
|
|
||||||
mfi.isPointer = true
|
|
||||||
case reflect.Bool:
|
|
||||||
mfi.basicWidth = 1
|
|
||||||
case reflect.Int32, reflect.Uint32, reflect.Float32:
|
|
||||||
mfi.basicWidth = 4
|
|
||||||
case reflect.Int64, reflect.Uint64, reflect.Float64:
|
|
||||||
mfi.basicWidth = 8
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unwrap tf to get at its most basic type.
|
|
||||||
var isPointer, isSlice bool
|
|
||||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
|
||||||
isSlice = true
|
|
||||||
tf = tf.Elem()
|
|
||||||
}
|
|
||||||
if tf.Kind() == reflect.Ptr {
|
|
||||||
isPointer = true
|
|
||||||
tf = tf.Elem()
|
|
||||||
}
|
|
||||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
|
||||||
panic("both pointer and slice for basic type in " + tf.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
switch tf.Kind() {
|
|
||||||
case reflect.Int32:
|
|
||||||
switch {
|
|
||||||
case isSlice: // E.g., []int32
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
|
|
||||||
/*
|
|
||||||
sfsp := src.toInt32Slice()
|
|
||||||
if *sfsp != nil {
|
|
||||||
dfsp := dst.toInt32Slice()
|
|
||||||
*dfsp = append(*dfsp, *sfsp...)
|
|
||||||
if *dfsp == nil {
|
|
||||||
*dfsp = []int64{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
sfs := src.getInt32Slice()
|
|
||||||
if sfs != nil {
|
|
||||||
dfs := dst.getInt32Slice()
|
|
||||||
dfs = append(dfs, sfs...)
|
|
||||||
if dfs == nil {
|
|
||||||
dfs = []int32{}
|
|
||||||
}
|
|
||||||
dst.setInt32Slice(dfs)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case isPointer: // E.g., *int32
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
|
|
||||||
/*
|
|
||||||
sfpp := src.toInt32Ptr()
|
|
||||||
if *sfpp != nil {
|
|
||||||
dfpp := dst.toInt32Ptr()
|
|
||||||
if *dfpp == nil {
|
|
||||||
*dfpp = Int32(**sfpp)
|
|
||||||
} else {
|
|
||||||
**dfpp = **sfpp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
sfp := src.getInt32Ptr()
|
|
||||||
if sfp != nil {
|
|
||||||
dfp := dst.getInt32Ptr()
|
|
||||||
if dfp == nil {
|
|
||||||
dst.setInt32Ptr(*sfp)
|
|
||||||
} else {
|
|
||||||
*dfp = *sfp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default: // E.g., int32
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
if v := *src.toInt32(); v != 0 {
|
|
||||||
*dst.toInt32() = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Int64:
|
|
||||||
switch {
|
|
||||||
case isSlice: // E.g., []int64
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sfsp := src.toInt64Slice()
|
|
||||||
if *sfsp != nil {
|
|
||||||
dfsp := dst.toInt64Slice()
|
|
||||||
*dfsp = append(*dfsp, *sfsp...)
|
|
||||||
if *dfsp == nil {
|
|
||||||
*dfsp = []int64{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case isPointer: // E.g., *int64
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sfpp := src.toInt64Ptr()
|
|
||||||
if *sfpp != nil {
|
|
||||||
dfpp := dst.toInt64Ptr()
|
|
||||||
if *dfpp == nil {
|
|
||||||
*dfpp = Int64(**sfpp)
|
|
||||||
} else {
|
|
||||||
**dfpp = **sfpp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default: // E.g., int64
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
if v := *src.toInt64(); v != 0 {
|
|
||||||
*dst.toInt64() = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Uint32:
|
|
||||||
switch {
|
|
||||||
case isSlice: // E.g., []uint32
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sfsp := src.toUint32Slice()
|
|
||||||
if *sfsp != nil {
|
|
||||||
dfsp := dst.toUint32Slice()
|
|
||||||
*dfsp = append(*dfsp, *sfsp...)
|
|
||||||
if *dfsp == nil {
|
|
||||||
*dfsp = []uint32{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case isPointer: // E.g., *uint32
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sfpp := src.toUint32Ptr()
|
|
||||||
if *sfpp != nil {
|
|
||||||
dfpp := dst.toUint32Ptr()
|
|
||||||
if *dfpp == nil {
|
|
||||||
*dfpp = Uint32(**sfpp)
|
|
||||||
} else {
|
|
||||||
**dfpp = **sfpp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default: // E.g., uint32
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
if v := *src.toUint32(); v != 0 {
|
|
||||||
*dst.toUint32() = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Uint64:
|
|
||||||
switch {
|
|
||||||
case isSlice: // E.g., []uint64
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sfsp := src.toUint64Slice()
|
|
||||||
if *sfsp != nil {
|
|
||||||
dfsp := dst.toUint64Slice()
|
|
||||||
*dfsp = append(*dfsp, *sfsp...)
|
|
||||||
if *dfsp == nil {
|
|
||||||
*dfsp = []uint64{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case isPointer: // E.g., *uint64
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sfpp := src.toUint64Ptr()
|
|
||||||
if *sfpp != nil {
|
|
||||||
dfpp := dst.toUint64Ptr()
|
|
||||||
if *dfpp == nil {
|
|
||||||
*dfpp = Uint64(**sfpp)
|
|
||||||
} else {
|
|
||||||
**dfpp = **sfpp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default: // E.g., uint64
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
if v := *src.toUint64(); v != 0 {
|
|
||||||
*dst.toUint64() = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Float32:
|
|
||||||
switch {
|
|
||||||
case isSlice: // E.g., []float32
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sfsp := src.toFloat32Slice()
|
|
||||||
if *sfsp != nil {
|
|
||||||
dfsp := dst.toFloat32Slice()
|
|
||||||
*dfsp = append(*dfsp, *sfsp...)
|
|
||||||
if *dfsp == nil {
|
|
||||||
*dfsp = []float32{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case isPointer: // E.g., *float32
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sfpp := src.toFloat32Ptr()
|
|
||||||
if *sfpp != nil {
|
|
||||||
dfpp := dst.toFloat32Ptr()
|
|
||||||
if *dfpp == nil {
|
|
||||||
*dfpp = Float32(**sfpp)
|
|
||||||
} else {
|
|
||||||
**dfpp = **sfpp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default: // E.g., float32
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
if v := *src.toFloat32(); v != 0 {
|
|
||||||
*dst.toFloat32() = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Float64:
|
|
||||||
switch {
|
|
||||||
case isSlice: // E.g., []float64
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sfsp := src.toFloat64Slice()
|
|
||||||
if *sfsp != nil {
|
|
||||||
dfsp := dst.toFloat64Slice()
|
|
||||||
*dfsp = append(*dfsp, *sfsp...)
|
|
||||||
if *dfsp == nil {
|
|
||||||
*dfsp = []float64{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case isPointer: // E.g., *float64
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sfpp := src.toFloat64Ptr()
|
|
||||||
if *sfpp != nil {
|
|
||||||
dfpp := dst.toFloat64Ptr()
|
|
||||||
if *dfpp == nil {
|
|
||||||
*dfpp = Float64(**sfpp)
|
|
||||||
} else {
|
|
||||||
**dfpp = **sfpp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default: // E.g., float64
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
if v := *src.toFloat64(); v != 0 {
|
|
||||||
*dst.toFloat64() = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Bool:
|
|
||||||
switch {
|
|
||||||
case isSlice: // E.g., []bool
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sfsp := src.toBoolSlice()
|
|
||||||
if *sfsp != nil {
|
|
||||||
dfsp := dst.toBoolSlice()
|
|
||||||
*dfsp = append(*dfsp, *sfsp...)
|
|
||||||
if *dfsp == nil {
|
|
||||||
*dfsp = []bool{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case isPointer: // E.g., *bool
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sfpp := src.toBoolPtr()
|
|
||||||
if *sfpp != nil {
|
|
||||||
dfpp := dst.toBoolPtr()
|
|
||||||
if *dfpp == nil {
|
|
||||||
*dfpp = Bool(**sfpp)
|
|
||||||
} else {
|
|
||||||
**dfpp = **sfpp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default: // E.g., bool
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
if v := *src.toBool(); v {
|
|
||||||
*dst.toBool() = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.String:
|
|
||||||
switch {
|
|
||||||
case isSlice: // E.g., []string
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sfsp := src.toStringSlice()
|
|
||||||
if *sfsp != nil {
|
|
||||||
dfsp := dst.toStringSlice()
|
|
||||||
*dfsp = append(*dfsp, *sfsp...)
|
|
||||||
if *dfsp == nil {
|
|
||||||
*dfsp = []string{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case isPointer: // E.g., *string
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sfpp := src.toStringPtr()
|
|
||||||
if *sfpp != nil {
|
|
||||||
dfpp := dst.toStringPtr()
|
|
||||||
if *dfpp == nil {
|
|
||||||
*dfpp = String(**sfpp)
|
|
||||||
} else {
|
|
||||||
**dfpp = **sfpp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default: // E.g., string
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
if v := *src.toString(); v != "" {
|
|
||||||
*dst.toString() = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Slice:
|
|
||||||
isProto3 := props.Prop[i].proto3
|
|
||||||
switch {
|
|
||||||
case isPointer:
|
|
||||||
panic("bad pointer in byte slice case in " + tf.Name())
|
|
||||||
case tf.Elem().Kind() != reflect.Uint8:
|
|
||||||
panic("bad element kind in byte slice case in " + tf.Name())
|
|
||||||
case isSlice: // E.g., [][]byte
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sbsp := src.toBytesSlice()
|
|
||||||
if *sbsp != nil {
|
|
||||||
dbsp := dst.toBytesSlice()
|
|
||||||
for _, sb := range *sbsp {
|
|
||||||
if sb == nil {
|
|
||||||
*dbsp = append(*dbsp, nil)
|
|
||||||
} else {
|
|
||||||
*dbsp = append(*dbsp, append([]byte{}, sb...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if *dbsp == nil {
|
|
||||||
*dbsp = [][]byte{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default: // E.g., []byte
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sbp := src.toBytes()
|
|
||||||
if *sbp != nil {
|
|
||||||
dbp := dst.toBytes()
|
|
||||||
if !isProto3 || len(*sbp) > 0 {
|
|
||||||
*dbp = append([]byte{}, *sbp...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Struct:
|
|
||||||
switch {
|
|
||||||
case !isPointer:
|
|
||||||
panic(fmt.Sprintf("message field %s without pointer", tf))
|
|
||||||
case isSlice: // E.g., []*pb.T
|
|
||||||
mi := getMergeInfo(tf)
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sps := src.getPointerSlice()
|
|
||||||
if sps != nil {
|
|
||||||
dps := dst.getPointerSlice()
|
|
||||||
for _, sp := range sps {
|
|
||||||
var dp pointer
|
|
||||||
if !sp.isNil() {
|
|
||||||
dp = valToPointer(reflect.New(tf))
|
|
||||||
mi.merge(dp, sp)
|
|
||||||
}
|
|
||||||
dps = append(dps, dp)
|
|
||||||
}
|
|
||||||
if dps == nil {
|
|
||||||
dps = []pointer{}
|
|
||||||
}
|
|
||||||
dst.setPointerSlice(dps)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default: // E.g., *pb.T
|
|
||||||
mi := getMergeInfo(tf)
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sp := src.getPointer()
|
|
||||||
if !sp.isNil() {
|
|
||||||
dp := dst.getPointer()
|
|
||||||
if dp.isNil() {
|
|
||||||
dp = valToPointer(reflect.New(tf))
|
|
||||||
dst.setPointer(dp)
|
|
||||||
}
|
|
||||||
mi.merge(dp, sp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Map:
|
|
||||||
switch {
|
|
||||||
case isPointer || isSlice:
|
|
||||||
panic("bad pointer or slice in map case in " + tf.Name())
|
|
||||||
default: // E.g., map[K]V
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
sm := src.asPointerTo(tf).Elem()
|
|
||||||
if sm.Len() == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
dm := dst.asPointerTo(tf).Elem()
|
|
||||||
if dm.IsNil() {
|
|
||||||
dm.Set(reflect.MakeMap(tf))
|
|
||||||
}
|
|
||||||
|
|
||||||
switch tf.Elem().Kind() {
|
|
||||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
|
||||||
for _, key := range sm.MapKeys() {
|
|
||||||
val := sm.MapIndex(key)
|
|
||||||
val = reflect.ValueOf(Clone(val.Interface().(Message)))
|
|
||||||
dm.SetMapIndex(key, val)
|
|
||||||
}
|
|
||||||
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
|
|
||||||
for _, key := range sm.MapKeys() {
|
|
||||||
val := sm.MapIndex(key)
|
|
||||||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
|
||||||
dm.SetMapIndex(key, val)
|
|
||||||
}
|
|
||||||
default: // Basic type (e.g., string)
|
|
||||||
for _, key := range sm.MapKeys() {
|
|
||||||
val := sm.MapIndex(key)
|
|
||||||
dm.SetMapIndex(key, val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Interface:
|
|
||||||
// Must be oneof field.
|
|
||||||
switch {
|
|
||||||
case isPointer || isSlice:
|
|
||||||
panic("bad pointer or slice in interface case in " + tf.Name())
|
|
||||||
default: // E.g., interface{}
|
|
||||||
// TODO: Make this faster?
|
|
||||||
mfi.merge = func(dst, src pointer) {
|
|
||||||
su := src.asPointerTo(tf).Elem()
|
|
||||||
if !su.IsNil() {
|
|
||||||
du := dst.asPointerTo(tf).Elem()
|
|
||||||
typ := su.Elem().Type()
|
|
||||||
if du.IsNil() || du.Elem().Type() != typ {
|
|
||||||
du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
|
|
||||||
}
|
|
||||||
sv := su.Elem().Elem().Field(0)
|
|
||||||
if sv.Kind() == reflect.Ptr && sv.IsNil() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
dv := du.Elem().Elem().Field(0)
|
|
||||||
if dv.Kind() == reflect.Ptr && dv.IsNil() {
|
|
||||||
dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
|
|
||||||
}
|
|
||||||
switch sv.Type().Kind() {
|
|
||||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
|
||||||
Merge(dv.Interface().(Message), sv.Interface().(Message))
|
|
||||||
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
|
|
||||||
dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
|
|
||||||
default: // Basic type (e.g., string)
|
|
||||||
dv.Set(sv)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("merger not found for type:%s", tf))
|
|
||||||
}
|
|
||||||
mi.fields = append(mi.fields, mfi)
|
|
||||||
}
|
|
||||||
|
|
||||||
mi.unrecognized = invalidField
|
|
||||||
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
|
|
||||||
if f.Type != reflect.TypeOf([]byte{}) {
|
|
||||||
panic("expected XXX_unrecognized to be of type []byte")
|
|
||||||
}
|
|
||||||
mi.unrecognized = toField(&f)
|
|
||||||
}
|
|
||||||
|
|
||||||
atomic.StoreInt32(&mi.initialized, 1)
|
|
||||||
}
|
|
vendor/github.com/golang/protobuf/proto/table_unmarshal.go (generated, vendored, 2053 lines changed)
File diff suppressed because it is too large
vendor/github.com/golang/protobuf/proto/text.go (generated, vendored, file deleted, 845 lines)
@@ -1,845 +0,0 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
// Functions for writing the text protocol buffer format.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"encoding"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
newline = []byte("\n")
|
|
||||||
spaces = []byte(" ")
|
|
||||||
endBraceNewline = []byte("}\n")
|
|
||||||
backslashN = []byte{'\\', 'n'}
|
|
||||||
backslashR = []byte{'\\', 'r'}
|
|
||||||
backslashT = []byte{'\\', 't'}
|
|
||||||
backslashDQ = []byte{'\\', '"'}
|
|
||||||
backslashBS = []byte{'\\', '\\'}
|
|
||||||
posInf = []byte("inf")
|
|
||||||
negInf = []byte("-inf")
|
|
||||||
nan = []byte("nan")
|
|
||||||
)
|
|
||||||
|
|
||||||
type writer interface {
|
|
||||||
io.Writer
|
|
||||||
WriteByte(byte) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// textWriter is an io.Writer that tracks its indentation level.
|
|
||||||
type textWriter struct {
|
|
||||||
ind int
|
|
||||||
complete bool // if the current position is a complete line
|
|
||||||
compact bool // whether to write out as a one-liner
|
|
||||||
w writer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) WriteString(s string) (n int, err error) {
|
|
||||||
if !strings.Contains(s, "\n") {
|
|
||||||
if !w.compact && w.complete {
|
|
||||||
w.writeIndent()
|
|
||||||
}
|
|
||||||
w.complete = false
|
|
||||||
return io.WriteString(w.w, s)
|
|
||||||
}
|
|
||||||
// WriteString is typically called without newlines, so this
|
|
||||||
// codepath and its copy are rare. We copy to avoid
|
|
||||||
// duplicating all of Write's logic here.
|
|
||||||
return w.Write([]byte(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) Write(p []byte) (n int, err error) {
|
|
||||||
newlines := bytes.Count(p, newline)
|
|
||||||
if newlines == 0 {
|
|
||||||
if !w.compact && w.complete {
|
|
||||||
w.writeIndent()
|
|
||||||
}
|
|
||||||
n, err = w.w.Write(p)
|
|
||||||
w.complete = false
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
frags := bytes.SplitN(p, newline, newlines+1)
|
|
||||||
if w.compact {
|
|
||||||
for i, frag := range frags {
|
|
||||||
if i > 0 {
|
|
||||||
if err := w.w.WriteByte(' '); err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
nn, err := w.w.Write(frag)
|
|
||||||
n += nn
|
|
||||||
if err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, frag := range frags {
|
|
||||||
if w.complete {
|
|
||||||
w.writeIndent()
|
|
||||||
}
|
|
||||||
nn, err := w.w.Write(frag)
|
|
||||||
n += nn
|
|
||||||
if err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
if i+1 < len(frags) {
|
|
||||||
if err := w.w.WriteByte('\n'); err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.complete = len(frags[len(frags)-1]) == 0
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) WriteByte(c byte) error {
|
|
||||||
if w.compact && c == '\n' {
|
|
||||||
c = ' '
|
|
||||||
}
|
|
||||||
if !w.compact && w.complete {
|
|
||||||
w.writeIndent()
|
|
||||||
}
|
|
||||||
err := w.w.WriteByte(c)
|
|
||||||
w.complete = c == '\n'
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) indent() { w.ind++ }
|
|
||||||
|
|
||||||
func (w *textWriter) unindent() {
|
|
||||||
if w.ind == 0 {
|
|
||||||
log.Print("proto: textWriter unindented too far")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
w.ind--
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeName(w *textWriter, props *Properties) error {
|
|
||||||
if _, err := w.WriteString(props.OrigName); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if props.Wire != "group" {
|
|
||||||
return w.WriteByte(':')
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func requiresQuotes(u string) bool {
|
|
||||||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
|
||||||
for _, ch := range u {
|
|
||||||
switch {
|
|
||||||
case ch == '.' || ch == '/' || ch == '_':
|
|
||||||
continue
|
|
||||||
case '0' <= ch && ch <= '9':
|
|
||||||
continue
|
|
||||||
case 'A' <= ch && ch <= 'Z':
|
|
||||||
continue
|
|
||||||
case 'a' <= ch && ch <= 'z':
|
|
||||||
continue
|
|
||||||
default:
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// isAny reports whether sv is a google.protobuf.Any message
|
|
||||||
func isAny(sv reflect.Value) bool {
|
|
||||||
type wkt interface {
|
|
||||||
XXX_WellKnownType() string
|
|
||||||
}
|
|
||||||
t, ok := sv.Addr().Interface().(wkt)
|
|
||||||
return ok && t.XXX_WellKnownType() == "Any"
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
|
||||||
//
|
|
||||||
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
|
|
||||||
// required messages are not linked in).
|
|
||||||
//
|
|
||||||
// It returns (true, error) when sv was written in expanded format or an error
|
|
||||||
// was encountered.
|
|
||||||
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
|
|
||||||
turl := sv.FieldByName("TypeUrl")
|
|
||||||
val := sv.FieldByName("Value")
|
|
||||||
if !turl.IsValid() || !val.IsValid() {
|
|
||||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
|
||||||
}
|
|
||||||
|
|
||||||
b, ok := val.Interface().([]byte)
|
|
||||||
if !ok {
|
|
||||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
|
||||||
}
|
|
||||||
|
|
||||||
parts := strings.Split(turl.String(), "/")
|
|
||||||
mt := MessageType(parts[len(parts)-1])
|
|
||||||
if mt == nil {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
m := reflect.New(mt.Elem())
|
|
||||||
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
w.Write([]byte("["))
|
|
||||||
u := turl.String()
|
|
||||||
if requiresQuotes(u) {
|
|
||||||
writeString(w, u)
|
|
||||||
} else {
|
|
||||||
w.Write([]byte(u))
|
|
||||||
}
|
|
||||||
if w.compact {
|
|
||||||
w.Write([]byte("]:<"))
|
|
||||||
} else {
|
|
||||||
w.Write([]byte("]: <\n"))
|
|
||||||
w.ind++
|
|
||||||
}
|
|
||||||
if err := tm.writeStruct(w, m.Elem()); err != nil {
|
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
if w.compact {
|
|
||||||
w.Write([]byte("> "))
|
|
||||||
} else {
|
|
||||||
w.ind--
|
|
||||||
w.Write([]byte(">\n"))
|
|
||||||
}
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|
||||||
if tm.ExpandAny && isAny(sv) {
|
|
||||||
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
st := sv.Type()
|
|
||||||
sprops := GetProperties(st)
|
|
||||||
for i := 0; i < sv.NumField(); i++ {
|
|
||||||
fv := sv.Field(i)
|
|
||||||
props := sprops.Prop[i]
|
|
||||||
name := st.Field(i).Name
|
|
||||||
|
|
||||||
if name == "XXX_NoUnkeyedLiteral" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(name, "XXX_") {
|
|
||||||
// There are two XXX_ fields:
|
|
||||||
// XXX_unrecognized []byte
|
|
||||||
// XXX_extensions map[int32]proto.Extension
|
|
||||||
// The first is handled here;
|
|
||||||
// the second is handled at the bottom of this function.
|
|
||||||
if name == "XXX_unrecognized" && !fv.IsNil() {
|
|
||||||
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
|
||||||
// Field not filled in. This could be an optional field or
|
|
||||||
// a required field that wasn't filled in. Either way, there
|
|
||||||
// isn't anything we can show for it.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if fv.Kind() == reflect.Slice && fv.IsNil() {
|
|
||||||
// Repeated field that is empty, or a bytes field that is unused.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if props.Repeated && fv.Kind() == reflect.Slice {
|
|
||||||
// Repeated field.
|
|
||||||
for j := 0; j < fv.Len(); j++ {
|
|
||||||
if err := writeName(w, props); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte(' '); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
v := fv.Index(j)
|
|
||||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
|
||||||
// A nil message in a repeated field is not valid,
|
|
||||||
// but we can handle that more gracefully than panicking.
|
|
||||||
if _, err := w.Write([]byte("<nil>\n")); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := tm.writeAny(w, v, props); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if fv.Kind() == reflect.Map {
|
|
||||||
// Map fields are rendered as a repeated struct with key/value fields.
|
|
||||||
keys := fv.MapKeys()
|
|
||||||
sort.Sort(mapKeys(keys))
|
|
||||||
for _, key := range keys {
|
|
||||||
val := fv.MapIndex(key)
|
|
||||||
if err := writeName(w, props); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte(' '); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// open struct
|
|
||||||
if err := w.WriteByte('<'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.indent()
|
|
||||||
// key
|
|
||||||
if _, err := w.WriteString("key:"); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte(' '); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// nil values aren't legal, but we can avoid panicking because of them.
|
|
||||||
if val.Kind() != reflect.Ptr || !val.IsNil() {
|
|
||||||
// value
|
|
||||||
if _, err := w.WriteString("value:"); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte(' '); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := tm.writeAny(w, val, props.MapValProp); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// close struct
|
|
||||||
w.unindent()
|
|
||||||
if err := w.WriteByte('>'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
|
|
||||||
// empty bytes field
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
|
|
||||||
// proto3 non-repeated scalar field; skip if zero value
|
|
||||||
if isProto3Zero(fv) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if fv.Kind() == reflect.Interface {
|
|
||||||
// Check if it is a oneof.
|
|
||||||
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
|
|
||||||
// fv is nil, or holds a pointer to generated struct.
|
|
||||||
// That generated struct has exactly one field,
|
|
||||||
// which has a protobuf struct tag.
|
|
||||||
if fv.IsNil() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
inner := fv.Elem().Elem() // interface -> *T -> T
|
|
||||||
tag := inner.Type().Field(0).Tag.Get("protobuf")
|
|
||||||
props = new(Properties) // Overwrite the outer props var, but not its pointee.
|
|
||||||
props.Parse(tag)
|
|
||||||
// Write the value in the oneof, not the oneof itself.
|
|
||||||
fv = inner.Field(0)
|
|
||||||
|
|
||||||
// Special case to cope with malformed messages gracefully:
|
|
||||||
// If the value in the oneof is a nil pointer, don't panic
|
|
||||||
// in writeAny.
|
|
||||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
|
||||||
// Use errors.New so writeAny won't render quotes.
|
|
||||||
msg := errors.New("/* nil */")
|
|
||||||
fv = reflect.ValueOf(&msg).Elem()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := writeName(w, props); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte(' '); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Enums have a String method, so writeAny will work fine.
|
|
||||||
if err := tm.writeAny(w, fv, props); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extensions (the XXX_extensions field).
|
|
||||||
pv := sv.Addr()
|
|
||||||
if _, err := extendable(pv.Interface()); err == nil {
|
|
||||||
if err := tm.writeExtensions(w, pv); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
|
|
||||||
|
|
||||||
// writeAny writes an arbitrary field.
|
|
||||||
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
|
||||||
v = reflect.Indirect(v)
|
|
||||||
|
|
||||||
// Floats have special cases.
|
|
||||||
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
|
|
||||||
x := v.Float()
|
|
||||||
var b []byte
|
|
||||||
switch {
|
|
||||||
case math.IsInf(x, 1):
|
|
||||||
b = posInf
|
|
||||||
case math.IsInf(x, -1):
|
|
||||||
b = negInf
|
|
||||||
case math.IsNaN(x):
|
|
||||||
b = nan
|
|
||||||
}
|
|
||||||
if b != nil {
|
|
||||||
_, err := w.Write(b)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Other values are handled below.
|
|
||||||
}
|
|
||||||
|
|
||||||
// We don't attempt to serialise every possible value type; only those
|
|
||||||
// that can occur in protocol buffers.
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Slice:
|
|
||||||
// Should only be a []byte; repeated fields are handled in writeStruct.
|
|
||||||
if err := writeString(w, string(v.Bytes())); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case reflect.String:
|
|
||||||
if err := writeString(w, v.String()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case reflect.Struct:
|
|
||||||
// Required/optional group/message.
|
|
||||||
var bra, ket byte = '<', '>'
|
|
||||||
if props != nil && props.Wire == "group" {
|
|
||||||
bra, ket = '{', '}'
|
|
||||||
}
|
|
||||||
if err := w.WriteByte(bra); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.indent()
|
|
||||||
if v.CanAddr() {
|
|
||||||
// Calling v.Interface on a struct causes the reflect package to
|
|
||||||
// copy the entire struct. This is racy with the new Marshaler
|
|
||||||
// since we atomically update the XXX_sizecache.
|
|
||||||
//
|
|
||||||
// Thus, we retrieve a pointer to the struct if possible to avoid
|
|
||||||
// a race since v.Interface on the pointer doesn't copy the struct.
|
|
||||||
//
|
|
||||||
// If v is not addressable, then we are not worried about a race
|
|
||||||
// since it implies that the binary Marshaler cannot possibly be
|
|
||||||
// mutating this value.
|
|
||||||
v = v.Addr()
|
|
||||||
}
|
|
||||||
if v.Type().Implements(textMarshalerType) {
|
|
||||||
text, err := v.Interface().(encoding.TextMarshaler).MarshalText()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err = w.Write(text); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if v.Kind() == reflect.Ptr {
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
if err := tm.writeStruct(w, v); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.unindent()
|
|
||||||
if err := w.WriteByte(ket); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
_, err := fmt.Fprint(w, v.Interface())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// equivalent to C's isprint.
|
|
||||||
func isprint(c byte) bool {
|
|
||||||
return c >= 0x20 && c < 0x7f
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeString writes a string in the protocol buffer text format.
|
|
||||||
// It is similar to strconv.Quote except we don't use Go escape sequences,
|
|
||||||
// we treat the string as a byte sequence, and we use octal escapes.
|
|
||||||
// These differences are to maintain interoperability with the other
|
|
||||||
// languages' implementations of the text format.
|
|
||||||
func writeString(w *textWriter, s string) error {
|
|
||||||
// use WriteByte here to get any needed indent
|
|
||||||
if err := w.WriteByte('"'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Loop over the bytes, not the runes.
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
var err error
|
|
||||||
// Divergence from C++: we don't escape apostrophes.
|
|
||||||
// There's no need to escape them, and the C++ parser
|
|
||||||
// copes with a naked apostrophe.
|
|
||||||
switch c := s[i]; c {
|
|
||||||
case '\n':
|
|
||||||
_, err = w.w.Write(backslashN)
|
|
||||||
case '\r':
|
|
||||||
_, err = w.w.Write(backslashR)
|
|
||||||
case '\t':
|
|
||||||
_, err = w.w.Write(backslashT)
|
|
||||||
case '"':
|
|
||||||
_, err = w.w.Write(backslashDQ)
|
|
||||||
case '\\':
|
|
||||||
_, err = w.w.Write(backslashBS)
|
|
||||||
default:
|
|
||||||
if isprint(c) {
|
|
||||||
err = w.w.WriteByte(c)
|
|
||||||
} else {
|
|
||||||
_, err = fmt.Fprintf(w.w, "\\%03o", c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return w.WriteByte('"')
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
|
|
||||||
if !w.compact {
|
|
||||||
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
b := NewBuffer(data)
|
|
||||||
for b.index < len(b.buf) {
|
|
||||||
x, err := b.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
_, err := fmt.Fprintf(w, "/* %v */\n", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
wire, tag := x&7, x>>3
|
|
||||||
if wire == WireEndGroup {
|
|
||||||
w.unindent()
|
|
||||||
if _, err := w.Write(endBraceNewline); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _, err := fmt.Fprint(w, tag); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if wire != WireStartGroup {
|
|
||||||
if err := w.WriteByte(':'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !w.compact || wire == WireStartGroup {
|
|
||||||
if err := w.WriteByte(' '); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
switch wire {
|
|
||||||
case WireBytes:
|
|
||||||
buf, e := b.DecodeRawBytes(false)
|
|
||||||
if e == nil {
|
|
||||||
_, err = fmt.Fprintf(w, "%q", buf)
|
|
||||||
} else {
|
|
||||||
_, err = fmt.Fprintf(w, "/* %v */", e)
|
|
||||||
}
|
|
||||||
case WireFixed32:
|
|
||||||
x, err = b.DecodeFixed32()
|
|
||||||
err = writeUnknownInt(w, x, err)
|
|
||||||
case WireFixed64:
|
|
||||||
x, err = b.DecodeFixed64()
|
|
||||||
err = writeUnknownInt(w, x, err)
|
|
||||||
case WireStartGroup:
|
|
||||||
err = w.WriteByte('{')
|
|
||||||
w.indent()
|
|
||||||
case WireVarint:
|
|
||||||
x, err = b.DecodeVarint()
|
|
||||||
err = writeUnknownInt(w, x, err)
|
|
||||||
default:
|
|
||||||
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err = w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeUnknownInt(w *textWriter, x uint64, err error) error {
|
|
||||||
if err == nil {
|
|
||||||
_, err = fmt.Fprint(w, x)
|
|
||||||
} else {
|
|
||||||
_, err = fmt.Fprintf(w, "/* %v */", err)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
type int32Slice []int32
|
|
||||||
|
|
||||||
func (s int32Slice) Len() int { return len(s) }
|
|
||||||
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
|
|
||||||
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
|
||||||
|
|
||||||
// writeExtensions writes all the extensions in pv.
|
|
||||||
// pv is assumed to be a pointer to a protocol message struct that is extendable.
|
|
||||||
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
|
|
||||||
emap := extensionMaps[pv.Type().Elem()]
|
|
||||||
ep, _ := extendable(pv.Interface())
|
|
||||||
|
|
||||||
// Order the extensions by ID.
|
|
||||||
// This isn't strictly necessary, but it will give us
|
|
||||||
// canonical output, which will also make testing easier.
|
|
||||||
m, mu := ep.extensionsRead()
|
|
||||||
if m == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
mu.Lock()
|
|
||||||
ids := make([]int32, 0, len(m))
|
|
||||||
for id := range m {
|
|
||||||
ids = append(ids, id)
|
|
||||||
}
|
|
||||||
sort.Sort(int32Slice(ids))
|
|
||||||
mu.Unlock()
|
|
||||||
|
|
||||||
for _, extNum := range ids {
|
|
||||||
ext := m[extNum]
|
|
||||||
var desc *ExtensionDesc
|
|
||||||
if emap != nil {
|
|
||||||
desc = emap[extNum]
|
|
||||||
}
|
|
||||||
if desc == nil {
|
|
||||||
// Unknown extension.
|
|
||||||
if err := writeUnknownStruct(w, ext.enc); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
pb, err := GetExtension(ep, desc)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed getting extension: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Repeated extensions will appear as a slice.
|
|
||||||
if !desc.repeated() {
|
|
||||||
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
v := reflect.ValueOf(pb)
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
|
|
||||||
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte(' '); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) writeIndent() {
|
|
||||||
if !w.complete {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
remain := w.ind * 2
|
|
||||||
for remain > 0 {
|
|
||||||
n := remain
|
|
||||||
if n > len(spaces) {
|
|
||||||
n = len(spaces)
|
|
||||||
}
|
|
||||||
w.w.Write(spaces[:n])
|
|
||||||
remain -= n
|
|
||||||
}
|
|
||||||
w.complete = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// TextMarshaler is a configurable text format marshaler.
|
|
||||||
type TextMarshaler struct {
|
|
||||||
Compact bool // use compact text format (one line).
|
|
||||||
ExpandAny bool // expand google.protobuf.Any messages of known types
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshal writes a given protocol buffer in text format.
|
|
||||||
// The only errors returned are from w.
|
|
||||||
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
|
||||||
val := reflect.ValueOf(pb)
|
|
||||||
if pb == nil || val.IsNil() {
|
|
||||||
w.Write([]byte("<nil>"))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var bw *bufio.Writer
|
|
||||||
ww, ok := w.(writer)
|
|
||||||
if !ok {
|
|
||||||
bw = bufio.NewWriter(w)
|
|
||||||
ww = bw
|
|
||||||
}
|
|
||||||
aw := &textWriter{
|
|
||||||
w: ww,
|
|
||||||
complete: true,
|
|
||||||
compact: tm.Compact,
|
|
||||||
}
|
|
||||||
|
|
||||||
if etm, ok := pb.(encoding.TextMarshaler); ok {
|
|
||||||
text, err := etm.MarshalText()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err = aw.Write(text); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if bw != nil {
|
|
||||||
return bw.Flush()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Dereference the received pointer so we don't have outer < and >.
|
|
||||||
v := reflect.Indirect(val)
|
|
||||||
if err := tm.writeStruct(aw, v); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if bw != nil {
|
|
||||||
return bw.Flush()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}

// Text is the same as Marshal, but returns the string directly.
func (tm *TextMarshaler) Text(pb Message) string {
var buf bytes.Buffer
tm.Marshal(&buf, pb)
return buf.String()
}

var (
defaultTextMarshaler = TextMarshaler{}
compactTextMarshaler = TextMarshaler{Compact: true}
)

// TODO: consider removing some of the Marshal functions below.

// MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w.
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }

// MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }

// CompactText writes a given protocol buffer in compact text format (one line).
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }

// CompactTextString is the same as CompactText, but returns the string directly.
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }

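As a usage sketch of the legacy helpers above (MarshalText, MarshalTextString, CompactText, CompactTextString) together with UnmarshalText from text_decode.go below, assuming a hypothetical proto3-style generated package examplepb with a message Example that has a string field Name:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	examplepb "example.com/gen/examplepb" // hypothetical generated package, assumed here
)

func main() {
	in := &examplepb.Example{Name: "demo"}

	// Multi-line text form, e.g. `name: "demo"`.
	text := proto.MarshalTextString(in)
	fmt.Print(text)

	// Compact one-line form of the same message.
	fmt.Println(proto.CompactTextString(in))

	// Parse the text form back into a fresh message.
	out := &examplepb.Example{}
	if err := proto.UnmarshalText(text, out); err != nil {
		fmt.Println("parse error:", err)
	}
}
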
801
vendor/github.com/golang/protobuf/proto/text_decode.go
generated
vendored
Normal file
@ -0,0 +1,801 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"unicode/utf8"

"google.golang.org/protobuf/encoding/prototext"
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)

const wrapTextUnmarshalV2 = false

// ParseError is returned by UnmarshalText.
type ParseError struct {
Message string

// Deprecated: Do not use.
Line, Offset int
}

func (e *ParseError) Error() string {
if wrapTextUnmarshalV2 {
return e.Message
}
if e.Line == 1 {
return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
}
return fmt.Sprintf("line %d: %v", e.Line, e.Message)
}

// UnmarshalText parses a proto text formatted string into m.
func UnmarshalText(s string, m Message) error {
if u, ok := m.(encoding.TextUnmarshaler); ok {
return u.UnmarshalText([]byte(s))
}

m.Reset()
mi := MessageV2(m)

if wrapTextUnmarshalV2 {
err := prototext.UnmarshalOptions{
AllowPartial: true,
}.Unmarshal([]byte(s), mi)
if err != nil {
return &ParseError{Message: err.Error()}
}
return checkRequiredNotSet(mi)
} else {
if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
return err
}
return checkRequiredNotSet(mi)
}
}

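A short sketch of error handling for the parser above: with wrapTextUnmarshalV2 set to false, an unknown field name surfaces as a *ParseError, assuming the hypothetical examplepb.Example does not implement encoding.TextUnmarshaler.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	examplepb "example.com/gen/examplepb" // hypothetical generated package, assumed here
)

func main() {
	msg := &examplepb.Example{}
	// "bogus" is not a field of Example, so the text parser reports an error.
	err := proto.UnmarshalText(`name: "demo" bogus: 1`, msg)
	if pe, ok := err.(*proto.ParseError); ok {
		fmt.Println("text parse failed:", pe.Message)
	} else if err != nil {
		fmt.Println("other error:", err)
	}
}
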
type textParser struct {
|
||||||
|
s string // remaining input
|
||||||
|
done bool // whether the parsing is finished (success or error)
|
||||||
|
backed bool // whether back() was called
|
||||||
|
offset, line int
|
||||||
|
cur token
|
||||||
|
}
|
||||||
|
|
||||||
|
type token struct {
|
||||||
|
value string
|
||||||
|
err *ParseError
|
||||||
|
line int // line number
|
||||||
|
offset int // byte number from start of input, not start of line
|
||||||
|
unquoted string // the unquoted version of value, if it was a quoted string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTextParser(s string) *textParser {
|
||||||
|
p := new(textParser)
|
||||||
|
p.s = s
|
||||||
|
p.line = 1
|
||||||
|
p.cur.line = 1
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
|
||||||
|
md := m.Descriptor()
|
||||||
|
fds := md.Fields()
|
||||||
|
|
||||||
|
// A struct is a sequence of "name: value", terminated by one of
|
||||||
|
// '>' or '}', or the end of the input. A name may also be
|
||||||
|
// "[extension]" or "[type/url]".
|
||||||
|
//
|
||||||
|
// The whole struct can also be an expanded Any message, like:
|
||||||
|
// [type/url] < ... struct contents ... >
|
||||||
|
seen := make(map[protoreflect.FieldNumber]bool)
|
||||||
|
for {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
if tok.value == terminator {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if tok.value == "[" {
|
||||||
|
if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is a normal, non-extension field.
|
||||||
|
name := protoreflect.Name(tok.value)
|
||||||
|
fd := fds.ByName(name)
|
||||||
|
switch {
|
||||||
|
case fd == nil:
|
||||||
|
gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
|
||||||
|
if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
|
||||||
|
fd = gd
|
||||||
|
}
|
||||||
|
case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
|
||||||
|
fd = nil
|
||||||
|
case fd.IsWeak() && fd.Message().IsPlaceholder():
|
||||||
|
fd = nil
|
||||||
|
}
|
||||||
|
if fd == nil {
|
||||||
|
typeName := string(md.FullName())
|
||||||
|
if m, ok := m.Interface().(Message); ok {
|
||||||
|
t := reflect.TypeOf(m)
|
||||||
|
if t.Kind() == reflect.Ptr {
|
||||||
|
typeName = t.Elem().String()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return p.errorf("unknown field name %q in %v", name, typeName)
|
||||||
|
}
|
||||||
|
if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
|
||||||
|
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
|
||||||
|
}
|
||||||
|
if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
|
||||||
|
return p.errorf("non-repeated field %q was repeated", fd.Name())
|
||||||
|
}
|
||||||
|
seen[fd.Number()] = true
|
||||||
|
|
||||||
|
// Consume any colon.
|
||||||
|
if err := p.checkForColon(fd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse into the field.
|
||||||
|
v := m.Get(fd)
|
||||||
|
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
||||||
|
v = m.Mutable(fd)
|
||||||
|
}
|
||||||
|
if v, err = p.unmarshalValue(v, fd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m.Set(fd, v)
|
||||||
|
|
||||||
|
if err := p.consumeOptionalSeparator(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
|
||||||
|
name, err := p.consumeExtensionOrAnyName()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If it contains a slash, it's an Any type URL.
|
||||||
|
if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
// consume an optional colon
|
||||||
|
if tok.value == ":" {
|
||||||
|
tok = p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var terminator string
|
||||||
|
switch tok.value {
|
||||||
|
case "<":
|
||||||
|
terminator = ">"
|
||||||
|
case "{":
|
||||||
|
terminator = "}"
|
||||||
|
default:
|
||||||
|
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||||
|
}
|
||||||
|
|
||||||
|
mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
|
||||||
|
if err != nil {
|
||||||
|
return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
|
||||||
|
}
|
||||||
|
m2 := mt.New()
|
||||||
|
if err := p.unmarshalMessage(m2, terminator); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
b, err := protoV2.Marshal(m2.Interface())
|
||||||
|
if err != nil {
|
||||||
|
return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
|
||||||
|
}
|
||||||
|
|
||||||
|
urlFD := m.Descriptor().Fields().ByName("type_url")
|
||||||
|
valFD := m.Descriptor().Fields().ByName("value")
|
||||||
|
if seen[urlFD.Number()] {
|
||||||
|
return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
|
||||||
|
}
|
||||||
|
if seen[valFD.Number()] {
|
||||||
|
return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
|
||||||
|
}
|
||||||
|
m.Set(urlFD, protoreflect.ValueOfString(name))
|
||||||
|
m.Set(valFD, protoreflect.ValueOfBytes(b))
|
||||||
|
seen[urlFD.Number()] = true
|
||||||
|
seen[valFD.Number()] = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
xname := protoreflect.FullName(name)
|
||||||
|
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
|
||||||
|
if xt == nil && isMessageSet(m.Descriptor()) {
|
||||||
|
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
|
||||||
|
}
|
||||||
|
if xt == nil {
|
||||||
|
return p.errorf("unrecognized extension %q", name)
|
||||||
|
}
|
||||||
|
fd := xt.TypeDescriptor()
|
||||||
|
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
|
||||||
|
return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := p.checkForColon(fd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
v := m.Get(fd)
|
||||||
|
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
||||||
|
v = m.Mutable(fd)
|
||||||
|
}
|
||||||
|
v, err = p.unmarshalValue(v, fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m.Set(fd, v)
|
||||||
|
return p.consumeOptionalSeparator()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return v, tok.err
|
||||||
|
}
|
||||||
|
if tok.value == "" {
|
||||||
|
return v, p.errorf("unexpected EOF")
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case fd.IsList():
|
||||||
|
lv := v.List()
|
||||||
|
var err error
|
||||||
|
if tok.value == "[" {
|
||||||
|
// Repeated field with list notation, like [1,2,3].
|
||||||
|
for {
|
||||||
|
vv := lv.NewElement()
|
||||||
|
vv, err = p.unmarshalSingularValue(vv, fd)
|
||||||
|
if err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
lv.Append(vv)
|
||||||
|
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return v, tok.err
|
||||||
|
}
|
||||||
|
if tok.value == "]" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if tok.value != "," {
|
||||||
|
return v, p.errorf("Expected ']' or ',' found %q", tok.value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// One value of the repeated field.
|
||||||
|
p.back()
|
||||||
|
vv := lv.NewElement()
|
||||||
|
vv, err = p.unmarshalSingularValue(vv, fd)
|
||||||
|
if err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
lv.Append(vv)
|
||||||
|
return v, nil
|
||||||
|
case fd.IsMap():
|
||||||
|
// The map entry should be this sequence of tokens:
|
||||||
|
// < key : KEY value : VALUE >
|
||||||
|
// However, implementations may omit key or value, and technically
|
||||||
|
// we should support them in any order.
|
||||||
|
var terminator string
|
||||||
|
switch tok.value {
|
||||||
|
case "<":
|
||||||
|
terminator = ">"
|
||||||
|
case "{":
|
||||||
|
terminator = "}"
|
||||||
|
default:
|
||||||
|
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
||||||
|
}
|
||||||
|
|
||||||
|
keyFD := fd.MapKey()
|
||||||
|
valFD := fd.MapValue()
|
||||||
|
|
||||||
|
mv := v.Map()
|
||||||
|
kv := keyFD.Default()
|
||||||
|
vv := mv.NewValue()
|
||||||
|
for {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return v, tok.err
|
||||||
|
}
|
||||||
|
if tok.value == terminator {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
switch tok.value {
|
||||||
|
case "key":
|
||||||
|
if err := p.consumeToken(":"); err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
if err := p.consumeOptionalSeparator(); err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
case "value":
|
||||||
|
if err := p.checkForColon(valFD); err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
if err := p.consumeOptionalSeparator(); err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
p.back()
|
||||||
|
return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
mv.Set(kv.MapKey(), vv)
|
||||||
|
return v, nil
|
||||||
|
default:
|
||||||
|
p.back()
|
||||||
|
return p.unmarshalSingularValue(v, fd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return v, tok.err
|
||||||
|
}
|
||||||
|
if tok.value == "" {
|
||||||
|
return v, p.errorf("unexpected EOF")
|
||||||
|
}
|
||||||
|
|
||||||
|
switch fd.Kind() {
|
||||||
|
case protoreflect.BoolKind:
|
||||||
|
switch tok.value {
|
||||||
|
case "true", "1", "t", "True":
|
||||||
|
return protoreflect.ValueOfBool(true), nil
|
||||||
|
case "false", "0", "f", "False":
|
||||||
|
return protoreflect.ValueOfBool(false), nil
|
||||||
|
}
|
||||||
|
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
|
||||||
|
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||||
|
return protoreflect.ValueOfInt32(int32(x)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The C++ parser accepts large positive hex numbers that uses
|
||||||
|
// two's complement arithmetic to represent negative numbers.
|
||||||
|
// This feature is here for backwards compatibility with C++.
|
||||||
|
if strings.HasPrefix(tok.value, "0x") {
|
||||||
|
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||||
|
return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||||
|
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
||||||
|
return protoreflect.ValueOfInt64(int64(x)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The C++ parser accepts large positive hex numbers that uses
|
||||||
|
// two's complement arithmetic to represent negative numbers.
|
||||||
|
// This feature is here for backwards compatibility with C++.
|
||||||
|
if strings.HasPrefix(tok.value, "0x") {
|
||||||
|
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||||
|
return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
|
||||||
|
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||||
|
return protoreflect.ValueOfUint32(uint32(x)), nil
|
||||||
|
}
|
||||||
|
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||||
|
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||||
|
return protoreflect.ValueOfUint64(uint64(x)), nil
|
||||||
|
}
|
||||||
|
case protoreflect.FloatKind:
|
||||||
|
// Ignore 'f' for compatibility with output generated by C++,
|
||||||
|
// but don't remove 'f' when the value is "-inf" or "inf".
|
||||||
|
v := tok.value
|
||||||
|
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
||||||
|
v = v[:len(v)-len("f")]
|
||||||
|
}
|
||||||
|
if x, err := strconv.ParseFloat(v, 32); err == nil {
|
||||||
|
return protoreflect.ValueOfFloat32(float32(x)), nil
|
||||||
|
}
|
||||||
|
case protoreflect.DoubleKind:
|
||||||
|
// Ignore 'f' for compatibility with output generated by C++,
|
||||||
|
// but don't remove 'f' when the value is "-inf" or "inf".
|
||||||
|
v := tok.value
|
||||||
|
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
||||||
|
v = v[:len(v)-len("f")]
|
||||||
|
}
|
||||||
|
if x, err := strconv.ParseFloat(v, 64); err == nil {
|
||||||
|
return protoreflect.ValueOfFloat64(float64(x)), nil
|
||||||
|
}
|
||||||
|
case protoreflect.StringKind:
|
||||||
|
if isQuote(tok.value[0]) {
|
||||||
|
return protoreflect.ValueOfString(tok.unquoted), nil
|
||||||
|
}
|
||||||
|
case protoreflect.BytesKind:
|
||||||
|
if isQuote(tok.value[0]) {
|
||||||
|
return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
|
||||||
|
}
|
||||||
|
case protoreflect.EnumKind:
|
||||||
|
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||||
|
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
|
||||||
|
}
|
||||||
|
vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
|
||||||
|
if vd != nil {
|
||||||
|
return protoreflect.ValueOfEnum(vd.Number()), nil
|
||||||
|
}
|
||||||
|
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||||
|
var terminator string
|
||||||
|
switch tok.value {
|
||||||
|
case "{":
|
||||||
|
terminator = "}"
|
||||||
|
case "<":
|
||||||
|
terminator = ">"
|
||||||
|
default:
|
||||||
|
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
||||||
|
}
|
||||||
|
err := p.unmarshalMessage(v.Message(), terminator)
|
||||||
|
return v, err
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
|
||||||
|
}
|
||||||
|
return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Consume a ':' from the input stream (if the next token is a colon),
|
||||||
|
// returning an error if a colon is needed but not present.
|
||||||
|
func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
if tok.value != ":" {
|
||||||
|
if fd.Message() == nil {
|
||||||
|
return p.errorf("expected ':', found %q", tok.value)
|
||||||
|
}
|
||||||
|
p.back()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
|
||||||
|
// the following ']'. It returns the name or URL consumed.
|
||||||
|
func (p *textParser) consumeExtensionOrAnyName() (string, error) {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return "", tok.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If extension name or type url is quoted, it's a single token.
|
||||||
|
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
||||||
|
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return name, p.consumeToken("]")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Consume everything up to "]"
|
||||||
|
var parts []string
|
||||||
|
for tok.value != "]" {
|
||||||
|
parts = append(parts, tok.value)
|
||||||
|
tok = p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||||
|
}
|
||||||
|
if p.done && tok.value != "]" {
|
||||||
|
return "", p.errorf("unclosed type_url or extension name")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return strings.Join(parts, ""), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||||
|
// It is used in unmarshalMessage to provide backward compatibility.
|
||||||
|
func (p *textParser) consumeOptionalSeparator() error {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
if tok.value != ";" && tok.value != "," {
|
||||||
|
p.back()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
||||||
|
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
||||||
|
p.cur.err = pe
|
||||||
|
p.done = true
|
||||||
|
return pe
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textParser) skipWhitespace() {
|
||||||
|
i := 0
|
||||||
|
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||||
|
if p.s[i] == '#' {
|
||||||
|
// comment; skip to end of line or input
|
||||||
|
for i < len(p.s) && p.s[i] != '\n' {
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
if i == len(p.s) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if p.s[i] == '\n' {
|
||||||
|
p.line++
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
p.offset += i
|
||||||
|
p.s = p.s[i:len(p.s)]
|
||||||
|
if len(p.s) == 0 {
|
||||||
|
p.done = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textParser) advance() {
|
||||||
|
// Skip whitespace
|
||||||
|
p.skipWhitespace()
|
||||||
|
if p.done {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start of non-whitespace
|
||||||
|
p.cur.err = nil
|
||||||
|
p.cur.offset, p.cur.line = p.offset, p.line
|
||||||
|
p.cur.unquoted = ""
|
||||||
|
switch p.s[0] {
|
||||||
|
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||||
|
// Single symbol
|
||||||
|
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||||
|
case '"', '\'':
|
||||||
|
// Quoted string
|
||||||
|
i := 1
|
||||||
|
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||||
|
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||||
|
// skip escaped char
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||||
|
p.errorf("unmatched quote")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||||
|
if err != nil {
|
||||||
|
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||||
|
p.cur.unquoted = unq
|
||||||
|
default:
|
||||||
|
i := 0
|
||||||
|
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
if i == 0 {
|
||||||
|
p.errorf("unexpected byte %#x", p.s[0])
|
||||||
|
return
|
||||||
|
}
|
||||||
|
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||||
|
}
|
||||||
|
p.offset += len(p.cur.value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Back off the parser by one token. Can only be done between calls to next().
|
||||||
|
// It makes the next advance() a no-op.
|
||||||
|
func (p *textParser) back() { p.backed = true }
|
||||||
|
|
||||||
|
// Advances the parser and returns the new current token.
|
||||||
|
func (p *textParser) next() *token {
|
||||||
|
if p.backed || p.done {
|
||||||
|
p.backed = false
|
||||||
|
return &p.cur
|
||||||
|
}
|
||||||
|
p.advance()
|
||||||
|
if p.done {
|
||||||
|
p.cur.value = ""
|
||||||
|
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
||||||
|
// Look for multiple quoted strings separated by whitespace,
|
||||||
|
// and concatenate them.
|
||||||
|
cat := p.cur
|
||||||
|
for {
|
||||||
|
p.skipWhitespace()
|
||||||
|
if p.done || !isQuote(p.s[0]) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
p.advance()
|
||||||
|
if p.cur.err != nil {
|
||||||
|
return &p.cur
|
||||||
|
}
|
||||||
|
cat.value += " " + p.cur.value
|
||||||
|
cat.unquoted += p.cur.unquoted
|
||||||
|
}
|
||||||
|
p.done = false // parser may have seen EOF, but we want to return cat
|
||||||
|
p.cur = cat
|
||||||
|
}
|
||||||
|
return &p.cur
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textParser) consumeToken(s string) error {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
if tok.value != s {
|
||||||
|
p.back()
|
||||||
|
return p.errorf("expected %q, found %q", s, tok.value)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||||
|
|
||||||
|
func unquoteC(s string, quote rune) (string, error) {
|
||||||
|
// This is based on C++'s tokenizer.cc.
|
||||||
|
// Despite its name, this is *not* parsing C syntax.
|
||||||
|
// For instance, "\0" is an invalid quoted string.
|
||||||
|
|
||||||
|
// Avoid allocation in trivial cases.
|
||||||
|
simple := true
|
||||||
|
for _, r := range s {
|
||||||
|
if r == '\\' || r == quote {
|
||||||
|
simple = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if simple {
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, 0, 3*len(s)/2)
|
||||||
|
for len(s) > 0 {
|
||||||
|
r, n := utf8.DecodeRuneInString(s)
|
||||||
|
if r == utf8.RuneError && n == 1 {
|
||||||
|
return "", errBadUTF8
|
||||||
|
}
|
||||||
|
s = s[n:]
|
||||||
|
if r != '\\' {
|
||||||
|
if r < utf8.RuneSelf {
|
||||||
|
buf = append(buf, byte(r))
|
||||||
|
} else {
|
||||||
|
buf = append(buf, string(r)...)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
ch, tail, err := unescape(s)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
buf = append(buf, ch...)
|
||||||
|
s = tail
|
||||||
|
}
|
||||||
|
return string(buf), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unescape(s string) (ch string, tail string, err error) {
|
||||||
|
r, n := utf8.DecodeRuneInString(s)
|
||||||
|
if r == utf8.RuneError && n == 1 {
|
||||||
|
return "", "", errBadUTF8
|
||||||
|
}
|
||||||
|
s = s[n:]
|
||||||
|
switch r {
|
||||||
|
case 'a':
|
||||||
|
return "\a", s, nil
|
||||||
|
case 'b':
|
||||||
|
return "\b", s, nil
|
||||||
|
case 'f':
|
||||||
|
return "\f", s, nil
|
||||||
|
case 'n':
|
||||||
|
return "\n", s, nil
|
||||||
|
case 'r':
|
||||||
|
return "\r", s, nil
|
||||||
|
case 't':
|
||||||
|
return "\t", s, nil
|
||||||
|
case 'v':
|
||||||
|
return "\v", s, nil
|
||||||
|
case '?':
|
||||||
|
return "?", s, nil // trigraph workaround
|
||||||
|
case '\'', '"', '\\':
|
||||||
|
return string(r), s, nil
|
||||||
|
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||||
|
if len(s) < 2 {
|
||||||
|
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||||
|
}
|
||||||
|
ss := string(r) + s[:2]
|
||||||
|
s = s[2:]
|
||||||
|
i, err := strconv.ParseUint(ss, 8, 8)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
||||||
|
}
|
||||||
|
return string([]byte{byte(i)}), s, nil
|
||||||
|
case 'x', 'X', 'u', 'U':
|
||||||
|
var n int
|
||||||
|
switch r {
|
||||||
|
case 'x', 'X':
|
||||||
|
n = 2
|
||||||
|
case 'u':
|
||||||
|
n = 4
|
||||||
|
case 'U':
|
||||||
|
n = 8
|
||||||
|
}
|
||||||
|
if len(s) < n {
|
||||||
|
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
||||||
|
}
|
||||||
|
ss := s[:n]
|
||||||
|
s = s[n:]
|
||||||
|
i, err := strconv.ParseUint(ss, 16, 64)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
||||||
|
}
|
||||||
|
if r == 'x' || r == 'X' {
|
||||||
|
return string([]byte{byte(i)}), s, nil
|
||||||
|
}
|
||||||
|
if i > utf8.MaxRune {
|
||||||
|
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
||||||
|
}
|
||||||
|
return string(i), s, nil
|
||||||
|
}
|
||||||
|
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func isIdentOrNumberChar(c byte) bool {
|
||||||
|
switch {
|
||||||
|
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
||||||
|
return true
|
||||||
|
case '0' <= c && c <= '9':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
switch c {
|
||||||
|
case '-', '+', '.', '_':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isWhitespace(c byte) bool {
|
||||||
|
switch c {
|
||||||
|
case ' ', '\t', '\n', '\r':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isQuote(c byte) bool {
|
||||||
|
switch c {
|
||||||
|
case '"', '\'':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
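The quoted-string handling in unquoteC and unescape above accepts C-style octal, hex, and Unicode escapes. A small sketch of that behavior through UnmarshalText, once more assuming the hypothetical examplepb.Example:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	examplepb "example.com/gen/examplepb" // hypothetical generated package, assumed here
)

func main() {
	// \110 (octal 'H'), \x69 (hex 'i') and \u00e9 ('é') are all unescaped by
	// the text parser, so Name decodes to "Hié".
	msg := &examplepb.Example{}
	if err := proto.UnmarshalText(`name: "\110\x69\u00e9"`, msg); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(msg.Name)
}
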
560
vendor/github.com/golang/protobuf/proto/text_encode.go
generated
vendored
Normal file
@ -0,0 +1,560 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
"bytes"
"encoding"
"fmt"
"io"
"math"
"sort"
"strings"

"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)

const wrapTextMarshalV2 = false

// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
Compact bool // use compact text format (one line)
ExpandAny bool // expand google.protobuf.Any messages of known types
}
|
||||||
|
|
||||||
|
// Marshal writes the proto text format of m to w.
|
||||||
|
func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
|
||||||
|
b, err := tm.marshal(m)
|
||||||
|
if len(b) > 0 {
|
||||||
|
if _, err := w.Write(b); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Text returns a proto text formatted string of m.
|
||||||
|
func (tm *TextMarshaler) Text(m Message) string {
|
||||||
|
b, _ := tm.marshal(m)
|
||||||
|
return string(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
|
||||||
|
mr := MessageReflect(m)
|
||||||
|
if mr == nil || !mr.IsValid() {
|
||||||
|
return []byte("<nil>"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if wrapTextMarshalV2 {
|
||||||
|
if m, ok := m.(encoding.TextMarshaler); ok {
|
||||||
|
return m.MarshalText()
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := prototext.MarshalOptions{
|
||||||
|
AllowPartial: true,
|
||||||
|
EmitUnknown: true,
|
||||||
|
}
|
||||||
|
if !tm.Compact {
|
||||||
|
opts.Indent = " "
|
||||||
|
}
|
||||||
|
if !tm.ExpandAny {
|
||||||
|
opts.Resolver = (*protoregistry.Types)(nil)
|
||||||
|
}
|
||||||
|
return opts.Marshal(mr.Interface())
|
||||||
|
} else {
|
||||||
|
w := &textWriter{
|
||||||
|
compact: tm.Compact,
|
||||||
|
expandAny: tm.ExpandAny,
|
||||||
|
complete: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
if m, ok := m.(encoding.TextMarshaler); ok {
|
||||||
|
b, err := m.MarshalText()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
w.Write(b)
|
||||||
|
return w.buf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
err := w.writeMessage(mr)
|
||||||
|
return w.buf, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
defaultTextMarshaler = TextMarshaler{}
|
||||||
|
compactTextMarshaler = TextMarshaler{Compact: true}
|
||||||
|
)
|
||||||
|
|
||||||
|
// MarshalText writes the proto text format of m to w.
|
||||||
|
func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
|
||||||
|
|
||||||
|
// MarshalTextString returns a proto text formatted string of m.
|
||||||
|
func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
|
||||||
|
|
||||||
|
// CompactText writes the compact proto text format of m to w.
|
||||||
|
func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
|
||||||
|
|
||||||
|
// CompactTextString returns a compact proto text formatted string of m.
|
||||||
|
func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
|
||||||
|
|
||||||
|
var (
|
||||||
|
newline = []byte("\n")
|
||||||
|
endBraceNewline = []byte("}\n")
|
||||||
|
posInf = []byte("inf")
|
||||||
|
negInf = []byte("-inf")
|
||||||
|
nan = []byte("nan")
|
||||||
|
)
|
||||||
|
|
||||||
|
// textWriter is an io.Writer that tracks its indentation level.
|
||||||
|
type textWriter struct {
|
||||||
|
compact bool // same as TextMarshaler.Compact
|
||||||
|
expandAny bool // same as TextMarshaler.ExpandAny
|
||||||
|
complete bool // whether the current position is a complete line
|
||||||
|
indent int // indentation level; never negative
|
||||||
|
buf []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *textWriter) Write(p []byte) (n int, _ error) {
|
||||||
|
newlines := bytes.Count(p, newline)
|
||||||
|
if newlines == 0 {
|
||||||
|
if !w.compact && w.complete {
|
||||||
|
w.writeIndent()
|
||||||
|
}
|
||||||
|
w.buf = append(w.buf, p...)
|
||||||
|
w.complete = false
|
||||||
|
return len(p), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
frags := bytes.SplitN(p, newline, newlines+1)
|
||||||
|
if w.compact {
|
||||||
|
for i, frag := range frags {
|
||||||
|
if i > 0 {
|
||||||
|
w.buf = append(w.buf, ' ')
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
w.buf = append(w.buf, frag...)
|
||||||
|
n += len(frag)
|
||||||
|
}
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, frag := range frags {
|
||||||
|
if w.complete {
|
||||||
|
w.writeIndent()
|
||||||
|
}
|
||||||
|
w.buf = append(w.buf, frag...)
|
||||||
|
n += len(frag)
|
||||||
|
if i+1 < len(frags) {
|
||||||
|
w.buf = append(w.buf, '\n')
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
w.complete = len(frags[len(frags)-1]) == 0
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *textWriter) WriteByte(c byte) error {
|
||||||
|
if w.compact && c == '\n' {
|
||||||
|
c = ' '
|
||||||
|
}
|
||||||
|
if !w.compact && w.complete {
|
||||||
|
w.writeIndent()
|
||||||
|
}
|
||||||
|
w.buf = append(w.buf, c)
|
||||||
|
w.complete = c == '\n'
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
|
||||||
|
if !w.compact && w.complete {
|
||||||
|
w.writeIndent()
|
||||||
|
}
|
||||||
|
w.complete = false
|
||||||
|
|
||||||
|
if fd.Kind() != protoreflect.GroupKind {
|
||||||
|
w.buf = append(w.buf, fd.Name()...)
|
||||||
|
w.WriteByte(':')
|
||||||
|
} else {
|
||||||
|
// Use message type name for group field name.
|
||||||
|
w.buf = append(w.buf, fd.Message().Name()...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !w.compact {
|
||||||
|
w.WriteByte(' ')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func requiresQuotes(u string) bool {
|
||||||
|
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
||||||
|
for _, ch := range u {
|
||||||
|
switch {
|
||||||
|
case ch == '.' || ch == '/' || ch == '_':
|
||||||
|
continue
|
||||||
|
case '0' <= ch && ch <= '9':
|
||||||
|
continue
|
||||||
|
case 'A' <= ch && ch <= 'Z':
|
||||||
|
continue
|
||||||
|
case 'a' <= ch && ch <= 'z':
|
||||||
|
continue
|
||||||
|
default:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeProto3Any writes an expanded google.protobuf.Any message.
|
||||||
|
//
|
||||||
|
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
|
||||||
|
// required messages are not linked in).
|
||||||
|
//
|
||||||
|
// It returns (true, error) when sv was written in expanded format or an error
|
||||||
|
// was encountered.
|
||||||
|
func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
|
||||||
|
md := m.Descriptor()
|
||||||
|
fdURL := md.Fields().ByName("type_url")
|
||||||
|
fdVal := md.Fields().ByName("value")
|
||||||
|
|
||||||
|
url := m.Get(fdURL).String()
|
||||||
|
mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
|
||||||
|
if err != nil {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
b := m.Get(fdVal).Bytes()
|
||||||
|
m2 := mt.New()
|
||||||
|
if err := proto.Unmarshal(b, m2.Interface()); err != nil {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
w.Write([]byte("["))
|
||||||
|
if requiresQuotes(url) {
|
||||||
|
w.writeQuotedString(url)
|
||||||
|
} else {
|
||||||
|
w.Write([]byte(url))
|
||||||
|
}
|
||||||
|
if w.compact {
|
||||||
|
w.Write([]byte("]:<"))
|
||||||
|
} else {
|
||||||
|
w.Write([]byte("]: <\n"))
|
||||||
|
w.indent++
|
||||||
|
}
|
||||||
|
if err := w.writeMessage(m2); err != nil {
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
if w.compact {
|
||||||
|
w.Write([]byte("> "))
|
||||||
|
} else {
|
||||||
|
w.indent--
|
||||||
|
w.Write([]byte(">\n"))
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
func (w *textWriter) writeMessage(m protoreflect.Message) error {
	md := m.Descriptor()
	if w.expandAny && md.FullName() == "google.protobuf.Any" {
		if canExpand, err := w.writeProto3Any(m); canExpand {
			return err
		}
	}

	fds := md.Fields()
	for i := 0; i < fds.Len(); {
		fd := fds.Get(i)
		if od := fd.ContainingOneof(); od != nil {
			fd = m.WhichOneof(od)
			i += od.Fields().Len()
		} else {
			i++
		}
		if fd == nil || !m.Has(fd) {
			continue
		}

		switch {
		case fd.IsList():
			lv := m.Get(fd).List()
			for j := 0; j < lv.Len(); j++ {
				w.writeName(fd)
				v := lv.Get(j)
				if err := w.writeSingularValue(v, fd); err != nil {
					return err
				}
				w.WriteByte('\n')
			}
		case fd.IsMap():
			kfd := fd.MapKey()
			vfd := fd.MapValue()
			mv := m.Get(fd).Map()

			type entry struct{ key, val protoreflect.Value }
			var entries []entry
			mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
				entries = append(entries, entry{k.Value(), v})
				return true
			})
			sort.Slice(entries, func(i, j int) bool {
				switch kfd.Kind() {
				case protoreflect.BoolKind:
					return !entries[i].key.Bool() && entries[j].key.Bool()
				case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
					return entries[i].key.Int() < entries[j].key.Int()
				case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
					return entries[i].key.Uint() < entries[j].key.Uint()
				case protoreflect.StringKind:
					return entries[i].key.String() < entries[j].key.String()
				default:
					panic("invalid kind")
				}
			})
			for _, entry := range entries {
				w.writeName(fd)
				w.WriteByte('<')
				if !w.compact {
					w.WriteByte('\n')
				}
				w.indent++
				w.writeName(kfd)
				if err := w.writeSingularValue(entry.key, kfd); err != nil {
					return err
				}
				w.WriteByte('\n')
				w.writeName(vfd)
				if err := w.writeSingularValue(entry.val, vfd); err != nil {
					return err
				}
				w.WriteByte('\n')
				w.indent--
				w.WriteByte('>')
				w.WriteByte('\n')
			}
		default:
			w.writeName(fd)
			if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
				return err
			}
			w.WriteByte('\n')
		}
	}

	if b := m.GetUnknown(); len(b) > 0 {
		w.writeUnknownFields(b)
	}
	return w.writeExtensions(m)
}

func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
	switch fd.Kind() {
	case protoreflect.FloatKind, protoreflect.DoubleKind:
		switch vf := v.Float(); {
		case math.IsInf(vf, +1):
			w.Write(posInf)
		case math.IsInf(vf, -1):
			w.Write(negInf)
		case math.IsNaN(vf):
			w.Write(nan)
		default:
			fmt.Fprint(w, v.Interface())
		}
	case protoreflect.StringKind:
		// NOTE: This does not validate UTF-8 for historical reasons.
		w.writeQuotedString(string(v.String()))
	case protoreflect.BytesKind:
		w.writeQuotedString(string(v.Bytes()))
	case protoreflect.MessageKind, protoreflect.GroupKind:
		var bra, ket byte = '<', '>'
		if fd.Kind() == protoreflect.GroupKind {
			bra, ket = '{', '}'
		}
		w.WriteByte(bra)
		if !w.compact {
			w.WriteByte('\n')
		}
		w.indent++
		m := v.Message()
		if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
			b, err := m2.MarshalText()
			if err != nil {
				return err
			}
			w.Write(b)
		} else {
			w.writeMessage(m)
		}
		w.indent--
		w.WriteByte(ket)
	case protoreflect.EnumKind:
		if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
			fmt.Fprint(w, ev.Name())
		} else {
			fmt.Fprint(w, v.Enum())
		}
	default:
		fmt.Fprint(w, v.Interface())
	}
	return nil
}

// writeQuotedString writes a quoted string in the protocol buffer text format.
func (w *textWriter) writeQuotedString(s string) {
	w.WriteByte('"')
	for i := 0; i < len(s); i++ {
		switch c := s[i]; c {
		case '\n':
			w.buf = append(w.buf, `\n`...)
		case '\r':
			w.buf = append(w.buf, `\r`...)
		case '\t':
			w.buf = append(w.buf, `\t`...)
		case '"':
			w.buf = append(w.buf, `\"`...)
		case '\\':
			w.buf = append(w.buf, `\\`...)
		default:
			if isPrint := c >= 0x20 && c < 0x7f; isPrint {
				w.buf = append(w.buf, c)
			} else {
				w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
			}
		}
	}
	w.WriteByte('"')
}

func (w *textWriter) writeUnknownFields(b []byte) {
	if !w.compact {
		fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
	}

	for len(b) > 0 {
		num, wtyp, n := protowire.ConsumeTag(b)
		if n < 0 {
			return
		}
		b = b[n:]

		if wtyp == protowire.EndGroupType {
			w.indent--
			w.Write(endBraceNewline)
			continue
		}
		fmt.Fprint(w, num)
		if wtyp != protowire.StartGroupType {
			w.WriteByte(':')
		}
		if !w.compact || wtyp == protowire.StartGroupType {
			w.WriteByte(' ')
		}
		switch wtyp {
		case protowire.VarintType:
			v, n := protowire.ConsumeVarint(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprint(w, v)
		case protowire.Fixed32Type:
			v, n := protowire.ConsumeFixed32(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprint(w, v)
		case protowire.Fixed64Type:
			v, n := protowire.ConsumeFixed64(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprint(w, v)
		case protowire.BytesType:
			v, n := protowire.ConsumeBytes(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprintf(w, "%q", v)
		case protowire.StartGroupType:
			w.WriteByte('{')
			w.indent++
		default:
			fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
		}
		w.WriteByte('\n')
	}
}

// writeExtensions writes all the extensions in m.
func (w *textWriter) writeExtensions(m protoreflect.Message) error {
	md := m.Descriptor()
	if md.ExtensionRanges().Len() == 0 {
		return nil
	}

	type ext struct {
		desc protoreflect.FieldDescriptor
		val  protoreflect.Value
	}
	var exts []ext
	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		if fd.IsExtension() {
			exts = append(exts, ext{fd, v})
		}
		return true
	})
	sort.Slice(exts, func(i, j int) bool {
		return exts[i].desc.Number() < exts[j].desc.Number()
	})

	for _, ext := range exts {
		// For message set, use the name of the message as the extension name.
		name := string(ext.desc.FullName())
		if isMessageSet(ext.desc.ContainingMessage()) {
			name = strings.TrimSuffix(name, ".message_set_extension")
		}

		if !ext.desc.IsList() {
			if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
				return err
			}
		} else {
			lv := ext.val.List()
			for i := 0; i < lv.Len(); i++ {
				if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
					return err
				}
			}
		}
	}
	return nil
}

func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
	fmt.Fprintf(w, "[%s]:", name)
	if !w.compact {
		w.WriteByte(' ')
	}
	if err := w.writeSingularValue(v, fd); err != nil {
		return err
	}
	w.WriteByte('\n')
	return nil
}

func (w *textWriter) writeIndent() {
	if !w.complete {
		return
	}
	for i := 0; i < w.indent*2; i++ {
		w.buf = append(w.buf, ' ')
	}
	w.complete = false
}
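The writer above keys most of its layout off the compact flag: in compact mode everything is emitted on one line, otherwise writeIndent pads each line to the current nesting depth. A minimal usage sketch of the two modes through the public v1 API, assuming the ptypes/wrappers well-known types are available in the vendored tree (they are not part of this diff hunk):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	msg := &wrappers.StringValue{Value: "hello"}

	// Single-line rendering, driven by the writer's compact mode.
	fmt.Println(proto.CompactTextString(msg))

	// Multi-line rendering; nested messages would be indented by writeIndent.
	fmt.Println(proto.MarshalTextString(msg))
}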
880 vendor/github.com/golang/protobuf/proto/text_parser.go (generated, vendored)
@@ -1,880 +0,0 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
// Functions for parsing the Text protocol buffer format.
|
|
||||||
// TODO: message sets.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Error string emitted when deserializing Any and fields are already set
|
|
||||||
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
|
|
||||||
|
|
||||||
type ParseError struct {
|
|
||||||
Message string
|
|
||||||
Line int // 1-based line number
|
|
||||||
Offset int // 0-based byte offset from start of input
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ParseError) Error() string {
|
|
||||||
if p.Line == 1 {
|
|
||||||
// show offset only for first line
|
|
||||||
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
type token struct {
|
|
||||||
value string
|
|
||||||
err *ParseError
|
|
||||||
line int // line number
|
|
||||||
offset int // byte number from start of input, not start of line
|
|
||||||
unquoted string // the unquoted version of value, if it was a quoted string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *token) String() string {
|
|
||||||
if t.err == nil {
|
|
||||||
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("parse error: %v", t.err)
|
|
||||||
}
|
|
||||||
|
|
||||||
type textParser struct {
|
|
||||||
s string // remaining input
|
|
||||||
done bool // whether the parsing is finished (success or error)
|
|
||||||
backed bool // whether back() was called
|
|
||||||
offset, line int
|
|
||||||
cur token
|
|
||||||
}
|
|
||||||
|
|
||||||
func newTextParser(s string) *textParser {
|
|
||||||
p := new(textParser)
|
|
||||||
p.s = s
|
|
||||||
p.line = 1
|
|
||||||
p.cur.line = 1
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
|
||||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
|
||||||
p.cur.err = pe
|
|
||||||
p.done = true
|
|
||||||
return pe
|
|
||||||
}
|
|
||||||
|
|
||||||
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
|
|
||||||
func isIdentOrNumberChar(c byte) bool {
|
|
||||||
switch {
|
|
||||||
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
|
||||||
return true
|
|
||||||
case '0' <= c && c <= '9':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
switch c {
|
|
||||||
case '-', '+', '.', '_':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func isWhitespace(c byte) bool {
|
|
||||||
switch c {
|
|
||||||
case ' ', '\t', '\n', '\r':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func isQuote(c byte) bool {
|
|
||||||
switch c {
|
|
||||||
case '"', '\'':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) skipWhitespace() {
|
|
||||||
i := 0
|
|
||||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
|
||||||
if p.s[i] == '#' {
|
|
||||||
// comment; skip to end of line or input
|
|
||||||
for i < len(p.s) && p.s[i] != '\n' {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i == len(p.s) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if p.s[i] == '\n' {
|
|
||||||
p.line++
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
p.offset += i
|
|
||||||
p.s = p.s[i:len(p.s)]
|
|
||||||
if len(p.s) == 0 {
|
|
||||||
p.done = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) advance() {
|
|
||||||
// Skip whitespace
|
|
||||||
p.skipWhitespace()
|
|
||||||
if p.done {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start of non-whitespace
|
|
||||||
p.cur.err = nil
|
|
||||||
p.cur.offset, p.cur.line = p.offset, p.line
|
|
||||||
p.cur.unquoted = ""
|
|
||||||
switch p.s[0] {
|
|
||||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
|
||||||
// Single symbol
|
|
||||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
|
||||||
case '"', '\'':
|
|
||||||
// Quoted string
|
|
||||||
i := 1
|
|
||||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
|
||||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
|
||||||
// skip escaped char
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
|
||||||
p.errorf("unmatched quote")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
|
||||||
if err != nil {
|
|
||||||
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
|
||||||
p.cur.unquoted = unq
|
|
||||||
default:
|
|
||||||
i := 0
|
|
||||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i == 0 {
|
|
||||||
p.errorf("unexpected byte %#x", p.s[0])
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
|
||||||
}
|
|
||||||
p.offset += len(p.cur.value)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
errBadUTF8 = errors.New("proto: bad UTF-8")
|
|
||||||
)
|
|
||||||
|
|
||||||
func unquoteC(s string, quote rune) (string, error) {
|
|
||||||
// This is based on C++'s tokenizer.cc.
|
|
||||||
// Despite its name, this is *not* parsing C syntax.
|
|
||||||
// For instance, "\0" is an invalid quoted string.
|
|
||||||
|
|
||||||
// Avoid allocation in trivial cases.
|
|
||||||
simple := true
|
|
||||||
for _, r := range s {
|
|
||||||
if r == '\\' || r == quote {
|
|
||||||
simple = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if simple {
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := make([]byte, 0, 3*len(s)/2)
|
|
||||||
for len(s) > 0 {
|
|
||||||
r, n := utf8.DecodeRuneInString(s)
|
|
||||||
if r == utf8.RuneError && n == 1 {
|
|
||||||
return "", errBadUTF8
|
|
||||||
}
|
|
||||||
s = s[n:]
|
|
||||||
if r != '\\' {
|
|
||||||
if r < utf8.RuneSelf {
|
|
||||||
buf = append(buf, byte(r))
|
|
||||||
} else {
|
|
||||||
buf = append(buf, string(r)...)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
ch, tail, err := unescape(s)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
buf = append(buf, ch...)
|
|
||||||
s = tail
|
|
||||||
}
|
|
||||||
return string(buf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func unescape(s string) (ch string, tail string, err error) {
|
|
||||||
r, n := utf8.DecodeRuneInString(s)
|
|
||||||
if r == utf8.RuneError && n == 1 {
|
|
||||||
return "", "", errBadUTF8
|
|
||||||
}
|
|
||||||
s = s[n:]
|
|
||||||
switch r {
|
|
||||||
case 'a':
|
|
||||||
return "\a", s, nil
|
|
||||||
case 'b':
|
|
||||||
return "\b", s, nil
|
|
||||||
case 'f':
|
|
||||||
return "\f", s, nil
|
|
||||||
case 'n':
|
|
||||||
return "\n", s, nil
|
|
||||||
case 'r':
|
|
||||||
return "\r", s, nil
|
|
||||||
case 't':
|
|
||||||
return "\t", s, nil
|
|
||||||
case 'v':
|
|
||||||
return "\v", s, nil
|
|
||||||
case '?':
|
|
||||||
return "?", s, nil // trigraph workaround
|
|
||||||
case '\'', '"', '\\':
|
|
||||||
return string(r), s, nil
|
|
||||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
|
||||||
if len(s) < 2 {
|
|
||||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
|
||||||
}
|
|
||||||
ss := string(r) + s[:2]
|
|
||||||
s = s[2:]
|
|
||||||
i, err := strconv.ParseUint(ss, 8, 8)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
|
||||||
}
|
|
||||||
return string([]byte{byte(i)}), s, nil
|
|
||||||
case 'x', 'X', 'u', 'U':
|
|
||||||
var n int
|
|
||||||
switch r {
|
|
||||||
case 'x', 'X':
|
|
||||||
n = 2
|
|
||||||
case 'u':
|
|
||||||
n = 4
|
|
||||||
case 'U':
|
|
||||||
n = 8
|
|
||||||
}
|
|
||||||
if len(s) < n {
|
|
||||||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
|
||||||
}
|
|
||||||
ss := s[:n]
|
|
||||||
s = s[n:]
|
|
||||||
i, err := strconv.ParseUint(ss, 16, 64)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
|
||||||
}
|
|
||||||
if r == 'x' || r == 'X' {
|
|
||||||
return string([]byte{byte(i)}), s, nil
|
|
||||||
}
|
|
||||||
if i > utf8.MaxRune {
|
|
||||||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
|
||||||
}
|
|
||||||
return string(i), s, nil
|
|
||||||
}
|
|
||||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Back off the parser by one token. Can only be done between calls to next().
|
|
||||||
// It makes the next advance() a no-op.
|
|
||||||
func (p *textParser) back() { p.backed = true }
|
|
||||||
|
|
||||||
// Advances the parser and returns the new current token.
|
|
||||||
func (p *textParser) next() *token {
|
|
||||||
if p.backed || p.done {
|
|
||||||
p.backed = false
|
|
||||||
return &p.cur
|
|
||||||
}
|
|
||||||
p.advance()
|
|
||||||
if p.done {
|
|
||||||
p.cur.value = ""
|
|
||||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
|
||||||
// Look for multiple quoted strings separated by whitespace,
|
|
||||||
// and concatenate them.
|
|
||||||
cat := p.cur
|
|
||||||
for {
|
|
||||||
p.skipWhitespace()
|
|
||||||
if p.done || !isQuote(p.s[0]) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
p.advance()
|
|
||||||
if p.cur.err != nil {
|
|
||||||
return &p.cur
|
|
||||||
}
|
|
||||||
cat.value += " " + p.cur.value
|
|
||||||
cat.unquoted += p.cur.unquoted
|
|
||||||
}
|
|
||||||
p.done = false // parser may have seen EOF, but we want to return cat
|
|
||||||
p.cur = cat
|
|
||||||
}
|
|
||||||
return &p.cur
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) consumeToken(s string) error {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value != s {
|
|
||||||
p.back()
|
|
||||||
return p.errorf("expected %q, found %q", s, tok.value)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return a RequiredNotSetError indicating which required field was not set.
|
|
||||||
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
|
|
||||||
st := sv.Type()
|
|
||||||
sprops := GetProperties(st)
|
|
||||||
for i := 0; i < st.NumField(); i++ {
|
|
||||||
if !isNil(sv.Field(i)) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
props := sprops.Prop[i]
|
|
||||||
if props.Required {
|
|
||||||
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns the index in the struct for the named field, as well as the parsed tag properties.
|
|
||||||
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
|
|
||||||
i, ok := sprops.decoderOrigNames[name]
|
|
||||||
if ok {
|
|
||||||
return i, sprops.Prop[i], true
|
|
||||||
}
|
|
||||||
return -1, nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Consume a ':' from the input stream (if the next token is a colon),
|
|
||||||
// returning an error if a colon is needed but not present.
|
|
||||||
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value != ":" {
|
|
||||||
// Colon is optional when the field is a group or message.
|
|
||||||
needColon := true
|
|
||||||
switch props.Wire {
|
|
||||||
case "group":
|
|
||||||
needColon = false
|
|
||||||
case "bytes":
|
|
||||||
// A "bytes" field is either a message, a string, or a repeated field;
|
|
||||||
// those three become *T, *string and []T respectively, so we can check for
|
|
||||||
// this field being a pointer to a non-string.
|
|
||||||
if typ.Kind() == reflect.Ptr {
|
|
||||||
// *T or *string
|
|
||||||
if typ.Elem().Kind() == reflect.String {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
} else if typ.Kind() == reflect.Slice {
|
|
||||||
// []T or []*T
|
|
||||||
if typ.Elem().Kind() != reflect.Ptr {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
} else if typ.Kind() == reflect.String {
|
|
||||||
// The proto3 exception is for a string field,
|
|
||||||
// which requires a colon.
|
|
||||||
break
|
|
||||||
}
|
|
||||||
needColon = false
|
|
||||||
}
|
|
||||||
if needColon {
|
|
||||||
return p.errorf("expected ':', found %q", tok.value)
|
|
||||||
}
|
|
||||||
p.back()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|
||||||
st := sv.Type()
|
|
||||||
sprops := GetProperties(st)
|
|
||||||
reqCount := sprops.reqCount
|
|
||||||
var reqFieldErr error
|
|
||||||
fieldSet := make(map[string]bool)
|
|
||||||
// A struct is a sequence of "name: value", terminated by one of
|
|
||||||
// '>' or '}', or the end of the input. A name may also be
|
|
||||||
// "[extension]" or "[type/url]".
|
|
||||||
//
|
|
||||||
// The whole struct can also be an expanded Any message, like:
|
|
||||||
// [type/url] < ... struct contents ... >
|
|
||||||
for {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value == terminator {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if tok.value == "[" {
|
|
||||||
// Looks like an extension or an Any.
|
|
||||||
//
|
|
||||||
// TODO: Check whether we need to handle
|
|
||||||
// namespace rooted names (e.g. ".something.Foo").
|
|
||||||
extName, err := p.consumeExtName()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if s := strings.LastIndex(extName, "/"); s >= 0 {
|
|
||||||
// If it contains a slash, it's an Any type URL.
|
|
||||||
messageName := extName[s+1:]
|
|
||||||
mt := MessageType(messageName)
|
|
||||||
if mt == nil {
|
|
||||||
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
|
|
||||||
}
|
|
||||||
tok = p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
// consume an optional colon
|
|
||||||
if tok.value == ":" {
|
|
||||||
tok = p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var terminator string
|
|
||||||
switch tok.value {
|
|
||||||
case "<":
|
|
||||||
terminator = ">"
|
|
||||||
case "{":
|
|
||||||
terminator = "}"
|
|
||||||
default:
|
|
||||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
|
||||||
}
|
|
||||||
v := reflect.New(mt.Elem())
|
|
||||||
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
|
|
||||||
return pe
|
|
||||||
}
|
|
||||||
b, err := Marshal(v.Interface().(Message))
|
|
||||||
if err != nil {
|
|
||||||
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
|
|
||||||
}
|
|
||||||
if fieldSet["type_url"] {
|
|
||||||
return p.errorf(anyRepeatedlyUnpacked, "type_url")
|
|
||||||
}
|
|
||||||
if fieldSet["value"] {
|
|
||||||
return p.errorf(anyRepeatedlyUnpacked, "value")
|
|
||||||
}
|
|
||||||
sv.FieldByName("TypeUrl").SetString(extName)
|
|
||||||
sv.FieldByName("Value").SetBytes(b)
|
|
||||||
fieldSet["type_url"] = true
|
|
||||||
fieldSet["value"] = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
var desc *ExtensionDesc
|
|
||||||
// This could be faster, but it's functional.
|
|
||||||
// TODO: Do something smarter than a linear scan.
|
|
||||||
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
|
|
||||||
if d.Name == extName {
|
|
||||||
desc = d
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if desc == nil {
|
|
||||||
return p.errorf("unrecognized extension %q", extName)
|
|
||||||
}
|
|
||||||
|
|
||||||
props := &Properties{}
|
|
||||||
props.Parse(desc.Tag)
|
|
||||||
|
|
||||||
typ := reflect.TypeOf(desc.ExtensionType)
|
|
||||||
if err := p.checkForColon(props, typ); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
rep := desc.repeated()
|
|
||||||
|
|
||||||
// Read the extension structure, and set it in
|
|
||||||
// the value we're constructing.
|
|
||||||
var ext reflect.Value
|
|
||||||
if !rep {
|
|
||||||
ext = reflect.New(typ).Elem()
|
|
||||||
} else {
|
|
||||||
ext = reflect.New(typ.Elem()).Elem()
|
|
||||||
}
|
|
||||||
if err := p.readAny(ext, props); err != nil {
|
|
||||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
reqFieldErr = err
|
|
||||||
}
|
|
||||||
ep := sv.Addr().Interface().(Message)
|
|
||||||
if !rep {
|
|
||||||
SetExtension(ep, desc, ext.Interface())
|
|
||||||
} else {
|
|
||||||
old, err := GetExtension(ep, desc)
|
|
||||||
var sl reflect.Value
|
|
||||||
if err == nil {
|
|
||||||
sl = reflect.ValueOf(old) // existing slice
|
|
||||||
} else {
|
|
||||||
sl = reflect.MakeSlice(typ, 0, 1)
|
|
||||||
}
|
|
||||||
sl = reflect.Append(sl, ext)
|
|
||||||
SetExtension(ep, desc, sl.Interface())
|
|
||||||
}
|
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is a normal, non-extension field.
|
|
||||||
name := tok.value
|
|
||||||
var dst reflect.Value
|
|
||||||
fi, props, ok := structFieldByName(sprops, name)
|
|
||||||
if ok {
|
|
||||||
dst = sv.Field(fi)
|
|
||||||
} else if oop, ok := sprops.OneofTypes[name]; ok {
|
|
||||||
// It is a oneof.
|
|
||||||
props = oop.Prop
|
|
||||||
nv := reflect.New(oop.Type.Elem())
|
|
||||||
dst = nv.Elem().Field(0)
|
|
||||||
field := sv.Field(oop.Field)
|
|
||||||
if !field.IsNil() {
|
|
||||||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
|
|
||||||
}
|
|
||||||
field.Set(nv)
|
|
||||||
}
|
|
||||||
if !dst.IsValid() {
|
|
||||||
return p.errorf("unknown field name %q in %v", name, st)
|
|
||||||
}
|
|
||||||
|
|
||||||
if dst.Kind() == reflect.Map {
|
|
||||||
// Consume any colon.
|
|
||||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Construct the map if it doesn't already exist.
|
|
||||||
if dst.IsNil() {
|
|
||||||
dst.Set(reflect.MakeMap(dst.Type()))
|
|
||||||
}
|
|
||||||
key := reflect.New(dst.Type().Key()).Elem()
|
|
||||||
val := reflect.New(dst.Type().Elem()).Elem()
|
|
||||||
|
|
||||||
// The map entry should be this sequence of tokens:
|
|
||||||
// < key : KEY value : VALUE >
|
|
||||||
// However, implementations may omit key or value, and technically
|
|
||||||
// we should support them in any order. See b/28924776 for a time
|
|
||||||
// this went wrong.
|
|
||||||
|
|
||||||
tok := p.next()
|
|
||||||
var terminator string
|
|
||||||
switch tok.value {
|
|
||||||
case "<":
|
|
||||||
terminator = ">"
|
|
||||||
case "{":
|
|
||||||
terminator = "}"
|
|
||||||
default:
|
|
||||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value == terminator {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
switch tok.value {
|
|
||||||
case "key":
|
|
||||||
if err := p.consumeToken(":"); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := p.readAny(key, props.MapKeyProp); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case "value":
|
|
||||||
if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := p.readAny(val, props.MapValProp); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
p.back()
|
|
||||||
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
dst.SetMapIndex(key, val)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check that it's not already set if it's not a repeated field.
|
|
||||||
if !props.Repeated && fieldSet[name] {
|
|
||||||
return p.errorf("non-repeated field %q was repeated", name)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse into the field.
|
|
||||||
fieldSet[name] = true
|
|
||||||
if err := p.readAny(dst, props); err != nil {
|
|
||||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
reqFieldErr = err
|
|
||||||
}
|
|
||||||
if props.Required {
|
|
||||||
reqCount--
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
if reqCount > 0 {
|
|
||||||
return p.missingRequiredFieldError(sv)
|
|
||||||
}
|
|
||||||
return reqFieldErr
|
|
||||||
}
|
|
||||||
|
|
||||||
// consumeExtName consumes extension name or expanded Any type URL and the
|
|
||||||
// following ']'. It returns the name or URL consumed.
|
|
||||||
func (p *textParser) consumeExtName() (string, error) {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return "", tok.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If extension name or type url is quoted, it's a single token.
|
|
||||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
|
||||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return name, p.consumeToken("]")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Consume everything up to "]"
|
|
||||||
var parts []string
|
|
||||||
for tok.value != "]" {
|
|
||||||
parts = append(parts, tok.value)
|
|
||||||
tok = p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
|
||||||
}
|
|
||||||
if p.done && tok.value != "]" {
|
|
||||||
return "", p.errorf("unclosed type_url or extension name")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return strings.Join(parts, ""), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
|
||||||
// It is used in readStruct to provide backward compatibility.
|
|
||||||
func (p *textParser) consumeOptionalSeparator() error {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value != ";" && tok.value != "," {
|
|
||||||
p.back()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value == "" {
|
|
||||||
return p.errorf("unexpected EOF")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch fv := v; fv.Kind() {
|
|
||||||
case reflect.Slice:
|
|
||||||
at := v.Type()
|
|
||||||
if at.Elem().Kind() == reflect.Uint8 {
|
|
||||||
// Special case for []byte
|
|
||||||
if tok.value[0] != '"' && tok.value[0] != '\'' {
|
|
||||||
// Deliberately written out here, as the error after
|
|
||||||
// this switch statement would write "invalid []byte: ...",
|
|
||||||
// which is not as user-friendly.
|
|
||||||
return p.errorf("invalid string: %v", tok.value)
|
|
||||||
}
|
|
||||||
bytes := []byte(tok.unquoted)
|
|
||||||
fv.Set(reflect.ValueOf(bytes))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Repeated field.
|
|
||||||
if tok.value == "[" {
|
|
||||||
// Repeated field with list notation, like [1,2,3].
|
|
||||||
for {
|
|
||||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
|
||||||
err := p.readAny(fv.Index(fv.Len()-1), props)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value == "]" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if tok.value != "," {
|
|
||||||
return p.errorf("Expected ']' or ',' found %q", tok.value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// One value of the repeated field.
|
|
||||||
p.back()
|
|
||||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
|
||||||
return p.readAny(fv.Index(fv.Len()-1), props)
|
|
||||||
case reflect.Bool:
|
|
||||||
// true/1/t/True or false/f/0/False.
|
|
||||||
switch tok.value {
|
|
||||||
case "true", "1", "t", "True":
|
|
||||||
fv.SetBool(true)
|
|
||||||
return nil
|
|
||||||
case "false", "0", "f", "False":
|
|
||||||
fv.SetBool(false)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
v := tok.value
|
|
||||||
// Ignore 'f' for compatibility with output generated by C++, but don't
|
|
||||||
// remove 'f' when the value is "-inf" or "inf".
|
|
||||||
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
|
|
||||||
v = v[:len(v)-1]
|
|
||||||
}
|
|
||||||
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
|
|
||||||
fv.SetFloat(f)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
case reflect.Int32:
|
|
||||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
|
||||||
fv.SetInt(x)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(props.Enum) == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
m, ok := enumValueMaps[props.Enum]
|
|
||||||
if !ok {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
x, ok := m[tok.value]
|
|
||||||
if !ok {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
fv.SetInt(int64(x))
|
|
||||||
return nil
|
|
||||||
case reflect.Int64:
|
|
||||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
|
||||||
fv.SetInt(x)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Ptr:
|
|
||||||
// A basic field (indirected through pointer), or a repeated message/group
|
|
||||||
p.back()
|
|
||||||
fv.Set(reflect.New(fv.Type().Elem()))
|
|
||||||
return p.readAny(fv.Elem(), props)
|
|
||||||
case reflect.String:
|
|
||||||
if tok.value[0] == '"' || tok.value[0] == '\'' {
|
|
||||||
fv.SetString(tok.unquoted)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
case reflect.Struct:
|
|
||||||
var terminator string
|
|
||||||
switch tok.value {
|
|
||||||
case "{":
|
|
||||||
terminator = "}"
|
|
||||||
case "<":
|
|
||||||
terminator = ">"
|
|
||||||
default:
|
|
||||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
|
||||||
}
|
|
||||||
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
|
|
||||||
return p.readStruct(fv, terminator)
|
|
||||||
case reflect.Uint32:
|
|
||||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
|
||||||
fv.SetUint(uint64(x))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
case reflect.Uint64:
|
|
||||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
|
||||||
fv.SetUint(x)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return p.errorf("invalid %v: %v", v.Type(), tok.value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
|
|
||||||
// before starting to unmarshal, so any existing data in pb is always removed.
|
|
||||||
// If a required field is not set and no other error occurs,
|
|
||||||
// UnmarshalText returns *RequiredNotSetError.
|
|
||||||
func UnmarshalText(s string, pb Message) error {
|
|
||||||
if um, ok := pb.(encoding.TextUnmarshaler); ok {
|
|
||||||
return um.UnmarshalText([]byte(s))
|
|
||||||
}
|
|
||||||
pb.Reset()
|
|
||||||
v := reflect.ValueOf(pb)
|
|
||||||
return newTextParser(s).readStruct(v.Elem(), "")
|
|
||||||
}
|
|
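UnmarshalText is the inverse entry point: it resets the target message and feeds the text-format input through the parser shown above (tokenizer, quoted-string unescaping, struct reader). A small sketch of parsing text format back into a message, again assuming the ptypes/wrappers types are available:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	var msg wrappers.StringValue
	// Quoted strings accept the escapes handled by unescape above (\n, \t, octal, \x, \u).
	if err := proto.UnmarshalText(`value: "hello\nworld"`, &msg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(msg.GetValue()) // "hello", a newline, then "world"
}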
78 vendor/github.com/golang/protobuf/proto/wire.go (generated, vendored, normal file)
@@ -0,0 +1,78 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
	protoV2 "google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/runtime/protoiface"
)

// Size returns the size in bytes of the wire-format encoding of m.
func Size(m Message) int {
	if m == nil {
		return 0
	}
	mi := MessageV2(m)
	return protoV2.Size(mi)
}

// Marshal returns the wire-format encoding of m.
func Marshal(m Message) ([]byte, error) {
	b, err := marshalAppend(nil, m, false)
	if b == nil {
		b = zeroBytes
	}
	return b, err
}

var zeroBytes = make([]byte, 0, 0)

func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
	if m == nil {
		return nil, ErrNil
	}
	mi := MessageV2(m)
	nbuf, err := protoV2.MarshalOptions{
		Deterministic: deterministic,
		AllowPartial:  true,
	}.MarshalAppend(buf, mi)
	if err != nil {
		return buf, err
	}
	if len(buf) == len(nbuf) {
		if !mi.ProtoReflect().IsValid() {
			return buf, ErrNil
		}
	}
	return nbuf, checkRequiredNotSet(mi)
}

// Unmarshal parses a wire-format message in b and places the decoded results in m.
//
// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
// removed. Use UnmarshalMerge to preserve and append to existing data.
func Unmarshal(b []byte, m Message) error {
	m.Reset()
	return UnmarshalMerge(b, m)
}

// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
func UnmarshalMerge(b []byte, m Message) error {
	mi := MessageV2(m)
	out, err := protoV2.UnmarshalOptions{
		AllowPartial: true,
		Merge:        true,
	}.UnmarshalState(protoiface.UnmarshalInput{
		Buf:     b,
		Message: mi.ProtoReflect(),
	})
	if err != nil {
		return err
	}
	if out.Flags&protoiface.UnmarshalInitialized > 0 {
		return nil
	}
	return checkRequiredNotSet(mi)
}
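wire.go is a thin shim: the v1 Marshal, Unmarshal and Size entry points now delegate to the google.golang.org/protobuf (v2) runtime via MessageV2, as the code above shows. A hedged round-trip sketch using only the v1 surface, with the same ptypes/wrappers assumption as earlier:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	in := &wrappers.Int64Value{Value: 42}

	b, err := proto.Marshal(in) // delegates to protoV2.MarshalOptions
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(proto.Size(in) == len(b)) // Size is computed by the v2 runtime as well

	out := &wrappers.Int64Value{}
	if err := proto.Unmarshal(b, out); err != nil { // Reset followed by UnmarshalMerge
		log.Fatal(err)
	}
	fmt.Println(out.GetValue()) // 42
}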
34 vendor/github.com/golang/protobuf/proto/wrappers.go (generated, vendored, normal file)
@@ -0,0 +1,34 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

// Bool stores v in a new bool value and returns a pointer to it.
func Bool(v bool) *bool { return &v }

// Int stores v in a new int32 value and returns a pointer to it.
//
// Deprecated: Use Int32 instead.
func Int(v int) *int32 { return Int32(int32(v)) }

// Int32 stores v in a new int32 value and returns a pointer to it.
func Int32(v int32) *int32 { return &v }

// Int64 stores v in a new int64 value and returns a pointer to it.
func Int64(v int64) *int64 { return &v }

// Uint32 stores v in a new uint32 value and returns a pointer to it.
func Uint32(v uint32) *uint32 { return &v }

// Uint64 stores v in a new uint64 value and returns a pointer to it.
func Uint64(v uint64) *uint64 { return &v }

// Float32 stores v in a new float32 value and returns a pointer to it.
func Float32(v float32) *float32 { return &v }

// Float64 stores v in a new float64 value and returns a pointer to it.
func Float64(v float64) *float64 { return &v }

// String stores v in a new string value and returns a pointer to it.
func String(v string) *string { return &v }
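These helpers exist because proto2-generated structs model optional scalar fields as pointers. The descriptor package vendored in this same change is a convenient illustration: its messages use *string and *int32 fields, which are most easily populated through proto.String and proto.Int32. A sketch for illustration only, not code from ceph-csi itself:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// Optional proto2 fields are pointers; the wrapper helpers allocate them inline.
	fd := &descpb.FieldDescriptorProto{
		Name:   proto.String("id"),
		Number: proto.Int32(1),
	}
	fmt.Println(fd.GetName(), fd.GetNumber()) // generated getters dereference safely
}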
3047 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go (generated, vendored)
File diff suppressed because it is too large
885 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto (generated, vendored)
@@ -1,885 +0,0 @@
|
|||||||
// Protocol Buffers - Google's data interchange format
|
|
||||||
// Copyright 2008 Google Inc. All rights reserved.
|
|
||||||
// https://developers.google.com/protocol-buffers/
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
// Author: kenton@google.com (Kenton Varda)
|
|
||||||
// Based on original Protocol Buffers design by
|
|
||||||
// Sanjay Ghemawat, Jeff Dean, and others.
|
|
||||||
//
|
|
||||||
// The messages in this file describe the definitions found in .proto files.
|
|
||||||
// A valid .proto file can be translated directly to a FileDescriptorProto
|
|
||||||
// without any other information (e.g. without reading its imports).
|
|
||||||
|
|
||||||
|
|
||||||
syntax = "proto2";
|
|
||||||
|
|
||||||
package google.protobuf;
|
|
||||||
|
|
||||||
option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
|
|
||||||
option java_package = "com.google.protobuf";
|
|
||||||
option java_outer_classname = "DescriptorProtos";
|
|
||||||
option csharp_namespace = "Google.Protobuf.Reflection";
|
|
||||||
option objc_class_prefix = "GPB";
|
|
||||||
option cc_enable_arenas = true;
|
|
||||||
|
|
||||||
// descriptor.proto must be optimized for speed because reflection-based
|
|
||||||
// algorithms don't work during bootstrapping.
|
|
||||||
option optimize_for = SPEED;
|
|
||||||
|
|
||||||
// The protocol compiler can output a FileDescriptorSet containing the .proto
|
|
||||||
// files it parses.
|
|
||||||
message FileDescriptorSet {
|
|
||||||
repeated FileDescriptorProto file = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes a complete .proto file.
|
|
||||||
message FileDescriptorProto {
|
|
||||||
optional string name = 1; // file name, relative to root of source tree
|
|
||||||
optional string package = 2; // e.g. "foo", "foo.bar", etc.
|
|
||||||
|
|
||||||
// Names of files imported by this file.
|
|
||||||
repeated string dependency = 3;
|
|
||||||
// Indexes of the public imported files in the dependency list above.
|
|
||||||
repeated int32 public_dependency = 10;
|
|
||||||
// Indexes of the weak imported files in the dependency list.
|
|
||||||
// For Google-internal migration only. Do not use.
|
|
||||||
repeated int32 weak_dependency = 11;
|
|
||||||
|
|
||||||
// All top-level definitions in this file.
|
|
||||||
repeated DescriptorProto message_type = 4;
|
|
||||||
repeated EnumDescriptorProto enum_type = 5;
|
|
||||||
repeated ServiceDescriptorProto service = 6;
|
|
||||||
repeated FieldDescriptorProto extension = 7;
|
|
||||||
|
|
||||||
optional FileOptions options = 8;
|
|
||||||
|
|
||||||
// This field contains optional information about the original source code.
|
|
||||||
// You may safely remove this entire field without harming runtime
|
|
||||||
// functionality of the descriptors -- the information is needed only by
|
|
||||||
// development tools.
|
|
||||||
optional SourceCodeInfo source_code_info = 9;
|
|
||||||
|
|
||||||
// The syntax of the proto file.
|
|
||||||
// The supported values are "proto2" and "proto3".
|
|
||||||
optional string syntax = 12;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes a message type.
|
|
||||||
message DescriptorProto {
|
|
||||||
optional string name = 1;
|
|
||||||
|
|
||||||
repeated FieldDescriptorProto field = 2;
|
|
||||||
repeated FieldDescriptorProto extension = 6;
|
|
||||||
|
|
||||||
repeated DescriptorProto nested_type = 3;
|
|
||||||
repeated EnumDescriptorProto enum_type = 4;
|
|
||||||
|
|
||||||
message ExtensionRange {
|
|
||||||
optional int32 start = 1; // Inclusive.
|
|
||||||
optional int32 end = 2; // Exclusive.
|
|
||||||
|
|
||||||
optional ExtensionRangeOptions options = 3;
|
|
||||||
}
|
|
||||||
repeated ExtensionRange extension_range = 5;
|
|
||||||
|
|
||||||
repeated OneofDescriptorProto oneof_decl = 8;
|
|
||||||
|
|
||||||
optional MessageOptions options = 7;
|
|
||||||
|
|
||||||
// Range of reserved tag numbers. Reserved tag numbers may not be used by
|
|
||||||
// fields or extension ranges in the same message. Reserved ranges may
|
|
||||||
// not overlap.
|
|
||||||
message ReservedRange {
|
|
||||||
optional int32 start = 1; // Inclusive.
|
|
||||||
optional int32 end = 2; // Exclusive.
|
|
||||||
}
|
|
||||||
repeated ReservedRange reserved_range = 9;
|
|
||||||
// Reserved field names, which may not be used by fields in the same message.
|
|
||||||
// A given name may only be reserved once.
|
|
||||||
repeated string reserved_name = 10;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ExtensionRangeOptions {
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message. See above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes a field within a message.
|
|
||||||
message FieldDescriptorProto {
|
|
||||||
enum Type {
|
|
||||||
// 0 is reserved for errors.
|
|
||||||
// Order is weird for historical reasons.
|
|
||||||
TYPE_DOUBLE = 1;
|
|
||||||
TYPE_FLOAT = 2;
|
|
||||||
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
|
|
||||||
// negative values are likely.
|
|
||||||
TYPE_INT64 = 3;
|
|
||||||
TYPE_UINT64 = 4;
|
|
||||||
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
|
|
||||||
// negative values are likely.
|
|
||||||
TYPE_INT32 = 5;
|
|
||||||
TYPE_FIXED64 = 6;
|
|
||||||
TYPE_FIXED32 = 7;
|
|
||||||
TYPE_BOOL = 8;
|
|
||||||
TYPE_STRING = 9;
|
|
||||||
// Tag-delimited aggregate.
|
|
||||||
// Group type is deprecated and not supported in proto3. However, Proto3
|
|
||||||
// implementations should still be able to parse the group wire format and
|
|
||||||
// treat group fields as unknown fields.
|
|
||||||
TYPE_GROUP = 10;
|
|
||||||
TYPE_MESSAGE = 11; // Length-delimited aggregate.
|
|
||||||
|
|
||||||
// New in version 2.
|
|
||||||
TYPE_BYTES = 12;
|
|
||||||
TYPE_UINT32 = 13;
|
|
||||||
TYPE_ENUM = 14;
|
|
||||||
TYPE_SFIXED32 = 15;
|
|
||||||
TYPE_SFIXED64 = 16;
|
|
||||||
TYPE_SINT32 = 17; // Uses ZigZag encoding.
|
|
||||||
TYPE_SINT64 = 18; // Uses ZigZag encoding.
|
|
||||||
}
|
|
||||||
|
|
||||||
enum Label {
|
|
||||||
// 0 is reserved for errors
|
|
||||||
LABEL_OPTIONAL = 1;
|
|
||||||
LABEL_REQUIRED = 2;
|
|
||||||
LABEL_REPEATED = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
optional string name = 1;
|
|
||||||
optional int32 number = 3;
|
|
||||||
optional Label label = 4;
|
|
||||||
|
|
||||||
// If type_name is set, this need not be set. If both this and type_name
|
|
||||||
// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
|
|
||||||
optional Type type = 5;
|
|
||||||
|
|
||||||
// For message and enum types, this is the name of the type. If the name
|
|
||||||
// starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
|
|
||||||
// rules are used to find the type (i.e. first the nested types within this
|
|
||||||
// message are searched, then within the parent, on up to the root
|
|
||||||
// namespace).
|
|
||||||
optional string type_name = 6;
|
|
||||||
|
|
||||||
// For extensions, this is the name of the type being extended. It is
|
|
||||||
// resolved in the same manner as type_name.
|
|
||||||
optional string extendee = 2;
|
|
||||||
|
|
||||||
// For numeric types, contains the original text representation of the value.
|
|
||||||
// For booleans, "true" or "false".
|
|
||||||
// For strings, contains the default text contents (not escaped in any way).
|
|
||||||
// For bytes, contains the C escaped value. All bytes >= 128 are escaped.
|
|
||||||
// TODO(kenton): Base-64 encode?
|
|
||||||
optional string default_value = 7;
|
|
||||||
|
|
||||||
// If set, gives the index of a oneof in the containing type's oneof_decl
|
|
||||||
// list. This field is a member of that oneof.
|
|
||||||
optional int32 oneof_index = 9;
|
|
||||||
|
|
||||||
// JSON name of this field. The value is set by protocol compiler. If the
|
|
||||||
// user has set a "json_name" option on this field, that option's value
|
|
||||||
// will be used. Otherwise, it's deduced from the field's name by converting
|
|
||||||
// it to camelCase.
|
|
||||||
optional string json_name = 10;
|
|
||||||
|
|
||||||
optional FieldOptions options = 8;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes a oneof.
|
|
||||||
message OneofDescriptorProto {
|
|
||||||
optional string name = 1;
|
|
||||||
optional OneofOptions options = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes an enum type.
|
|
||||||
message EnumDescriptorProto {
|
|
||||||
optional string name = 1;
|
|
||||||
|
|
||||||
repeated EnumValueDescriptorProto value = 2;
|
|
||||||
|
|
||||||
optional EnumOptions options = 3;
|
|
||||||
|
|
||||||
// Range of reserved numeric values. Reserved values may not be used by
|
|
||||||
// entries in the same enum. Reserved ranges may not overlap.
|
|
||||||
//
|
|
||||||
// Note that this is distinct from DescriptorProto.ReservedRange in that it
|
|
||||||
// is inclusive such that it can appropriately represent the entire int32
|
|
||||||
// domain.
|
|
||||||
message EnumReservedRange {
|
|
||||||
optional int32 start = 1; // Inclusive.
|
|
||||||
optional int32 end = 2; // Inclusive.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Range of reserved numeric values. Reserved numeric values may not be used
|
|
||||||
// by enum values in the same enum declaration. Reserved ranges may not
|
|
||||||
// overlap.
|
|
||||||
repeated EnumReservedRange reserved_range = 4;
|
|
||||||
|
|
||||||
// Reserved enum value names, which may not be reused. A given name may only
|
|
||||||
// be reserved once.
|
|
||||||
repeated string reserved_name = 5;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes a value within an enum.
|
|
||||||
message EnumValueDescriptorProto {
|
|
||||||
optional string name = 1;
|
|
||||||
optional int32 number = 2;
|
|
||||||
|
|
||||||
optional EnumValueOptions options = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes a service.
|
|
||||||
message ServiceDescriptorProto {
|
|
||||||
optional string name = 1;
|
|
||||||
repeated MethodDescriptorProto method = 2;
|
|
||||||
|
|
||||||
optional ServiceOptions options = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes a method of a service.
|
|
||||||
message MethodDescriptorProto {
|
|
||||||
optional string name = 1;
|
|
||||||
|
|
||||||
// Input and output type names. These are resolved in the same way as
|
|
||||||
// FieldDescriptorProto.type_name, but must refer to a message type.
|
|
||||||
optional string input_type = 2;
|
|
||||||
optional string output_type = 3;
|
|
||||||
|
|
||||||
optional MethodOptions options = 4;
|
|
||||||
|
|
||||||
// Identifies if client streams multiple client messages
|
|
||||||
optional bool client_streaming = 5 [default = false];
|
|
||||||
// Identifies if server streams multiple server messages
|
|
||||||
optional bool server_streaming = 6 [default = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
// ===================================================================
|
|
||||||
// Options
|
|
||||||
|
|
||||||
// Each of the definitions above may have "options" attached. These are
|
|
||||||
// just annotations which may cause code to be generated slightly differently
|
|
||||||
// or may contain hints for code that manipulates protocol messages.
|
|
||||||
//
|
|
||||||
// Clients may define custom options as extensions of the *Options messages.
|
|
||||||
// These extensions may not yet be known at parsing time, so the parser cannot
|
|
||||||
// store the values in them. Instead it stores them in a field in the *Options
|
|
||||||
// message called uninterpreted_option. This field must have the same name
|
|
||||||
// across all *Options messages. We then use this field to populate the
|
|
||||||
// extensions when we build a descriptor, at which point all protos have been
|
|
||||||
// parsed and so all extensions are known.
|
|
||||||
//
|
|
||||||
// Extension numbers for custom options may be chosen as follows:
|
|
||||||
// * For options which will only be used within a single application or
|
|
||||||
// organization, or for experimental options, use field numbers 50000
|
|
||||||
// through 99999. It is up to you to ensure that you do not use the
|
|
||||||
// same number for multiple options.
|
|
||||||
// * For options which will be published and used publicly by multiple
|
|
||||||
// independent entities, e-mail protobuf-global-extension-registry@google.com
|
|
||||||
// to reserve extension numbers. Simply provide your project name (e.g.
|
|
||||||
// Objective-C plugin) and your project website (if available) -- there's no
|
|
||||||
// need to explain how you intend to use them. Usually you only need one
|
|
||||||
// extension number. You can declare multiple options with only one extension
|
|
||||||
// number by putting them in a sub-message. See the Custom Options section of
|
|
||||||
// the docs for examples:
|
|
||||||
// https://developers.google.com/protocol-buffers/docs/proto#options
|
|
||||||
// If this turns out to be popular, a web service will be set up
|
|
||||||
// to automatically assign option numbers.
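To make the custom-options mechanism above concrete: an extension of google.protobuf.FileOptions declared in a hypothetical my_options.proto (for example an optional string "owner" field numbered in the 50000-99999 range) is generated into Go as an ExtensionDesc and can be read back with the proto v1 extension API vendored here. A hedged sketch, where mypb and its E_Owner descriptor are assumptions:

package descriptorutil

import (
	"github.com/golang/protobuf/proto"
	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"

	mypb "example.com/mypb" // hypothetical package generated from my_options.proto
)

// fileOwner reads a hypothetical custom FileOptions extension from a parsed
// file descriptor. It returns "" when the option is absent.
func fileOwner(fd *descpb.FileDescriptorProto) (string, error) {
	opts := fd.GetOptions() // may be nil when no options are set
	if opts == nil || !proto.HasExtension(opts, mypb.E_Owner) {
		return "", nil
	}
	v, err := proto.GetExtension(opts, mypb.E_Owner) // *string for an optional string extension
	if err != nil {
		return "", err
	}
	return *(v.(*string)), nil
}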
message FileOptions {
|
|
||||||
|
|
||||||
// Sets the Java package where classes generated from this .proto will be
|
|
||||||
// placed. By default, the proto package is used, but this is often
|
|
||||||
// inappropriate because proto packages do not normally start with backwards
|
|
||||||
// domain names.
|
|
||||||
optional string java_package = 1;
|
|
||||||
|
|
||||||
|
|
||||||
// If set, all the classes from the .proto file are wrapped in a single
|
|
||||||
// outer class with the given name. This applies to both Proto1
|
|
||||||
// (equivalent to the old "--one_java_file" option) and Proto2 (where
|
|
||||||
// a .proto always translates to a single class, but you may want to
|
|
||||||
// explicitly choose the class name).
|
|
||||||
optional string java_outer_classname = 8;
|
|
||||||
|
|
||||||
// If set true, then the Java code generator will generate a separate .java
|
|
||||||
// file for each top-level message, enum, and service defined in the .proto
|
|
||||||
// file. Thus, these types will *not* be nested inside the outer class
|
|
||||||
// named by java_outer_classname. However, the outer class will still be
|
|
||||||
// generated to contain the file's getDescriptor() method as well as any
|
|
||||||
// top-level extensions defined in the file.
|
|
||||||
optional bool java_multiple_files = 10 [default = false];
|
|
||||||
|
|
||||||
// This option does nothing.
|
|
||||||
optional bool java_generate_equals_and_hash = 20 [deprecated=true];
|
|
||||||
|
|
||||||
// If set true, then the Java2 code generator will generate code that
|
|
||||||
// throws an exception whenever an attempt is made to assign a non-UTF-8
|
|
||||||
// byte sequence to a string field.
|
|
||||||
// Message reflection will do the same.
|
|
||||||
// However, an extension field still accepts non-UTF-8 byte sequences.
|
|
||||||
// This option has no effect when used with the lite runtime.
|
|
||||||
optional bool java_string_check_utf8 = 27 [default = false];
|
|
||||||
|
|
||||||
|
|
||||||
// Generated classes can be optimized for speed or code size.
|
|
||||||
enum OptimizeMode {
|
|
||||||
SPEED = 1; // Generate complete code for parsing, serialization,
|
|
||||||
// etc.
|
|
||||||
CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
|
|
||||||
LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
|
|
||||||
}
|
|
||||||
optional OptimizeMode optimize_for = 9 [default = SPEED];
|
|
||||||
|
|
||||||
// Sets the Go package where structs generated from this .proto will be
|
|
||||||
// placed. If omitted, the Go package will be derived from the following:
|
|
||||||
// - The basename of the package import path, if provided.
|
|
||||||
// - Otherwise, the package statement in the .proto file, if present.
|
|
||||||
// - Otherwise, the basename of the .proto file, without extension.
|
|
||||||
optional string go_package = 11;
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
// Should generic services be generated in each language? "Generic" services
|
|
||||||
// are not specific to any particular RPC system. They are generated by the
|
|
||||||
// main code generators in each language (without additional plugins).
|
|
||||||
// Generic services were the only kind of service generation supported by
|
|
||||||
// early versions of google.protobuf.
|
|
||||||
//
|
|
||||||
// Generic services are now considered deprecated in favor of using plugins
|
|
||||||
// that generate code specific to your particular RPC system. Therefore,
|
|
||||||
// these default to false. Old code which depends on generic services should
|
|
||||||
// explicitly set them to true.
|
|
||||||
optional bool cc_generic_services = 16 [default = false];
|
|
||||||
optional bool java_generic_services = 17 [default = false];
|
|
||||||
optional bool py_generic_services = 18 [default = false];
|
|
||||||
optional bool php_generic_services = 42 [default = false];
|
|
||||||
|
|
||||||
// Is this file deprecated?
|
|
||||||
// Depending on the target platform, this can emit Deprecated annotations
|
|
||||||
// for everything in the file, or it will be completely ignored; in the very
|
|
||||||
// least, this is a formalization for deprecating files.
|
|
||||||
optional bool deprecated = 23 [default = false];
|
|
||||||
|
|
||||||
// Enables the use of arenas for the proto messages in this file. This applies
|
|
||||||
// only to generated classes for C++.
|
|
||||||
optional bool cc_enable_arenas = 31 [default = false];
|
|
||||||
|
|
||||||
|
|
||||||
// Sets the objective c class prefix which is prepended to all objective c
|
|
||||||
// generated classes from this .proto. There is no default.
|
|
||||||
optional string objc_class_prefix = 36;
|
|
||||||
|
|
||||||
// Namespace for generated classes; defaults to the package.
|
|
||||||
optional string csharp_namespace = 37;
|
|
||||||
|
|
||||||
// By default Swift generators will take the proto package and CamelCase it
|
|
||||||
// replacing '.' with underscore and use that to prefix the types/symbols
|
|
||||||
// defined. When this option is provided, they will use this value instead
|
|
||||||
// to prefix the types/symbols defined.
|
|
||||||
optional string swift_prefix = 39;
|
|
||||||
|
|
||||||
// Sets the php class prefix which is prepended to all php generated classes
|
|
||||||
// from this .proto. Default is empty.
|
|
||||||
optional string php_class_prefix = 40;
|
|
||||||
|
|
||||||
// Use this option to change the namespace of php generated classes. Default
|
|
||||||
// is empty. When this option is empty, the package name will be used for
|
|
||||||
// determining the namespace.
|
|
||||||
optional string php_namespace = 41;
|
|
||||||
|
|
||||||
// Use this option to change the namespace of php generated metadata classes.
|
|
||||||
// Default is empty. When this option is empty, the proto file name will be
|
|
||||||
// used for determining the namespace.
|
|
||||||
optional string php_metadata_namespace = 44;
|
|
||||||
|
|
||||||
// Use this option to change the package of ruby generated classes. Default
|
|
||||||
// is empty. When this option is not set, the package name will be used for
|
|
||||||
// determining the ruby package.
|
|
||||||
optional string ruby_package = 45;
|
|
||||||
|
|
||||||
|
|
||||||
// The parser stores options it doesn't recognize here.
|
|
||||||
// See the documentation for the "Options" section above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message.
|
|
||||||
// See the documentation for the "Options" section above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
|
|
||||||
reserved 38;
|
|
||||||
}
|
|
||||||
|
|
||||||
message MessageOptions {
|
|
||||||
// Set true to use the old proto1 MessageSet wire format for extensions.
|
|
||||||
// This is provided for backwards-compatibility with the MessageSet wire
|
|
||||||
// format. You should not use this for any other reason: It's less
|
|
||||||
// efficient, has fewer features, and is more complicated.
|
|
||||||
//
|
|
||||||
// The message must be defined exactly as follows:
|
|
||||||
// message Foo {
|
|
||||||
// option message_set_wire_format = true;
|
|
||||||
// extensions 4 to max;
|
|
||||||
// }
|
|
||||||
// Note that the message cannot have any defined fields; MessageSets only
|
|
||||||
// have extensions.
|
|
||||||
//
|
|
||||||
// All extensions of your type must be singular messages; e.g. they cannot
|
|
||||||
// be int32s, enums, or repeated messages.
|
|
||||||
//
|
|
||||||
// Because this is an option, the above two restrictions are not enforced by
|
|
||||||
// the protocol compiler.
|
|
||||||
optional bool message_set_wire_format = 1 [default = false];
|
|
||||||
|
|
||||||
// Disables the generation of the standard "descriptor()" accessor, which can
|
|
||||||
// conflict with a field of the same name. This is meant to make migration
|
|
||||||
// from proto1 easier; new code should avoid fields named "descriptor".
|
|
||||||
optional bool no_standard_descriptor_accessor = 2 [default = false];
|
|
||||||
|
|
||||||
// Is this message deprecated?
|
|
||||||
// Depending on the target platform, this can emit Deprecated annotations
|
|
||||||
// for the message, or it will be completely ignored; in the very least,
|
|
||||||
// this is a formalization for deprecating messages.
|
|
||||||
optional bool deprecated = 3 [default = false];
|
|
||||||
|
|
||||||
// Whether the message is an automatically generated map entry type for the
|
|
||||||
// maps field.
|
|
||||||
//
|
|
||||||
// For maps fields:
|
|
||||||
// map<KeyType, ValueType> map_field = 1;
|
|
||||||
// The parsed descriptor looks like:
|
|
||||||
// message MapFieldEntry {
|
|
||||||
// option map_entry = true;
|
|
||||||
// optional KeyType key = 1;
|
|
||||||
// optional ValueType value = 2;
|
|
||||||
// }
|
|
||||||
// repeated MapFieldEntry map_field = 1;
|
|
||||||
//
|
|
||||||
// Implementations may choose not to generate the map_entry=true message, but
|
|
||||||
// use a native map in the target language to hold the keys and values.
|
|
||||||
// The reflection APIs in such implementations still need to work as
|
|
||||||
// if the field is a repeated message field.
|
|
||||||
//
|
|
||||||
// NOTE: Do not set the option in .proto files. Always use the maps syntax
|
|
||||||
// instead. The option should only be implicitly set by the proto compiler
|
|
||||||
// parser.
|
|
||||||
optional bool map_entry = 7;
|
|
||||||
|
|
||||||
reserved 8; // javalite_serializable
|
|
||||||
reserved 9; // javanano_as_lite
|
|
||||||
|
|
||||||
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message. See above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
}
|
|
||||||
|
|
||||||
message FieldOptions {
|
|
||||||
// The ctype option instructs the C++ code generator to use a different
|
|
||||||
// representation of the field than it normally would. See the specific
|
|
||||||
// options below. This option is not yet implemented in the open source
|
|
||||||
// release -- sorry, we'll try to include it in a future version!
|
|
||||||
optional CType ctype = 1 [default = STRING];
|
|
||||||
enum CType {
|
|
||||||
// Default mode.
|
|
||||||
STRING = 0;
|
|
||||||
|
|
||||||
CORD = 1;
|
|
||||||
|
|
||||||
STRING_PIECE = 2;
|
|
||||||
}
|
|
||||||
// The packed option can be enabled for repeated primitive fields to enable
|
|
||||||
// a more efficient representation on the wire. Rather than repeatedly
|
|
||||||
// writing the tag and type for each element, the entire array is encoded as
|
|
||||||
// a single length-delimited blob. In proto3, only explicitly setting it to
|
|
||||||
// false will avoid using packed encoding.
|
|
||||||
optional bool packed = 2;
|
|
||||||
|
|
||||||
// The jstype option determines the JavaScript type used for values of the
|
|
||||||
// field. The option is permitted only for 64 bit integral and fixed types
|
|
||||||
// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
|
|
||||||
// is represented as JavaScript string, which avoids loss of precision that
|
|
||||||
// can happen when a large value is converted to a floating point JavaScript number.
|
|
||||||
// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
|
|
||||||
// use the JavaScript "number" type. The behavior of the default option
|
|
||||||
// JS_NORMAL is implementation dependent.
|
|
||||||
//
|
|
||||||
// This option is an enum to permit additional types to be added, e.g.
|
|
||||||
// goog.math.Integer.
|
|
||||||
optional JSType jstype = 6 [default = JS_NORMAL];
|
|
||||||
enum JSType {
|
|
||||||
// Use the default type.
|
|
||||||
JS_NORMAL = 0;
|
|
||||||
|
|
||||||
// Use JavaScript strings.
|
|
||||||
JS_STRING = 1;
|
|
||||||
|
|
||||||
// Use JavaScript numbers.
|
|
||||||
JS_NUMBER = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should this field be parsed lazily? Lazy applies only to message-type
|
|
||||||
// fields. It means that when the outer message is initially parsed, the
|
|
||||||
// inner message's contents will not be parsed but instead stored in encoded
|
|
||||||
// form. The inner message will actually be parsed when it is first accessed.
|
|
||||||
//
|
|
||||||
// This is only a hint. Implementations are free to choose whether to use
|
|
||||||
// eager or lazy parsing regardless of the value of this option. However,
|
|
||||||
// setting this option true suggests that the protocol author believes that
|
|
||||||
// using lazy parsing on this field is worth the additional bookkeeping
|
|
||||||
// overhead typically needed to implement it.
|
|
||||||
//
|
|
||||||
// This option does not affect the public interface of any generated code;
|
|
||||||
// all method signatures remain the same. Furthermore, thread-safety of the
|
|
||||||
// interface is not affected by this option; const methods remain safe to
|
|
||||||
// call from multiple threads concurrently, while non-const methods continue
|
|
||||||
// to require exclusive access.
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// Note that implementations may choose not to check required fields within
|
|
||||||
// a lazy sub-message. That is, calling IsInitialized() on the outer message
|
|
||||||
// may return true even if the inner message has missing required fields.
|
|
||||||
// This is necessary because otherwise the inner message would have to be
|
|
||||||
// parsed in order to perform the check, defeating the purpose of lazy
|
|
||||||
// parsing. An implementation which chooses not to check required fields
|
|
||||||
// must be consistent about it. That is, for any particular sub-message, the
|
|
||||||
// implementation must either *always* check its required fields, or *never*
|
|
||||||
// check its required fields, regardless of whether or not the message has
|
|
||||||
// been parsed.
|
|
||||||
optional bool lazy = 5 [default = false];
|
|
||||||
|
|
||||||
// Is this field deprecated?
|
|
||||||
// Depending on the target platform, this can emit Deprecated annotations
|
|
||||||
// for accessors, or it will be completely ignored; in the very least, this
|
|
||||||
// is a formalization for deprecating fields.
|
|
||||||
optional bool deprecated = 3 [default = false];
|
|
||||||
|
|
||||||
// For Google-internal migration only. Do not use.
|
|
||||||
optional bool weak = 10 [default = false];
|
|
||||||
|
|
||||||
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message. See above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
|
|
||||||
reserved 4; // removed jtype
|
|
||||||
}
|
|
||||||
|
|
||||||
message OneofOptions {
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message. See above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
}
|
|
||||||
|
|
||||||
message EnumOptions {
|
|
||||||
|
|
||||||
// Set this option to true to allow mapping different tag names to the same
|
|
||||||
// value.
|
|
||||||
optional bool allow_alias = 2;
|
|
||||||
|
|
||||||
// Is this enum deprecated?
|
|
||||||
// Depending on the target platform, this can emit Deprecated annotations
|
|
||||||
// for the enum, or it will be completely ignored; in the very least, this
|
|
||||||
// is a formalization for deprecating enums.
|
|
||||||
optional bool deprecated = 3 [default = false];
|
|
||||||
|
|
||||||
reserved 5; // javanano_as_lite
|
|
||||||
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message. See above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
}
|
|
||||||
|
|
||||||
message EnumValueOptions {
|
|
||||||
// Is this enum value deprecated?
|
|
||||||
// Depending on the target platform, this can emit Deprecated annotations
|
|
||||||
// for the enum value, or it will be completely ignored; in the very least,
|
|
||||||
// this is a formalization for deprecating enum values.
|
|
||||||
optional bool deprecated = 1 [default = false];
|
|
||||||
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message. See above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ServiceOptions {
|
|
||||||
|
|
||||||
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
|
|
||||||
// framework. We apologize for hoarding these numbers to ourselves, but
|
|
||||||
// we were already using them long before we decided to release Protocol
|
|
||||||
// Buffers.
|
|
||||||
|
|
||||||
// Is this service deprecated?
|
|
||||||
// Depending on the target platform, this can emit Deprecated annotations
|
|
||||||
// for the service, or it will be completely ignored; in the very least,
|
|
||||||
// this is a formalization for deprecating services.
|
|
||||||
optional bool deprecated = 33 [default = false];
|
|
||||||
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message. See above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
}
|
|
||||||
|
|
||||||
message MethodOptions {
|
|
||||||
|
|
||||||
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
|
|
||||||
// framework. We apologize for hoarding these numbers to ourselves, but
|
|
||||||
// we were already using them long before we decided to release Protocol
|
|
||||||
// Buffers.
|
|
||||||
|
|
||||||
// Is this method deprecated?
|
|
||||||
// Depending on the target platform, this can emit Deprecated annotations
|
|
||||||
// for the method, or it will be completely ignored; in the very least,
|
|
||||||
// this is a formalization for deprecating methods.
|
|
||||||
optional bool deprecated = 33 [default = false];
|
|
||||||
|
|
||||||
// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
|
|
||||||
// or neither? HTTP based RPC implementation may choose GET verb for safe
|
|
||||||
// methods, and PUT verb for idempotent methods instead of the default POST.
|
|
||||||
enum IdempotencyLevel {
|
|
||||||
IDEMPOTENCY_UNKNOWN = 0;
|
|
||||||
NO_SIDE_EFFECTS = 1; // implies idempotent
|
|
||||||
IDEMPOTENT = 2; // idempotent, but may have side effects
|
|
||||||
}
|
|
||||||
optional IdempotencyLevel idempotency_level = 34
|
|
||||||
[default = IDEMPOTENCY_UNKNOWN];
|
|
||||||
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message. See above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
// A message representing an option the parser does not recognize. This only
|
|
||||||
// appears in options protos created by the compiler::Parser class.
|
|
||||||
// DescriptorPool resolves these when building Descriptor objects. Therefore,
|
|
||||||
// options protos in descriptor objects (e.g. returned by Descriptor::options(),
|
|
||||||
// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
|
|
||||||
// in them.
|
|
||||||
message UninterpretedOption {
|
|
||||||
// The name of the uninterpreted option. Each string represents a segment in
|
|
||||||
// a dot-separated name. is_extension is true iff a segment represents an
|
|
||||||
// extension (denoted with parentheses in options specs in .proto files).
|
|
||||||
// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
|
|
||||||
// "foo.(bar.baz).qux".
|
|
||||||
message NamePart {
|
|
||||||
required string name_part = 1;
|
|
||||||
required bool is_extension = 2;
|
|
||||||
}
|
|
||||||
repeated NamePart name = 2;
|
|
||||||
|
|
||||||
// The value of the uninterpreted option, in whatever type the tokenizer
|
|
||||||
// identified it as during parsing. Exactly one of these should be set.
|
|
||||||
optional string identifier_value = 3;
|
|
||||||
optional uint64 positive_int_value = 4;
|
|
||||||
optional int64 negative_int_value = 5;
|
|
||||||
optional double double_value = 6;
|
|
||||||
optional bytes string_value = 7;
|
|
||||||
optional string aggregate_value = 8;
|
|
||||||
}
|
|
||||||
|
|
||||||
// ===================================================================
|
|
||||||
// Optional source code info
|
|
||||||
|
|
||||||
// Encapsulates information about the original source file from which a
|
|
||||||
// FileDescriptorProto was generated.
|
|
||||||
message SourceCodeInfo {
|
|
||||||
// A Location identifies a piece of source code in a .proto file which
|
|
||||||
// corresponds to a particular definition. This information is intended
|
|
||||||
// to be useful to IDEs, code indexers, documentation generators, and similar
|
|
||||||
// tools.
|
|
||||||
//
|
|
||||||
// For example, say we have a file like:
|
|
||||||
// message Foo {
|
|
||||||
// optional string foo = 1;
|
|
||||||
// }
|
|
||||||
// Let's look at just the field definition:
|
|
||||||
// optional string foo = 1;
|
|
||||||
// ^ ^^ ^^ ^ ^^^
|
|
||||||
// a bc de f ghi
|
|
||||||
// We have the following locations:
|
|
||||||
// span path represents
|
|
||||||
// [a,i) [ 4, 0, 2, 0 ] The whole field definition.
|
|
||||||
// [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
|
|
||||||
// [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
|
|
||||||
// [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
|
|
||||||
// [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
|
|
||||||
//
|
|
||||||
// Notes:
|
|
||||||
// - A location may refer to a repeated field itself (i.e. not to any
|
|
||||||
// particular index within it). This is used whenever a set of elements are
|
|
||||||
// logically enclosed in a single code segment. For example, an entire
|
|
||||||
// extend block (possibly containing multiple extension definitions) will
|
|
||||||
// have an outer location whose path refers to the "extensions" repeated
|
|
||||||
// field without an index.
|
|
||||||
// - Multiple locations may have the same path. This happens when a single
|
|
||||||
// logical declaration is spread out across multiple places. The most
|
|
||||||
// obvious example is the "extend" block again -- there may be multiple
|
|
||||||
// extend blocks in the same scope, each of which will have the same path.
|
|
||||||
// - A location's span is not always a subset of its parent's span. For
|
|
||||||
// example, the "extendee" of an extension declaration appears at the
|
|
||||||
// beginning of the "extend" block and is shared by all extensions within
|
|
||||||
// the block.
|
|
||||||
// - Just because a location's span is a subset of some other location's span
|
|
||||||
// does not mean that it is a descendant. For example, a "group" defines
|
|
||||||
// both a type and a field in a single declaration. Thus, the locations
|
|
||||||
// corresponding to the type and field and their components will overlap.
|
|
||||||
// - Code which tries to interpret locations should probably be designed to
|
|
||||||
// ignore those that it doesn't understand, as more types of locations could
|
|
||||||
// be recorded in the future.
|
|
||||||
repeated Location location = 1;
|
|
||||||
message Location {
|
|
||||||
// Identifies which part of the FileDescriptorProto was defined at this
|
|
||||||
// location.
|
|
||||||
//
|
|
||||||
// Each element is a field number or an index. They form a path from
|
|
||||||
// the root FileDescriptorProto to the place where the definition occurs. For
|
|
||||||
// example, this path:
|
|
||||||
// [ 4, 3, 2, 7, 1 ]
|
|
||||||
// refers to:
|
|
||||||
// file.message_type(3) // 4, 3
|
|
||||||
// .field(7) // 2, 7
|
|
||||||
// .name() // 1
|
|
||||||
// This is because FileDescriptorProto.message_type has field number 4:
|
|
||||||
// repeated DescriptorProto message_type = 4;
|
|
||||||
// and DescriptorProto.field has field number 2:
|
|
||||||
// repeated FieldDescriptorProto field = 2;
|
|
||||||
// and FieldDescriptorProto.name has field number 1:
|
|
||||||
// optional string name = 1;
|
|
||||||
//
|
|
||||||
// Thus, the above path gives the location of a field name. If we removed
|
|
||||||
// the last element:
|
|
||||||
// [ 4, 3, 2, 7 ]
|
|
||||||
// this path refers to the whole field declaration (from the beginning
|
|
||||||
// of the label to the terminating semicolon).
|
|
||||||
repeated int32 path = 1 [packed = true];
|
|
||||||
|
|
||||||
// Always has exactly three or four elements: start line, start column,
|
|
||||||
// end line (optional, otherwise assumed same as start line), end column.
|
|
||||||
// These are packed into a single field for efficiency. Note that line
|
|
||||||
// and column numbers are zero-based -- typically you will want to add
|
|
||||||
// 1 to each before displaying to a user.
|
|
||||||
repeated int32 span = 2 [packed = true];
|
|
||||||
|
|
||||||
// If this SourceCodeInfo represents a complete declaration, these are any
|
|
||||||
// comments appearing before and after the declaration which appear to be
|
|
||||||
// attached to the declaration.
|
|
||||||
//
|
|
||||||
// A series of line comments appearing on consecutive lines, with no other
|
|
||||||
// tokens appearing on those lines, will be treated as a single comment.
|
|
||||||
//
|
|
||||||
// leading_detached_comments will keep paragraphs of comments that appear
|
|
||||||
// before (but not connected to) the current element. Each paragraph,
|
|
||||||
// separated by empty lines, will be one comment element in the repeated
|
|
||||||
// field.
|
|
||||||
//
|
|
||||||
// Only the comment content is provided; comment markers (e.g. //) are
|
|
||||||
// stripped out. For block comments, leading whitespace and an asterisk
|
|
||||||
// will be stripped from the beginning of each line other than the first.
|
|
||||||
// Newlines are included in the output.
|
|
||||||
//
|
|
||||||
// Examples:
|
|
||||||
//
|
|
||||||
// optional int32 foo = 1; // Comment attached to foo.
|
|
||||||
// // Comment attached to bar.
|
|
||||||
// optional int32 bar = 2;
|
|
||||||
//
|
|
||||||
// optional string baz = 3;
|
|
||||||
// // Comment attached to baz.
|
|
||||||
// // Another line attached to baz.
|
|
||||||
//
|
|
||||||
// // Comment attached to qux.
|
|
||||||
// //
|
|
||||||
// // Another line attached to qux.
|
|
||||||
// optional double qux = 4;
|
|
||||||
//
|
|
||||||
// // Detached comment for corge. This is not leading or trailing comments
|
|
||||||
// // to qux or corge because there are blank lines separating it from
|
|
||||||
// // both.
|
|
||||||
//
|
|
||||||
// // Detached comment for corge paragraph 2.
|
|
||||||
//
|
|
||||||
// optional string corge = 5;
|
|
||||||
// /* Block comment attached
|
|
||||||
// * to corge. Leading asterisks
|
|
||||||
// * will be removed. */
|
|
||||||
// /* Block comment attached to
|
|
||||||
// * grault. */
|
|
||||||
// optional int32 grault = 6;
|
|
||||||
//
|
|
||||||
// // ignored detached comments.
|
|
||||||
optional string leading_comments = 3;
|
|
||||||
optional string trailing_comments = 4;
|
|
||||||
repeated string leading_detached_comments = 6;
|
|
||||||
}
|
|
||||||
}
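To make the path encoding concrete, the example path [4, 3, 2, 7, 1] from the comment above can be resolved against a parsed file descriptor with the generated getters; a minimal sketch, assuming fd was obtained elsewhere (for example from a plugin's CodeGeneratorRequest):

package descriptorutil

import (
	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// fieldNameAtExamplePath follows [4, 3, 2, 7, 1]:
//   4 = FileDescriptorProto.message_type, index 3
//   2 = DescriptorProto.field,            index 7
//   1 = FieldDescriptorProto.name
func fieldNameAtExamplePath(fd *descpb.FileDescriptorProto) string {
	return fd.GetMessageType()[3].GetField()[7].GetName()
}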
// Describes the relationship between generated code and its original source
|
|
||||||
// file. A GeneratedCodeInfo message is associated with only one generated
|
|
||||||
// source file, but may contain references to different source .proto files.
|
|
||||||
message GeneratedCodeInfo {
|
|
||||||
// An Annotation connects some span of text in generated code to an element
|
|
||||||
// of its generating .proto file.
|
|
||||||
repeated Annotation annotation = 1;
|
|
||||||
message Annotation {
|
|
||||||
// Identifies the element in the original source .proto file. This field
|
|
||||||
// is formatted the same as SourceCodeInfo.Location.path.
|
|
||||||
repeated int32 path = 1 [packed = true];
|
|
||||||
|
|
||||||
// Identifies the filesystem path to the original source .proto.
|
|
||||||
optional string source_file = 2;
|
|
||||||
|
|
||||||
// Identifies the starting offset in bytes in the generated code
|
|
||||||
// that relates to the identified object.
|
|
||||||
optional int32 begin = 3;
|
|
||||||
|
|
||||||
// Identifies the ending offset in bytes in the generated code that
|
|
||||||
// relates to the identified offset. The end offset should be one past
|
|
||||||
// the last relevant byte (so the length of the text = end - begin).
|
|
||||||
optional int32 end = 4;
|
|
||||||
}
|
|
||||||
}
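The begin/end values above are byte offsets into the generated file, with end exclusive, so recovering the annotated text is a plain slice; a minimal sketch, assuming genSrc holds the generated source and ann came from its GeneratedCodeInfo:

package descriptorutil

import (
	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// annotatedText returns the span of generated source that an annotation
// points at; GetEnd() is one past the last relevant byte.
func annotatedText(genSrc []byte, ann *descpb.GeneratedCodeInfo_Annotation) []byte {
	return genSrc[ann.GetBegin():ann.GetEnd()]
}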
214 vendor/github.com/golang/protobuf/ptypes/any.go generated vendored
@@ -1,141 +1,165 @@
// Go support for Protocol Buffers - Google's data interchange format
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
//
|
// Use of this source code is governed by a BSD-style
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
// license that can be found in the LICENSE file.
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package ptypes
|
package ptypes
|
||||||
|
|
||||||
// This file implements functions to marshal proto.Message to/from
|
|
||||||
// google.protobuf.Any message.
|
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"reflect"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
"github.com/golang/protobuf/ptypes/any"
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
"google.golang.org/protobuf/reflect/protoregistry"
|
||||||
|
|
||||||
|
anypb "github.com/golang/protobuf/ptypes/any"
|
||||||
)
|
)
|
||||||
|
|
||||||
const googleApis = "type.googleapis.com/"
|
const urlPrefix = "type.googleapis.com/"
|
||||||
|
|
||||||
// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
|
// AnyMessageName returns the message name contained in an anypb.Any message.
|
||||||
//
|
// Most type assertions should use the Is function instead.
|
||||||
// Note that regular type assertions should be done using the Is
|
func AnyMessageName(any *anypb.Any) (string, error) {
|
||||||
// function. AnyMessageName is provided for less common use cases like filtering a
|
name, err := anyMessageName(any)
|
||||||
// sequence of Any messages based on a set of allowed message type names.
|
return string(name), err
|
||||||
func AnyMessageName(any *any.Any) (string, error) {
|
}
|
||||||
|
func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
|
||||||
if any == nil {
|
if any == nil {
|
||||||
return "", fmt.Errorf("message is nil")
|
return "", fmt.Errorf("message is nil")
|
||||||
}
|
}
|
||||||
slash := strings.LastIndex(any.TypeUrl, "/")
|
name := protoreflect.FullName(any.TypeUrl)
|
||||||
if slash < 0 {
|
if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
|
||||||
|
name = name[i+len("/"):]
|
||||||
|
}
|
||||||
|
if !name.IsValid() {
|
||||||
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
|
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
|
||||||
}
|
}
|
||||||
return any.TypeUrl[slash+1:], nil
|
return name, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
|
// MarshalAny marshals the given message m into an anypb.Any message.
|
||||||
func MarshalAny(pb proto.Message) (*any.Any, error) {
|
func MarshalAny(m proto.Message) (*anypb.Any, error) {
|
||||||
value, err := proto.Marshal(pb)
|
switch dm := m.(type) {
|
||||||
|
case DynamicAny:
|
||||||
|
m = dm.Message
|
||||||
|
case *DynamicAny:
|
||||||
|
if dm == nil {
|
||||||
|
return nil, proto.ErrNil
|
||||||
|
}
|
||||||
|
m = dm.Message
|
||||||
|
}
|
||||||
|
b, err := proto.Marshal(m)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
|
return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
|
// Empty returns a new message of the type specified in an anypb.Any message.
|
||||||
// allocate a proto.Message for the type specified in a google.protobuf.Any
|
// It returns protoregistry.NotFound if the corresponding message type could not
|
||||||
// message. The allocated message is stored in the embedded proto.Message.
|
// be resolved in the global registry.
|
||||||
//
|
func Empty(any *anypb.Any) (proto.Message, error) {
|
||||||
// Example:
|
name, err := anyMessageName(any)
|
||||||
//
|
|
||||||
// var x ptypes.DynamicAny
|
|
||||||
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
|
|
||||||
// fmt.Printf("unmarshaled message: %v", x.Message)
|
|
||||||
type DynamicAny struct {
|
|
||||||
proto.Message
|
|
||||||
}
|
|
||||||
|
|
||||||
// Empty returns a new proto.Message of the type specified in a
|
|
||||||
// google.protobuf.Any message. It returns an error if corresponding message
|
|
||||||
// type isn't linked in.
|
|
||||||
func Empty(any *any.Any) (proto.Message, error) {
|
|
||||||
aname, err := AnyMessageName(any)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
|
||||||
t := proto.MessageType(aname)
|
if err != nil {
|
||||||
if t == nil {
|
return nil, err
|
||||||
return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
|
|
||||||
}
|
}
|
||||||
return reflect.New(t.Elem()).Interface().(proto.Message), nil
|
return proto.MessageV1(mt.New().Interface()), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
|
// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
|
||||||
// message and places the decoded result in pb. It returns an error if type of
|
// into the provided message m. It returns an error if the target message
|
||||||
// contents of Any message does not match type of pb message.
|
// does not match the type in the Any message or if an unmarshal error occurs.
|
||||||
//
|
//
|
||||||
// pb can be a proto.Message, or a *DynamicAny.
|
// The target message m may be a *DynamicAny message. If the underlying message
|
||||||
func UnmarshalAny(any *any.Any, pb proto.Message) error {
|
// type could not be resolved, then this returns protoregistry.NotFound.
|
||||||
if d, ok := pb.(*DynamicAny); ok {
|
func UnmarshalAny(any *anypb.Any, m proto.Message) error {
|
||||||
if d.Message == nil {
|
if dm, ok := m.(*DynamicAny); ok {
|
||||||
|
if dm.Message == nil {
|
||||||
var err error
|
var err error
|
||||||
d.Message, err = Empty(any)
|
dm.Message, err = Empty(any)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return UnmarshalAny(any, d.Message)
|
m = dm.Message
|
||||||
}
|
}
|
||||||
|
|
||||||
aname, err := AnyMessageName(any)
|
anyName, err := AnyMessageName(any)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
msgName := proto.MessageName(m)
|
||||||
mname := proto.MessageName(pb)
|
if anyName != msgName {
|
||||||
if aname != mname {
|
return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
|
||||||
return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
|
|
||||||
}
|
}
|
||||||
return proto.Unmarshal(any.Value, pb)
|
return proto.Unmarshal(any.Value, m)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Is returns true if any value contains a given message type.
|
// Is reports whether the Any message contains a message of the specified type.
|
||||||
func Is(any *any.Any, pb proto.Message) bool {
|
func Is(any *anypb.Any, m proto.Message) bool {
|
||||||
// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
|
if any == nil || m == nil {
|
||||||
// but it avoids scanning TypeUrl for the slash.
|
|
||||||
if any == nil {
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
name := proto.MessageName(pb)
|
name := proto.MessageName(m)
|
||||||
prefix := len(any.TypeUrl) - len(name)
|
if !strings.HasSuffix(any.TypeUrl, name) {
|
||||||
return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
|
return false
|
||||||
|
}
|
||||||
|
return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
|
||||||
|
}
|
||||||
|
|
||||||
|
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
|
||||||
|
// allocate a proto.Message for the type specified in an anypb.Any message.
|
||||||
|
// The allocated message is stored in the embedded proto.Message.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
// var x ptypes.DynamicAny
|
||||||
|
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
|
||||||
|
// fmt.Printf("unmarshaled message: %v", x.Message)
|
||||||
|
type DynamicAny struct{ proto.Message }
|
||||||
|
|
||||||
|
func (m DynamicAny) String() string {
|
||||||
|
if m.Message == nil {
|
||||||
|
return "<nil>"
|
||||||
|
}
|
||||||
|
return m.Message.String()
|
||||||
|
}
|
||||||
|
func (m DynamicAny) Reset() {
|
||||||
|
if m.Message == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m.Message.Reset()
|
||||||
|
}
|
||||||
|
func (m DynamicAny) ProtoMessage() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
func (m DynamicAny) ProtoReflect() protoreflect.Message {
|
||||||
|
if m.Message == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return dynamicAny{proto.MessageReflect(m.Message)}
|
||||||
|
}
|
||||||
|
|
||||||
|
type dynamicAny struct{ protoreflect.Message }
|
||||||
|
|
||||||
|
func (m dynamicAny) Type() protoreflect.MessageType {
|
||||||
|
return dynamicAnyType{m.Message.Type()}
|
||||||
|
}
|
||||||
|
func (m dynamicAny) New() protoreflect.Message {
|
||||||
|
return dynamicAnyType{m.Message.Type()}.New()
|
||||||
|
}
|
||||||
|
func (m dynamicAny) Interface() protoreflect.ProtoMessage {
|
||||||
|
return DynamicAny{proto.MessageV1(m.Message.Interface())}
|
||||||
|
}
|
||||||
|
|
||||||
|
type dynamicAnyType struct{ protoreflect.MessageType }
|
||||||
|
|
||||||
|
func (t dynamicAnyType) New() protoreflect.Message {
|
||||||
|
return dynamicAny{t.MessageType.New()}
|
||||||
|
}
|
||||||
|
func (t dynamicAnyType) Zero() protoreflect.Message {
|
||||||
|
return dynamicAny{t.MessageType.Zero()}
|
||||||
}
|
}
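The rewrite above keeps the exported ptypes signatures, so existing callers are unaffected; only the internals now route through the google.golang.org/protobuf registry. A small usage sketch against this vendored version, using the Duration well-known type purely as a convenient concrete message:

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a concrete message; the TypeUrl becomes
	// "type.googleapis.com/google.protobuf.Duration".
	a, err := ptypes.MarshalAny(ptypes.DurationProto(3 * time.Second))
	if err != nil {
		panic(err)
	}
	fmt.Println(a.TypeUrl)

	// Check the contained type before unpacking into a concrete message.
	if ptypes.Is(a, &durpb.Duration{}) {
		var d durpb.Duration
		if err := ptypes.UnmarshalAny(a, &d); err != nil {
			panic(err)
		}
		fmt.Println(d.Seconds) // 3
	}

	// DynamicAny allocates the right concrete type automatically, provided
	// that type is linked into the binary (here it is, via the import above).
	var dyn ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(a, &dyn); err != nil {
		panic(err)
	}
	fmt.Println(dyn.Message)
}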
235 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go generated vendored
@@ -1,203 +1,62 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
// source: google/protobuf/any.proto
|
// source: github.com/golang/protobuf/ptypes/any/any.proto
|
||||||
|
|
||||||
package any
|
package any
|
||||||
|
|
||||||
import (
|
import (
|
||||||
fmt "fmt"
|
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||||
proto "github.com/golang/protobuf/proto"
|
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||||
math "math"
|
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||||
|
reflect "reflect"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Symbols defined in public import of google/protobuf/any.proto.
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
type Any = anypb.Any
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|
||||||
|
|
||||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
|
||||||
// URL that describes the type of the serialized message.
|
|
||||||
//
|
var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
|
||||||
// Protobuf library provides support to pack/unpack Any values in the form
|
0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||||
// of utility functions or additional generated methods of the Any type.
|
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||||
//
|
0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||||
// Example 1: Pack and unpack a message in C++.
|
0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
|
||||||
//
|
0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
|
||||||
// Foo foo = ...;
|
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
|
||||||
// Any any;
|
0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
|
||||||
// any.PackFrom(foo);
|
0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||||
// ...
|
0x74, 0x6f, 0x33,
|
||||||
// if (any.UnpackTo(&foo)) {
|
|
||||||
// ...
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Example 2: Pack and unpack a message in Java.
|
|
||||||
//
|
|
||||||
// Foo foo = ...;
|
|
||||||
// Any any = Any.pack(foo);
|
|
||||||
// ...
|
|
||||||
// if (any.is(Foo.class)) {
|
|
||||||
// foo = any.unpack(Foo.class);
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Example 3: Pack and unpack a message in Python.
|
|
||||||
//
|
|
||||||
// foo = Foo(...)
|
|
||||||
// any = Any()
|
|
||||||
// any.Pack(foo)
|
|
||||||
// ...
|
|
||||||
// if any.Is(Foo.DESCRIPTOR):
|
|
||||||
// any.Unpack(foo)
|
|
||||||
// ...
|
|
||||||
//
|
|
||||||
// Example 4: Pack and unpack a message in Go
|
|
||||||
//
|
|
||||||
// foo := &pb.Foo{...}
|
|
||||||
// any, err := ptypes.MarshalAny(foo)
|
|
||||||
// ...
|
|
||||||
// foo := &pb.Foo{}
|
|
||||||
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
|
|
||||||
// ...
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// The pack methods provided by protobuf library will by default use
|
|
||||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
|
||||||
// methods only use the fully qualified type name after the last '/'
|
|
||||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
|
||||||
// name "y.z".
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// JSON
|
|
||||||
// ====
|
|
||||||
// The JSON representation of an `Any` value uses the regular
|
|
||||||
// representation of the deserialized, embedded message, with an
|
|
||||||
// additional field `@type` which contains the type URL. Example:
|
|
||||||
//
|
|
||||||
// package google.profile;
|
|
||||||
// message Person {
|
|
||||||
// string first_name = 1;
|
|
||||||
// string last_name = 2;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// {
|
|
||||||
// "@type": "type.googleapis.com/google.profile.Person",
|
|
||||||
// "firstName": <string>,
|
|
||||||
// "lastName": <string>
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// If the embedded message type is well-known and has a custom JSON
|
|
||||||
// representation, that representation will be embedded adding a field
|
|
||||||
// `value` which holds the custom JSON in addition to the `@type`
|
|
||||||
// field. Example (for message [google.protobuf.Duration][]):
|
|
||||||
//
|
|
||||||
// {
|
|
||||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
|
||||||
// "value": "1.212s"
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
type Any struct {
|
|
||||||
// A URL/resource name that uniquely identifies the type of the serialized
|
|
||||||
// protocol buffer message. This string must contain at least
|
|
||||||
// one "/" character. The last segment of the URL's path must represent
|
|
||||||
// the fully qualified name of the type (as in
|
|
||||||
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
|
||||||
// (e.g., leading "." is not accepted).
|
|
||||||
//
|
|
||||||
// In practice, teams usually precompile into the binary all types that they
|
|
||||||
// expect it to use in the context of Any. However, for URLs which use the
|
|
||||||
// scheme `http`, `https`, or no scheme, one can optionally set up a type
|
|
||||||
// server that maps type URLs to message definitions as follows:
|
|
||||||
//
|
|
||||||
// * If no scheme is provided, `https` is assumed.
|
|
||||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
|
||||||
// value in binary format, or produce an error.
|
|
||||||
// * Applications are allowed to cache lookup results based on the
|
|
||||||
// URL, or have them precompiled into a binary to avoid any
|
|
||||||
// lookup. Therefore, binary compatibility needs to be preserved
|
|
||||||
// on changes to types. (Use versioned type names to manage
|
|
||||||
// breaking changes.)
|
|
||||||
//
|
|
||||||
// Note: this functionality is not currently available in the official
|
|
||||||
// protobuf release, and it is not used for type URLs beginning with
|
|
||||||
// type.googleapis.com.
|
|
||||||
//
|
|
||||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
|
||||||
// used with implementation specific semantics.
|
|
||||||
//
|
|
||||||
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
|
|
||||||
// Must be a valid serialized protocol buffer of the above specified type.
|
|
||||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Any) Reset() { *m = Any{} }
|
var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
|
||||||
func (m *Any) String() string { return proto.CompactTextString(m) }
|
var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
|
||||||
func (*Any) ProtoMessage() {}
|
0, // [0:0] is the sub-list for method output_type
|
||||||
func (*Any) Descriptor() ([]byte, []int) {
|
0, // [0:0] is the sub-list for method input_type
|
||||||
return fileDescriptor_b53526c13ae22eb4, []int{0}
|
0, // [0:0] is the sub-list for extension type_name
|
||||||
|
0, // [0:0] is the sub-list for extension extendee
|
||||||
|
0, // [0:0] is the sub-list for field type_name
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*Any) XXX_WellKnownType() string { return "Any" }
|
func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
|
||||||
|
func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
|
||||||
func (m *Any) XXX_Unmarshal(b []byte) error {
|
if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
|
||||||
return xxx_messageInfo_Any.Unmarshal(m, b)
|
return
|
||||||
}
|
|
||||||
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *Any) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Any.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Any) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Any.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Any) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Any.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Any proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Any) GetTypeUrl() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.TypeUrl
|
|
||||||
}
|
}
|
||||||
return ""
|
type x struct{}
|
||||||
}
|
out := protoimpl.TypeBuilder{
|
||||||
|
File: protoimpl.DescBuilder{
|
||||||
func (m *Any) GetValue() []byte {
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
if m != nil {
|
RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
|
||||||
return m.Value
|
NumEnums: 0,
|
||||||
}
|
NumMessages: 0,
|
||||||
return nil
|
NumExtensions: 0,
|
||||||
}
|
NumServices: 0,
|
||||||
|
},
|
||||||
func init() {
|
GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
|
||||||
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
|
DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
|
||||||
}
|
}.Build()
|
||||||
|
File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
|
||||||
func init() {
|
file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
|
||||||
proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4)
|
file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
|
||||||
}
|
file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
|
||||||
|
|
||||||
var fileDescriptor_b53526c13ae22eb4 = []byte{
|
|
||||||
// 185 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
|
|
||||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
|
|
||||||
0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
|
|
||||||
0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
|
|
||||||
0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
|
|
||||||
0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
|
|
||||||
0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
|
|
||||||
0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
|
|
||||||
0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
|
|
||||||
0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
|
|
||||||
0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
|
|
||||||
0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
|
|
||||||
}
|
}
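Because the regenerated any.pb.go only forwards to the new module (type Any = anypb.Any plus the raw file descriptor), the old and new import paths expose the identical type; a small compile-time sketch:

package anycompat

import (
	oldany "github.com/golang/protobuf/ptypes/any"
	"google.golang.org/protobuf/types/known/anypb"
)

// "type Any = anypb.Any" is an alias declaration, so the assignment below
// compiles without any conversion: both names denote the same type.
var _ *anypb.Any = (*oldany.Any)(nil)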
155 vendor/github.com/golang/protobuf/ptypes/any/any.proto generated vendored
@@ -1,155 +0,0 @@
// Protocol Buffers - Google's data interchange format
|
|
||||||
// Copyright 2008 Google Inc. All rights reserved.
|
|
||||||
// https://developers.google.com/protocol-buffers/
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package google.protobuf;
|
|
||||||
|
|
||||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
|
||||||
option go_package = "github.com/golang/protobuf/ptypes/any";
|
|
||||||
option java_package = "com.google.protobuf";
|
|
||||||
option java_outer_classname = "AnyProto";
|
|
||||||
option java_multiple_files = true;
|
|
||||||
option objc_class_prefix = "GPB";
|
|
||||||
|
|
||||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
|
||||||
// URL that describes the type of the serialized message.
|
|
||||||
//
|
|
||||||
// Protobuf library provides support to pack/unpack Any values in the form
|
|
||||||
// of utility functions or additional generated methods of the Any type.
|
|
||||||
//
|
|
||||||
// Example 1: Pack and unpack a message in C++.
|
|
||||||
//
|
|
||||||
// Foo foo = ...;
|
|
||||||
// Any any;
|
|
||||||
// any.PackFrom(foo);
|
|
||||||
// ...
|
|
||||||
// if (any.UnpackTo(&foo)) {
|
|
||||||
// ...
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Example 2: Pack and unpack a message in Java.
|
|
||||||
//
|
|
||||||
// Foo foo = ...;
|
|
||||||
// Any any = Any.pack(foo);
|
|
||||||
// ...
|
|
||||||
// if (any.is(Foo.class)) {
|
|
||||||
// foo = any.unpack(Foo.class);
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Example 3: Pack and unpack a message in Python.
|
|
||||||
//
|
|
||||||
// foo = Foo(...)
|
|
||||||
// any = Any()
|
|
||||||
// any.Pack(foo)
|
|
||||||
// ...
|
|
||||||
// if any.Is(Foo.DESCRIPTOR):
|
|
||||||
// any.Unpack(foo)
|
|
||||||
// ...
|
|
||||||
//
|
|
||||||
// Example 4: Pack and unpack a message in Go
|
|
||||||
//
|
|
||||||
// foo := &pb.Foo{...}
|
|
||||||
// any, err := ptypes.MarshalAny(foo)
|
|
||||||
// ...
|
|
||||||
// foo := &pb.Foo{}
|
|
||||||
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
|
|
||||||
// ...
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// The pack methods provided by protobuf library will by default use
|
|
||||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
|
||||||
// methods only use the fully qualified type name after the last '/'
|
|
||||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
|
||||||
// name "y.z".
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// JSON
|
|
||||||
// ====
|
|
||||||
// The JSON representation of an `Any` value uses the regular
|
|
||||||
// representation of the deserialized, embedded message, with an
|
|
||||||
// additional field `@type` which contains the type URL. Example:
|
|
||||||
//
|
|
||||||
// package google.profile;
|
|
||||||
// message Person {
|
|
||||||
// string first_name = 1;
|
|
||||||
// string last_name = 2;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// {
|
|
||||||
// "@type": "type.googleapis.com/google.profile.Person",
|
|
||||||
// "firstName": <string>,
|
|
||||||
// "lastName": <string>
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// If the embedded message type is well-known and has a custom JSON
|
|
||||||
// representation, that representation will be embedded adding a field
|
|
||||||
// `value` which holds the custom JSON in addition to the `@type`
|
|
||||||
// field. Example (for message [google.protobuf.Duration][]):
|
|
||||||
//
|
|
||||||
// {
|
|
||||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
|
||||||
// "value": "1.212s"
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
message Any {
|
|
||||||
// A URL/resource name that uniquely identifies the type of the serialized
|
|
||||||
// protocol buffer message. This string must contain at least
|
|
||||||
// one "/" character. The last segment of the URL's path must represent
|
|
||||||
// the fully qualified name of the type (as in
|
|
||||||
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
|
||||||
// (e.g., leading "." is not accepted).
|
|
||||||
//
|
|
||||||
// In practice, teams usually precompile into the binary all types that they
|
|
||||||
// expect it to use in the context of Any. However, for URLs which use the
|
|
||||||
// scheme `http`, `https`, or no scheme, one can optionally set up a type
|
|
||||||
// server that maps type URLs to message definitions as follows:
|
|
||||||
//
|
|
||||||
// * If no scheme is provided, `https` is assumed.
|
|
||||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
|
||||||
// value in binary format, or produce an error.
|
|
||||||
// * Applications are allowed to cache lookup results based on the
|
|
||||||
// URL, or have them precompiled into a binary to avoid any
|
|
||||||
// lookup. Therefore, binary compatibility needs to be preserved
|
|
||||||
// on changes to types. (Use versioned type names to manage
|
|
||||||
// breaking changes.)
|
|
||||||
//
|
|
||||||
// Note: this functionality is not currently available in the official
|
|
||||||
// protobuf release, and it is not used for type URLs beginning with
|
|
||||||
// type.googleapis.com.
|
|
||||||
//
|
|
||||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
|
||||||
// used with implementation specific semantics.
|
|
||||||
//
|
|
||||||
string type_url = 1;
|
|
||||||
|
|
||||||
// Must be a valid serialized protocol buffer of the above specified type.
|
|
||||||
bytes value = 2;
|
|
||||||
}
|
|
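For reference, the pack/unpack behaviour documented in the removed any.proto maps onto the ptypes helpers that this commit re-vendors. A minimal sketch, not part of the vendored sources; the Duration payload is just an arbitrary example message:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a concrete message (here a Duration) into an Any; the default
	// type URL becomes "type.googleapis.com/google.protobuf.Duration".
	msg := &durpb.Duration{Seconds: 3, Nanos: 500000000}
	packed, err := ptypes.MarshalAny(msg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(packed.TypeUrl)

	// Unpack again; UnmarshalAny verifies that the type URL matches the
	// target message type.
	out := &durpb.Duration{}
	if err := ptypes.UnmarshalAny(packed, out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Seconds, out.Nanos) // 3 500000000
}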
37  vendor/github.com/golang/protobuf/ptypes/doc.go generated vendored
@@ -1,35 +1,6 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// [BSD 3-clause license text trimmed]
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
-/*
-Package ptypes contains code for interacting with well-known types.
-*/
+// Package ptypes provides functionality for interacting with well-known types.
 package ptypes
116  vendor/github.com/golang/protobuf/ptypes/duration.go generated vendored
@@ -1,102 +1,72 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// [BSD 3-clause license text trimmed]
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
 package ptypes
 
-// This file implements conversions between google.protobuf.Duration
-// and time.Duration.
-
 import (
 	"errors"
 	"fmt"
 	"time"
 
-	durpb "github.com/golang/protobuf/ptypes/duration"
+	durationpb "github.com/golang/protobuf/ptypes/duration"
 )
 
+// Range of google.protobuf.Duration as specified in duration.proto.
+// This is about 10,000 years in seconds.
 const (
-	// Range of a durpb.Duration in seconds, as specified in
-	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
 	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
 	minSeconds = -maxSeconds
 )
 
-// validateDuration determines whether the durpb.Duration is valid according to the
-// definition in google/protobuf/duration.proto. A valid durpb.Duration
-// may still be too large to fit into a time.Duration (the range of durpb.Duration
-// is about 10,000 years, and the range of time.Duration is about 290).
-func validateDuration(d *durpb.Duration) error {
-	// [body unchanged apart from the parameter name; the function now lives
-	//  after DurationProto, see below]
-}
-
-// Duration converts a durpb.Duration to a time.Duration. Duration
-// returns an error if the durpb.Duration is invalid or is too large to be
-// represented in a time.Duration.
-func Duration(p *durpb.Duration) (time.Duration, error) {
-	if err := validateDuration(p); err != nil {
+// Duration converts a durationpb.Duration to a time.Duration.
+// Duration returns an error if dur is invalid or overflows a time.Duration.
+func Duration(dur *durationpb.Duration) (time.Duration, error) {
+	if err := validateDuration(dur); err != nil {
 		return 0, err
 	}
-	d := time.Duration(p.Seconds) * time.Second
-	// [remaining conversion logic unchanged apart from renaming p to dur]
+	d := time.Duration(dur.Seconds) * time.Second
+	if int64(d/time.Second) != dur.Seconds {
+		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
+	}
+	if dur.Nanos != 0 {
+		d += time.Duration(dur.Nanos) * time.Nanosecond
+		if (d < 0) != (dur.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
+		}
+	}
+	return d, nil
 }
 
-// DurationProto converts a time.Duration to a durpb.Duration.
-func DurationProto(d time.Duration) *durpb.Duration {
+// DurationProto converts a time.Duration to a durationpb.Duration.
+func DurationProto(d time.Duration) *durationpb.Duration {
 	nanos := d.Nanoseconds()
 	secs := nanos / 1e9
 	nanos -= secs * 1e9
-	return &durpb.Duration{
-		Seconds: secs,
+	return &durationpb.Duration{
+		Seconds: int64(secs),
 		Nanos:   int32(nanos),
 	}
 }
+
+// validateDuration determines whether the durationpb.Duration is valid
+// according to the definition in google/protobuf/duration.proto.
+// A valid durpb.Duration may still be too large to fit into a time.Duration
+// Note that the range of durationpb.Duration is about 10,000 years,
+// while the range of time.Duration is about 290 years.
+func validateDuration(dur *durationpb.Duration) error {
+	if dur == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %v: seconds out of range", dur)
+	}
+	if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %v: nanos out of range", dur)
+	}
+	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+	if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
+		return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
+	}
+	return nil
+}
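For reference, the conversion helpers above are used like this; a minimal sketch, not part of the vendored sources:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> durationpb.Duration.
	pb := ptypes.DurationProto(90 * time.Second)
	fmt.Println(pb.Seconds, pb.Nanos) // 90 0

	// durationpb.Duration -> time.Duration, with range and sign validation.
	d, err := ptypes.Duration(pb)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d) // 1m30s
}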
196  vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go generated vendored
@@ -1,163 +1,63 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/duration.proto
+// source: github.com/golang/protobuf/ptypes/duration/duration.proto
 
 package duration
 
 import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	math "math"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
+	reflect "reflect"
 )
 
-// [removed: the old self-contained generated code, i.e. the Duration struct
-//  (Seconds int64, Nanos int32, XXX_* fields) with its long doc comment, the
-//  Reset/String/Descriptor/accessor methods, proto registration and the
-//  gzipped file descriptor]
+// Symbols defined in public import of google/protobuf/duration.proto.
+
+type Duration = durationpb.Duration
+
+// [added: the protoreflect.FileDescriptor variable, raw descriptor bytes and
+//  the protoimpl.TypeBuilder init() plumbing]
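Because duration.pb.go now only re-exports the durationpb type through a Go type alias, values of the old and new types are interchangeable. A minimal sketch, not part of the vendored sources:

package main

import (
	"fmt"

	olddur "github.com/golang/protobuf/ptypes/duration"
	durationpb "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// The alias makes *olddur.Duration and *durationpb.Duration the same type,
	// so a value built with the new package satisfies APIs written against the
	// old one (and vice versa).
	var d *olddur.Duration = &durationpb.Duration{Seconds: 5}
	fmt.Println(d.GetSeconds()) // 5
}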
116  vendor/github.com/golang/protobuf/ptypes/duration/duration.proto generated vendored
@@ -1,116 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-// [BSD 3-clause license text trimmed]
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "github.com/golang/protobuf/ptypes/duration";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "DurationProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// A Duration represents a signed, fixed-length span of time represented
-// as a count of seconds and fractions of seconds at nanosecond
-// resolution. It is independent of any calendar and concepts like "day"
-// or "month". It is related to Timestamp in that the difference between
-// two Timestamp values is a Duration and it can be added or subtracted
-// from a Timestamp. Range is approximately +-10,000 years.
-// [examples trimmed: computing a Duration from two Timestamps (normalizing
-//  so that seconds and nanos keep the same sign), adding a Duration to a
-//  Timestamp, conversion from datetime.timedelta in Python, and the JSON
-//  mapping, where a Duration is encoded as a string such as "3.000000001s"]
-message Duration {
-  // Signed seconds of the span of time. Must be from -315,576,000,000
-  // to +315,576,000,000 inclusive. Note: these bounds are computed from:
-  // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
-  int64 seconds = 1;
-
-  // Signed fractions of a second at nanosecond resolution of the span
-  // of time. Durations less than one second are represented with a 0
-  // `seconds` field and a positive or negative `nanos` field. For durations
-  // of one second or more, a non-zero value for the `nanos` field must be
-  // of the same sign as the `seconds` field. Must be from -999,999,999
-  // to +999,999,999 inclusive.
-  int32 nanos = 2;
-}
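The removed duration.proto comments include pseudo code for computing the Duration between two Timestamps while keeping seconds and nanos on the same sign. A minimal Go sketch of that normalization; the durationBetween helper is illustrative only and not part of the vendored sources:

package main

import (
	"fmt"

	durationpb "google.golang.org/protobuf/types/known/durationpb"
	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
)

// durationBetween subtracts the fields of two Timestamps and normalizes the
// result so that seconds and nanos do not end up with different signs.
func durationBetween(start, end *timestamppb.Timestamp) *durationpb.Duration {
	secs := end.Seconds - start.Seconds
	nanos := end.Nanos - start.Nanos
	if secs < 0 && nanos > 0 {
		secs++
		nanos -= 1e9
	} else if secs > 0 && nanos < 0 {
		secs--
		nanos += 1e9
	}
	return &durationpb.Duration{Seconds: secs, Nanos: nanos}
}

func main() {
	start := &timestamppb.Timestamp{Seconds: 100, Nanos: 900000000}
	end := &timestamppb.Timestamp{Seconds: 102, Nanos: 100000000}
	// 102.1s - 100.9s = 1.2s, i.e. roughly seconds:1 nanos:200000000.
	fmt.Println(durationBetween(start, end))
}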
161  vendor/github.com/golang/protobuf/ptypes/timestamp.go generated vendored
@@ -1,46 +1,18 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// [BSD 3-clause license text trimmed]
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
 package ptypes
 
-// This file implements operations on google.protobuf.Timestamp.
-
 import (
 	"errors"
 	"fmt"
 	"time"
 
-	tspb "github.com/golang/protobuf/ptypes/timestamp"
+	timestamppb "github.com/golang/protobuf/ptypes/timestamp"
 )
 
+// Range of google.protobuf.Duration as specified in timestamp.proto.
 const (
 	// Seconds field of the earliest valid Timestamp.
 	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
@@ -50,17 +22,71 @@ const (
 	maxValidSeconds = 253402300800
 )
 
+// Timestamp converts a timestamppb.Timestamp to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return
+// value is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *timestamppb.Timestamp {
+	ts, err := TimestampProto(time.Now())
+	if err != nil {
+		panic("ptypes: time.Now() out of Timestamp range")
+	}
+	return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
+	ts := &timestamppb.Timestamp{
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps.
+// For invalid Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *timestamppb.Timestamp) string {
+	t, err := Timestamp(ts)
+	if err != nil {
+		return fmt.Sprintf("(%v)", err)
+	}
+	return t.Format(time.RFC3339Nano)
+}
+
 // validateTimestamp determines whether a Timestamp is valid.
-// A valid timestamp represents a time in the range
-// [0001-01-01, 10000-01-01) and has a Nanos field
-// in the range [0, 1e9).
+// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
+// and has a Nanos field in the range [0, 1e9).
 //
 // If the Timestamp is valid, validateTimestamp returns nil.
-// Otherwise, it returns an error that describes
-// the problem.
+// Otherwise, it returns an error that describes the problem.
 //
-// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
-func validateTimestamp(ts *tspb.Timestamp) error {
+// Every valid Timestamp can be represented by a time.Time,
+// but the converse is not true.
+func validateTimestamp(ts *timestamppb.Timestamp) error {
 	if ts == nil {
 		return errors.New("timestamp: nil Timestamp")
 	}
@@ -75,58 +101,3 @@ func validateTimestamp(ts *tspb.Timestamp) error {
 	}
 	return nil
 }
-
-// [removed: the old copies of Timestamp, TimestampNow, TimestampProto and
-//  TimestampString, written against the tspb alias and previously located
-//  after validateTimestamp; they now appear above using the timestamppb
-//  alias]
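For reference, the timestamp helpers above round-trip like this; a minimal sketch, not part of the vendored sources:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> timestamppb.Timestamp, with range validation.
	ts, err := ptypes.TimestampProto(time.Date(2020, 10, 1, 12, 0, 0, 0, time.UTC))
	if err != nil {
		log.Fatal(err)
	}

	// RFC 3339 rendering and the reverse conversion.
	fmt.Println(ptypes.TimestampString(ts)) // 2020-10-01T12:00:00Z
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t.UTC()) // 2020-10-01 12:00:00 +0000 UTC
}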
219  vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go generated vendored
@@ -1,185 +1,64 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/timestamp.proto
+// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
 
 package timestamp
 
 import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	math "math"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+	reflect "reflect"
 )
 
-// [removed: the old self-contained generated code, i.e. the Timestamp struct
-//  (Seconds int64, Nanos int32, XXX_* fields) with its long doc comment
-//  (epoch and leap-smear semantics, RFC 3339 JSON mapping, examples for
-//  POSIX, Win32, Java and Python), the Reset/String/Descriptor/accessor
-//  methods, proto registration and the gzipped file descriptor]
+// Symbols defined in public import of google/protobuf/timestamp.proto.
+
+type Timestamp = timestamppb.Timestamp
+
+// [added: the protoreflect.FileDescriptor variable, raw descriptor bytes and
+//  the protoimpl.TypeBuilder init() plumbing]
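The removed generated documentation describes the RFC 3339 JSON mapping of Timestamp. Assuming the google.golang.org/protobuf module pulled in by this rebase is available, protojson shows that mapping directly; a minimal sketch, not part of the vendored sources:

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/encoding/protojson"
	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// 2017-01-15T01:30:15Z, the example date used in the removed comments.
	ts := &timestamppb.Timestamp{Seconds: 1484443815}
	b, err := protojson.Marshal(ts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // "2017-01-15T01:30:15Z" (a JSON string, quotes included)
}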
138
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
generated
vendored
138
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
generated
vendored
@ -1,138 +0,0 @@
|
|||||||
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

syntax = "proto3";

package google.protobuf;

option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/timestamp";
option java_package = "com.google.protobuf";
option java_outer_classname = "TimestampProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";

// A Timestamp represents a point in time independent of any time zone or local
// calendar, encoded as a count of seconds and fractions of seconds at
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
// January 1, 1970, in the proleptic Gregorian calendar which extends the
// Gregorian calendar backwards to year one.
//
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
// second table is needed for interpretation, using a [24-hour linear
// smear](https://developers.google.com/time/smear).
//
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
// restricting to that range, we ensure that we can convert to and from [RFC
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
//
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
//     Timestamp timestamp;
//     timestamp.set_seconds(time(NULL));
//     timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
//     struct timeval tv;
//     gettimeofday(&tv, NULL);
//
//     Timestamp timestamp;
//     timestamp.set_seconds(tv.tv_sec);
//     timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
//     FILETIME ft;
//     GetSystemTimeAsFileTime(&ft);
//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
//     Timestamp timestamp;
//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
//     long millis = System.currentTimeMillis();
//
//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
//         .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
// Example 5: Compute Timestamp from current time in Python.
//
//     timestamp = Timestamp()
//     timestamp.GetCurrentTime()
//
// # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required. A proto3 JSON serializer should always use UTC (as indicated by
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
// able to accept both UTC and other timezones (as indicated by an offset).
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
// ) to obtain a formatter capable of generating timestamps in this format.
//
//
message Timestamp {
  // Represents seconds of UTC time since Unix epoch
  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
  // 9999-12-31T23:59:59Z inclusive.
  int64 seconds = 1;

  // Non-negative fractions of a second at nanosecond resolution. Negative
  // second values with fractions must still have non-negative nanos values
  // that count forward in time. Must be from 0 to 999,999,999
  // inclusive.
  int32 nanos = 2;
}
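The examples in the comment above are C++, Java, and Python; Go code that vendors this file normally goes through the ptypes helpers that sit next to it. A minimal, hedged sketch (not part of this commit, assuming the vendored github.com/golang/protobuf/ptypes package) of the same seconds/nanos round trip:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// Example 1 in Go: seconds since the Unix epoch plus non-negative nanos.
	ts, err := ptypes.TimestampProto(time.Now().UTC())
	if err != nil {
		log.Fatal(err) // outside the 0001-01-01 .. 9999-12-31 range
	}
	fmt.Println(ts.Seconds, ts.Nanos)

	// And back; the RFC 3339 string is what the JSON mapping above produces.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t.Format(time.RFC3339Nano))
}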
504  vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go  generated vendored  @@ -1,463 +1,71 @@
|
|||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
// source: google/protobuf/wrappers.proto
|
// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
|
||||||
|
|
||||||
package wrappers
|
package wrappers
|
||||||
|
|
||||||
import (
|
import (
|
||||||
fmt "fmt"
|
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||||
proto "github.com/golang/protobuf/proto"
|
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||||
math "math"
|
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
|
||||||
|
reflect "reflect"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Symbols defined in public import of google/protobuf/wrappers.proto.
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
type DoubleValue = wrapperspb.DoubleValue
|
||||||
// is compatible with the proto package it is being compiled against.
|
type FloatValue = wrapperspb.FloatValue
|
||||||
// A compilation error at this line likely means your copy of the
|
type Int64Value = wrapperspb.Int64Value
|
||||||
// proto package needs to be updated.
|
type UInt64Value = wrapperspb.UInt64Value
|
||||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
type Int32Value = wrapperspb.Int32Value
|
||||||
|
type UInt32Value = wrapperspb.UInt32Value
|
||||||
|
type BoolValue = wrapperspb.BoolValue
|
||||||
|
type StringValue = wrapperspb.StringValue
|
||||||
|
type BytesValue = wrapperspb.BytesValue
|
||||||
|
|
||||||
// Wrapper message for `double`.
|
var File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto protoreflect.FileDescriptor
|
||||||
//
|
|
||||||
// The JSON representation for `DoubleValue` is JSON number.
|
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = []byte{
|
||||||
type DoubleValue struct {
|
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||||
// The double value.
|
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||||
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
|
0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2f, 0x77, 0x72, 0x61,
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
|
||||||
XXX_unrecognized []byte `json:"-"`
|
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61,
|
||||||
XXX_sizecache int32 `json:"-"`
|
0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
|
||||||
|
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
|
||||||
|
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
|
||||||
|
0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x3b, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
|
||||||
|
0x72, 0x73, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *DoubleValue) Reset() { *m = DoubleValue{} }
|
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = []interface{}{}
|
||||||
func (m *DoubleValue) String() string { return proto.CompactTextString(m) }
|
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = []int32{
|
||||||
func (*DoubleValue) ProtoMessage() {}
|
0, // [0:0] is the sub-list for method output_type
|
||||||
func (*DoubleValue) Descriptor() ([]byte, []int) {
|
0, // [0:0] is the sub-list for method input_type
|
||||||
return fileDescriptor_5377b62bda767935, []int{0}
|
0, // [0:0] is the sub-list for extension type_name
|
||||||
|
0, // [0:0] is the sub-list for extension extendee
|
||||||
|
0, // [0:0] is the sub-list for field type_name
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" }
|
func init() { file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() }
|
||||||
|
func file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() {
|
||||||
func (m *DoubleValue) XXX_Unmarshal(b []byte) error {
|
if File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto != nil {
|
||||||
return xxx_messageInfo_DoubleValue.Unmarshal(m, b)
|
return
|
||||||
}
|
|
||||||
func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *DoubleValue) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_DoubleValue.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *DoubleValue) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_DoubleValue.Size(m)
|
|
||||||
}
|
|
||||||
func (m *DoubleValue) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_DoubleValue.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_DoubleValue proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *DoubleValue) GetValue() float64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
}
|
||||||
return 0
|
type x struct{}
|
||||||
}
|
out := protoimpl.TypeBuilder{
|
||||||
|
File: protoimpl.DescBuilder{
|
||||||
// Wrapper message for `float`.
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
//
|
RawDescriptor: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc,
|
||||||
// The JSON representation for `FloatValue` is JSON number.
|
NumEnums: 0,
|
||||||
type FloatValue struct {
|
NumMessages: 0,
|
||||||
// The float value.
|
NumExtensions: 0,
|
||||||
Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
|
NumServices: 0,
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
},
|
||||||
XXX_unrecognized []byte `json:"-"`
|
GoTypes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes,
|
||||||
XXX_sizecache int32 `json:"-"`
|
DependencyIndexes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs,
|
||||||
}
|
}.Build()
|
||||||
|
File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto = out.File
|
||||||
func (m *FloatValue) Reset() { *m = FloatValue{} }
|
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = nil
|
||||||
func (m *FloatValue) String() string { return proto.CompactTextString(m) }
|
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = nil
|
||||||
func (*FloatValue) ProtoMessage() {}
|
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = nil
|
||||||
func (*FloatValue) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{1}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" }
|
|
||||||
|
|
||||||
func (m *FloatValue) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_FloatValue.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *FloatValue) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_FloatValue.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *FloatValue) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_FloatValue.Size(m)
|
|
||||||
}
|
|
||||||
func (m *FloatValue) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_FloatValue.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_FloatValue proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *FloatValue) GetValue() float32 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `int64`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `Int64Value` is JSON string.
|
|
||||||
type Int64Value struct {
|
|
||||||
// The int64 value.
|
|
||||||
Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Int64Value) Reset() { *m = Int64Value{} }
|
|
||||||
func (m *Int64Value) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Int64Value) ProtoMessage() {}
|
|
||||||
func (*Int64Value) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{2}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" }
|
|
||||||
|
|
||||||
func (m *Int64Value) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_Int64Value.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *Int64Value) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Int64Value.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Int64Value) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Int64Value.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Int64Value) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Int64Value.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Int64Value proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Int64Value) GetValue() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `uint64`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `UInt64Value` is JSON string.
|
|
||||||
type UInt64Value struct {
|
|
||||||
// The uint64 value.
|
|
||||||
Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *UInt64Value) Reset() { *m = UInt64Value{} }
|
|
||||||
func (m *UInt64Value) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*UInt64Value) ProtoMessage() {}
|
|
||||||
func (*UInt64Value) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{3}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" }
|
|
||||||
|
|
||||||
func (m *UInt64Value) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_UInt64Value.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *UInt64Value) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_UInt64Value.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *UInt64Value) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_UInt64Value.Size(m)
|
|
||||||
}
|
|
||||||
func (m *UInt64Value) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_UInt64Value.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_UInt64Value proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *UInt64Value) GetValue() uint64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `int32`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `Int32Value` is JSON number.
|
|
||||||
type Int32Value struct {
|
|
||||||
// The int32 value.
|
|
||||||
Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Int32Value) Reset() { *m = Int32Value{} }
|
|
||||||
func (m *Int32Value) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Int32Value) ProtoMessage() {}
|
|
||||||
func (*Int32Value) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{4}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" }
|
|
||||||
|
|
||||||
func (m *Int32Value) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_Int32Value.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *Int32Value) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Int32Value.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Int32Value) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Int32Value.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Int32Value) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Int32Value.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Int32Value proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Int32Value) GetValue() int32 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `uint32`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `UInt32Value` is JSON number.
|
|
||||||
type UInt32Value struct {
|
|
||||||
// The uint32 value.
|
|
||||||
Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *UInt32Value) Reset() { *m = UInt32Value{} }
|
|
||||||
func (m *UInt32Value) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*UInt32Value) ProtoMessage() {}
|
|
||||||
func (*UInt32Value) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{5}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" }
|
|
||||||
|
|
||||||
func (m *UInt32Value) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_UInt32Value.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *UInt32Value) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_UInt32Value.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *UInt32Value) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_UInt32Value.Size(m)
|
|
||||||
}
|
|
||||||
func (m *UInt32Value) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_UInt32Value.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_UInt32Value proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *UInt32Value) GetValue() uint32 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `bool`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `BoolValue` is JSON `true` and `false`.
|
|
||||||
type BoolValue struct {
|
|
||||||
// The bool value.
|
|
||||||
Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *BoolValue) Reset() { *m = BoolValue{} }
|
|
||||||
func (m *BoolValue) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*BoolValue) ProtoMessage() {}
|
|
||||||
func (*BoolValue) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{6}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" }
|
|
||||||
|
|
||||||
func (m *BoolValue) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_BoolValue.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *BoolValue) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_BoolValue.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *BoolValue) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_BoolValue.Size(m)
|
|
||||||
}
|
|
||||||
func (m *BoolValue) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_BoolValue.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_BoolValue proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *BoolValue) GetValue() bool {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `string`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `StringValue` is JSON string.
|
|
||||||
type StringValue struct {
|
|
||||||
// The string value.
|
|
||||||
Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *StringValue) Reset() { *m = StringValue{} }
|
|
||||||
func (m *StringValue) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*StringValue) ProtoMessage() {}
|
|
||||||
func (*StringValue) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{7}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*StringValue) XXX_WellKnownType() string { return "StringValue" }
|
|
||||||
|
|
||||||
func (m *StringValue) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_StringValue.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_StringValue.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *StringValue) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_StringValue.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *StringValue) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_StringValue.Size(m)
|
|
||||||
}
|
|
||||||
func (m *StringValue) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_StringValue.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_StringValue proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *StringValue) GetValue() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `bytes`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `BytesValue` is JSON string.
|
|
||||||
type BytesValue struct {
|
|
||||||
// The bytes value.
|
|
||||||
Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *BytesValue) Reset() { *m = BytesValue{} }
|
|
||||||
func (m *BytesValue) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*BytesValue) ProtoMessage() {}
|
|
||||||
func (*BytesValue) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{8}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" }
|
|
||||||
|
|
||||||
func (m *BytesValue) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_BytesValue.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *BytesValue) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_BytesValue.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *BytesValue) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_BytesValue.Size(m)
|
|
||||||
}
|
|
||||||
func (m *BytesValue) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_BytesValue.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_BytesValue proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *BytesValue) GetValue() []byte {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
|
|
||||||
proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
|
|
||||||
proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
|
|
||||||
proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
|
|
||||||
proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
|
|
||||||
proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
|
|
||||||
proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
|
|
||||||
proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
|
|
||||||
proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935)
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileDescriptor_5377b62bda767935 = []byte{
|
|
||||||
// 259 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
|
|
||||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c,
|
|
||||||
0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca,
|
|
||||||
0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c,
|
|
||||||
0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5,
|
|
||||||
0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13,
|
|
||||||
0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8,
|
|
||||||
0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca,
|
|
||||||
0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a,
|
|
||||||
0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d,
|
|
||||||
0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24,
|
|
||||||
0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f,
|
|
||||||
0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c,
|
|
||||||
0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e,
|
|
||||||
0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b,
|
|
||||||
0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe,
|
|
||||||
0x01, 0x00, 0x00,
|
|
||||||
}
|
}
|
||||||
|
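The regenerated file above no longer defines the wrapper structs itself; it only aliases the types in google.golang.org/protobuf/types/known/wrapperspb. A small illustrative sketch (not code from this commit) of what that aliasing means for callers: values built with the new wrapperspb constructors satisfy the old import path unchanged.

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes/wrappers"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// wrappers.StringValue is now an alias of wrapperspb.StringValue, so the
	// assignment below compiles without any conversion.
	var s *wrappers.StringValue = wrapperspb.String("vault")
	fmt.Println(s.GetValue())

	// The same holds for the bool and numeric wrappers.
	var b *wrappers.BoolValue = wrapperspb.Bool(true)
	fmt.Println(b.GetValue())
}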
123  vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto  generated vendored  @@ -1,123 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Wrappers for primitive (non-message) types. These types are useful
// for embedding primitives in the `google.protobuf.Any` type and for places
// where we need to distinguish between the absence of a primitive
// typed field and its default value.
//
// These wrappers have no meaningful use within repeated fields as they lack
// the ability to detect presence on individual elements.
// These wrappers have no meaningful use within a map or a oneof since
// individual entries of a map or fields of a oneof can already detect presence.

syntax = "proto3";

package google.protobuf;

option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/wrappers";
option java_package = "com.google.protobuf";
option java_outer_classname = "WrappersProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";

// Wrapper message for `double`.
//
// The JSON representation for `DoubleValue` is JSON number.
message DoubleValue {
  // The double value.
  double value = 1;
}

// Wrapper message for `float`.
//
// The JSON representation for `FloatValue` is JSON number.
message FloatValue {
  // The float value.
  float value = 1;
}

// Wrapper message for `int64`.
//
// The JSON representation for `Int64Value` is JSON string.
message Int64Value {
  // The int64 value.
  int64 value = 1;
}

// Wrapper message for `uint64`.
//
// The JSON representation for `UInt64Value` is JSON string.
message UInt64Value {
  // The uint64 value.
  uint64 value = 1;
}

// Wrapper message for `int32`.
//
// The JSON representation for `Int32Value` is JSON number.
message Int32Value {
  // The int32 value.
  int32 value = 1;
}

// Wrapper message for `uint32`.
//
// The JSON representation for `UInt32Value` is JSON number.
message UInt32Value {
  // The uint32 value.
  uint32 value = 1;
}

// Wrapper message for `bool`.
//
// The JSON representation for `BoolValue` is JSON `true` and `false`.
message BoolValue {
  // The bool value.
  bool value = 1;
}

// Wrapper message for `string`.
//
// The JSON representation for `StringValue` is JSON string.
message StringValue {
  // The string value.
  string value = 1;
}

// Wrapper message for `bytes`.
//
// The JSON representation for `BytesValue` is JSON string.
message BytesValue {
  // The bytes value.
  bytes value = 1;
}
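The header comment explains why these wrappers exist at all: a plain proto3 scalar cannot distinguish "unset" from its zero value, while a pointer to a wrapper message can. A minimal sketch of that presence check in Go, using the wrapperspb aliases shown earlier (illustrative only, not code from this commit):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

// describe reports whether an optional uint32 carried as a wrapper was set.
func describe(v *wrapperspb.UInt32Value) string {
	if v == nil {
		return "absent"
	}
	// Zero is a real, explicitly set value here, unlike a bare uint32 field.
	return fmt.Sprintf("set to %d", v.GetValue())
}

func main() {
	fmt.Println(describe(nil))                     // absent
	fmt.Println(describe(wrapperspb.UInt32(0)))    // set to 0
	fmt.Println(describe(wrapperspb.UInt32(4096))) // set to 4096
}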
16  vendor/github.com/golang/snappy/.gitignore  generated vendored  Normal file  @@ -0,0 +1,16 @@
cmd/snappytool/snappytool
testdata/bench

# These explicitly listed benchmark data files are for an obsolete version of
# snappy_test.go.
testdata/alice29.txt
testdata/asyoulik.txt
testdata/fireworks.jpeg
testdata/geo.protodata
testdata/html
testdata/html_x_4
testdata/kppkn.gtb
testdata/lcet10.txt
testdata/paper-100k.pdf
testdata/plrabn12.txt
testdata/urls.10K
15  vendor/github.com/golang/snappy/AUTHORS  generated vendored  Normal file  @@ -0,0 +1,15 @@
# This is the official list of Snappy-Go authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.

# Names should be added to this file as
#     Name or Organization <email address>
# The email address is not required for organizations.

# Please keep the list sorted.

Damian Gryski <dgryski@gmail.com>
Google Inc.
Jan Mercl <0xjnml@gmail.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Sebastien Binet <seb.binet@gmail.com>
37  vendor/github.com/golang/snappy/CONTRIBUTORS  generated vendored  Normal file  @@ -0,0 +1,37 @@
# This is the official list of people who can contribute
# (and typically have contributed) code to the Snappy-Go repository.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# The submission process automatically checks to make sure
# that people submitting code are listed in this file (by email address).
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
#     http://code.google.com/legal/individual-cla-v1.0.html
#     http://code.google.com/legal/corporate-cla-v1.0.html
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.

# Names should be added to this file like so:
#     Name <email address>

# Please keep the list sorted.

Damian Gryski <dgryski@gmail.com>
Jan Mercl <0xjnml@gmail.com>
Kai Backman <kaib@golang.org>
Marc-Antoine Ruel <maruel@chromium.org>
Nigel Tao <nigeltao@golang.org>
Rob Pike <r@golang.org>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Russ Cox <rsc@golang.org>
Sebastien Binet <seb.binet@gmail.com>
27  vendor/github.com/golang/snappy/LICENSE  generated vendored  Normal file  @@ -0,0 +1,27 @@
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
107  vendor/github.com/golang/snappy/README  generated vendored  Normal file  @@ -0,0 +1,107 @@
The Snappy compression format in the Go programming language.

To download and install from source:
$ go get github.com/golang/snappy

Unless otherwise noted, the Snappy-Go source files are distributed
under the BSD-style license found in the LICENSE file.



Benchmarks.

The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
or so files, the same set used by the C++ Snappy code (github.com/google/snappy
and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:

"go test -test.bench=."

_UFlat0-8    2.19GB/s ± 0%  html
_UFlat1-8    1.41GB/s ± 0%  urls
_UFlat2-8    23.5GB/s ± 2%  jpg
_UFlat3-8    1.91GB/s ± 0%  jpg_200
_UFlat4-8    14.0GB/s ± 1%  pdf
_UFlat5-8    1.97GB/s ± 0%  html4
_UFlat6-8     814MB/s ± 0%  txt1
_UFlat7-8     785MB/s ± 0%  txt2
_UFlat8-8     857MB/s ± 0%  txt3
_UFlat9-8     719MB/s ± 1%  txt4
_UFlat10-8   2.84GB/s ± 0%  pb
_UFlat11-8   1.05GB/s ± 0%  gaviota

_ZFlat0-8    1.04GB/s ± 0%  html
_ZFlat1-8     534MB/s ± 0%  urls
_ZFlat2-8    15.7GB/s ± 1%  jpg
_ZFlat3-8     740MB/s ± 3%  jpg_200
_ZFlat4-8    9.20GB/s ± 1%  pdf
_ZFlat5-8     991MB/s ± 0%  html4
_ZFlat6-8     379MB/s ± 0%  txt1
_ZFlat7-8     352MB/s ± 0%  txt2
_ZFlat8-8     396MB/s ± 1%  txt3
_ZFlat9-8     327MB/s ± 1%  txt4
_ZFlat10-8   1.33GB/s ± 1%  pb
_ZFlat11-8    605MB/s ± 1%  gaviota



"go test -test.bench=. -tags=noasm"

_UFlat0-8     621MB/s ± 2%  html
_UFlat1-8     494MB/s ± 1%  urls
_UFlat2-8    23.2GB/s ± 1%  jpg
_UFlat3-8    1.12GB/s ± 1%  jpg_200
_UFlat4-8    4.35GB/s ± 1%  pdf
_UFlat5-8     609MB/s ± 0%  html4
_UFlat6-8     296MB/s ± 0%  txt1
_UFlat7-8     288MB/s ± 0%  txt2
_UFlat8-8     309MB/s ± 1%  txt3
_UFlat9-8     280MB/s ± 1%  txt4
_UFlat10-8    753MB/s ± 0%  pb
_UFlat11-8    400MB/s ± 0%  gaviota

_ZFlat0-8     409MB/s ± 1%  html
_ZFlat1-8     250MB/s ± 1%  urls
_ZFlat2-8    12.3GB/s ± 1%  jpg
_ZFlat3-8     132MB/s ± 0%  jpg_200
_ZFlat4-8    2.92GB/s ± 0%  pdf
_ZFlat5-8     405MB/s ± 1%  html4
_ZFlat6-8     179MB/s ± 1%  txt1
_ZFlat7-8     170MB/s ± 1%  txt2
_ZFlat8-8     189MB/s ± 1%  txt3
_ZFlat9-8     164MB/s ± 1%  txt4
_ZFlat10-8    479MB/s ± 1%  pb
_ZFlat11-8    270MB/s ± 1%  gaviota



For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
are the numbers from C++ Snappy's

make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log

BM_UFlat/0     2.4GB/s  html
BM_UFlat/1     1.4GB/s  urls
BM_UFlat/2    21.8GB/s  jpg
BM_UFlat/3     1.5GB/s  jpg_200
BM_UFlat/4    13.3GB/s  pdf
BM_UFlat/5     2.1GB/s  html4
BM_UFlat/6     1.0GB/s  txt1
BM_UFlat/7   959.4MB/s  txt2
BM_UFlat/8     1.0GB/s  txt3
BM_UFlat/9   864.5MB/s  txt4
BM_UFlat/10    2.9GB/s  pb
BM_UFlat/11    1.2GB/s  gaviota

BM_ZFlat/0   944.3MB/s  html (22.31 %)
BM_ZFlat/1   501.6MB/s  urls (47.78 %)
BM_ZFlat/2    14.3GB/s  jpg (99.95 %)
BM_ZFlat/3   538.3MB/s  jpg_200 (73.00 %)
BM_ZFlat/4     8.3GB/s  pdf (83.30 %)
BM_ZFlat/5   903.5MB/s  html4 (22.52 %)
BM_ZFlat/6   336.0MB/s  txt1 (57.88 %)
BM_ZFlat/7   312.3MB/s  txt2 (61.91 %)
BM_ZFlat/8   353.1MB/s  txt3 (54.99 %)
BM_ZFlat/9   289.9MB/s  txt4 (66.26 %)
BM_ZFlat/10    1.2GB/s  pb (19.68 %)
BM_ZFlat/11  527.4MB/s  gaviota (37.72 %)
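The README above is mostly benchmark output; for orientation, the block format it measures is driven by two calls, Encode and Decode. A short usage sketch (not part of the vendored sources):

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("ceph-csi "), 1000)

	// Encode and Decode work on whole blocks; a nil destination lets the
	// package allocate a buffer of the right size.
	compressed := snappy.Encode(nil, src)
	decompressed, err := snappy.Decode(nil, compressed)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("%d -> %d bytes, roundtrip ok: %v\n",
		len(src), len(compressed), bytes.Equal(src, decompressed))
}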
237  vendor/github.com/golang/snappy/decode.go  generated vendored  Normal file  @@ -0,0 +1,237 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package snappy

import (
	"encoding/binary"
	"errors"
	"io"
)

var (
	// ErrCorrupt reports that the input is invalid.
	ErrCorrupt = errors.New("snappy: corrupt input")
	// ErrTooLarge reports that the uncompressed length is too large.
	ErrTooLarge = errors.New("snappy: decoded block is too large")
	// ErrUnsupported reports that the input isn't supported.
	ErrUnsupported = errors.New("snappy: unsupported input")

	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
)

// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
	v, _, err := decodedLen(src)
	return v, err
}

// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
	v, n := binary.Uvarint(src)
	if n <= 0 || v > 0xffffffff {
		return 0, 0, ErrCorrupt
	}

	const wordSize = 32 << (^uint(0) >> 32 & 1)
	if wordSize == 32 && v > 0x7fffffff {
		return 0, 0, ErrTooLarge
	}
	return int(v), n, nil
}

const (
	decodeErrCodeCorrupt                  = 1
	decodeErrCodeUnsupportedLiteralLength = 2
)

// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
	dLen, s, err := decodedLen(src)
	if err != nil {
		return nil, err
	}
	if dLen <= len(dst) {
		dst = dst[:dLen]
	} else {
		dst = make([]byte, dLen)
	}
	switch decode(dst, src[s:]) {
	case 0:
		return dst, nil
	case decodeErrCodeUnsupportedLiteralLength:
		return nil, errUnsupportedLiteralLength
	}
	return nil, ErrCorrupt
}

// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
func NewReader(r io.Reader) *Reader {
	return &Reader{
		r:       r,
		decoded: make([]byte, maxBlockSize),
		buf:     make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
	}
}

// Reader is an io.Reader that can read Snappy-compressed bytes.
type Reader struct {
	r       io.Reader
	err     error
	decoded []byte
	buf     []byte
	// decoded[i:j] contains decoded bytes that have not yet been passed on.
	i, j       int
	readHeader bool
}

// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from r. This permits reusing a Reader rather than allocating
// a new one.
func (r *Reader) Reset(reader io.Reader) {
	r.r = reader
	r.err = nil
	r.i = 0
	r.j = 0
	r.readHeader = false
}

func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
			r.err = ErrCorrupt
		}
		return false
	}
	return true
}

// Read satisfies the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
	if r.err != nil {
		return 0, r.err
	}
	for {
		if r.i < r.j {
			n := copy(p, r.decoded[r.i:r.j])
			r.i += n
			return n, nil
		}
		if !r.readFull(r.buf[:4], true) {
			return 0, r.err
		}
		chunkType := r.buf[0]
		if !r.readHeader {
			if chunkType != chunkTypeStreamIdentifier {
				r.err = ErrCorrupt
				return 0, r.err
			}
			r.readHeader = true
		}
		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
		if chunkLen > len(r.buf) {
			r.err = ErrUnsupported
			return 0, r.err
		}

		// The chunk types are specified at
		// https://github.com/google/snappy/blob/master/framing_format.txt
		switch chunkType {
		case chunkTypeCompressedData:
			// Section 4.2. Compressed data (chunk type 0x00).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			buf := r.buf[:chunkLen]
			if !r.readFull(buf, false) {
				return 0, r.err
			}
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			buf = buf[checksumSize:]

			n, err := DecodedLen(buf)
			if err != nil {
				r.err = err
				return 0, r.err
			}
			if n > len(r.decoded) {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if _, err := Decode(r.decoded, buf); err != nil {
				r.err = err
				return 0, r.err
			}
			if crc(r.decoded[:n]) != checksum {
				r.err = ErrCorrupt
				return 0, r.err
			}
			r.i, r.j = 0, n
			continue

		case chunkTypeUncompressedData:
			// Section 4.3. Uncompressed data (chunk type 0x01).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			buf := r.buf[:checksumSize]
			if !r.readFull(buf, false) {
				return 0, r.err
			}
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			// Read directly into r.decoded instead of via r.buf.
			n := chunkLen - checksumSize
			if n > len(r.decoded) {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if !r.readFull(r.decoded[:n], false) {
				return 0, r.err
			}
			if crc(r.decoded[:n]) != checksum {
				r.err = ErrCorrupt
				return 0, r.err
			}
			r.i, r.j = 0, n
			continue

		case chunkTypeStreamIdentifier:
			// Section 4.1. Stream identifier (chunk type 0xff).
			if chunkLen != len(magicBody) {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if !r.readFull(r.buf[:len(magicBody)], false) {
				return 0, r.err
			}
			for i := 0; i < len(magicBody); i++ {
				if r.buf[i] != magicBody[i] {
					r.err = ErrCorrupt
					return 0, r.err
				}
			}
			continue
		}

		if chunkType <= 0x7f {
			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
			r.err = ErrUnsupported
			return 0, r.err
		}
		// Section 4.4 Padding (chunk type 0xfe).
		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
		if !r.readFull(r.buf[:chunkLen], false) {
			return 0, r.err
		}
	}
}
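Decode above handles a single block, while Reader implements the framed streaming format referenced in its comments. A hedged sketch pairing it with the writer side; NewBufferedWriter belongs to the same package but is defined outside the file shown here:

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"strings"

	"github.com/golang/snappy"
)

func main() {
	// Compress into the framing format described in framing_format.txt.
	var framed bytes.Buffer
	w := snappy.NewBufferedWriter(&framed)
	if _, err := io.Copy(w, strings.NewReader(strings.Repeat("rbd ", 1024))); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// Decompress by wrapping any io.Reader; ErrCorrupt and ErrUnsupported
	// from the Read loop above surface through io.Copy.
	var out bytes.Buffer
	if _, err := io.Copy(&out, snappy.NewReader(&framed)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Len(), "bytes decompressed")
}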
14  vendor/github.com/golang/snappy/decode_amd64.go  generated vendored  Normal file  @@ -0,0 +1,14 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm

package snappy

// decode has the same semantics as in decode_other.go.
//
//go:noescape
func decode(dst, src []byte) int
490  vendor/github.com/golang/snappy/decode_amd64.s  generated vendored  Normal file  @@ -0,0 +1,490 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm

#include "textflag.h"

// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".

// func decode(dst, src []byte) int
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
//	- AX	scratch
//	- BX	scratch
//	- CX	length or x
//	- DX	offset
//	- SI	&src[s]
//	- DI	&dst[d]
//	+ R8	dst_base
//	+ R9	dst_len
//	+ R10	dst_base + dst_len
//	+ R11	src_base
//	+ R12	src_len
//	+ R13	src_base + src_len
//	- R14	used by doCopy
//	- R15	used by doCopy
//
// The registers R8-R13 (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
TEXT ·decode(SB), NOSPLIT, $48-56
	// Initialize SI, DI and R8-R13.
	MOVQ dst_base+0(FP), R8
	MOVQ dst_len+8(FP), R9
	MOVQ R8, DI
	MOVQ R8, R10
	ADDQ R9, R10
	MOVQ src_base+24(FP), R11
	MOVQ src_len+32(FP), R12
	MOVQ R11, SI
	MOVQ R11, R13
	ADDQ R12, R13

loop:
	// for s < len(src)
	CMPQ SI, R13
	JEQ  end

	// CX = uint32(src[s])
	//
	// switch src[s] & 0x03
	MOVBLZX (SI), CX
	MOVL    CX, BX
	ANDL    $3, BX
	CMPL    BX, $1
	JAE     tagCopy

	// ----------------------------------------
	// The code below handles literal tags.

	// case tagLiteral:
	// x := uint32(src[s] >> 2)
	// switch
	SHRL $2, CX
	CMPL CX, $60
	JAE  tagLit60Plus

	// case x < 60:
	// s++
	INCQ SI

doLit:
	// This is the end of the inner "switch", when we have a literal tag.
	//
	// We assume that CX == x and x fits in a uint32, where x is the variable
	// used in the pure Go decode_other.go code.

	// length = int(x) + 1
	//
	// Unlike the pure Go code, we don't need to check if length <= 0 because
	// CX can hold 64 bits, so the increment cannot overflow.
	INCQ CX

	// Prepare to check if copying length bytes will run past the end of dst or
	// src.
	//
	// AX = len(dst) - d
	// BX = len(src) - s
	MOVQ R10, AX
	SUBQ DI, AX
	MOVQ R13, BX
	SUBQ SI, BX

	// !!! Try a faster technique for short (16 or fewer bytes) copies.
	//
	// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
	//   goto callMemmove // Fall back on calling runtime·memmove.
	// }
	//
	// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
	// against 21 instead of 16, because it cannot assume that all of its input
	// is contiguous in memory and so it needs to leave enough source bytes to
	// read the next tag without refilling buffers, but Go's Decode assumes
	// contiguousness (the src argument is a []byte).
	CMPQ CX, $16
	JGT  callMemmove
	CMPQ AX, $16
	JLT  callMemmove
	CMPQ BX, $16
	JLT  callMemmove

	// !!! Implement the copy from src to dst as a 16-byte load and store.
	// (Decode's documentation says that dst and src must not overlap.)
	//
	// This always copies 16 bytes, instead of only length bytes, but that's
	// OK. If the input is a valid Snappy encoding then subsequent iterations
	// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
	// non-nil error), so the overrun will be ignored.
	//
	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
	// 16-byte loads and stores. This technique probably wouldn't be as
	// effective on architectures that are fussier about alignment.
	MOVOU 0(SI), X0
	MOVOU X0, 0(DI)

	// d += length
	// s += length
	ADDQ CX, DI
	ADDQ CX, SI
	JMP  loop

callMemmove:
	// if length > len(dst)-d || length > len(src)-s { etc }
	CMPQ CX, AX
	JGT  errCorrupt
	CMPQ CX, BX
	JGT  errCorrupt

	// copy(dst[d:], src[s:s+length])
	//
	// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
	// DI, SI and CX as arguments. Coincidentally, we also need to spill those
	// three registers to the stack, to save local variables across the CALL.
	MOVQ DI, 0(SP)
	MOVQ SI, 8(SP)
	MOVQ CX, 16(SP)
	MOVQ DI, 24(SP)
	MOVQ SI, 32(SP)
	MOVQ CX, 40(SP)
	CALL runtime·memmove(SB)

	// Restore local variables: unspill registers from the stack and
	// re-calculate R8-R13.
	MOVQ 24(SP), DI
	MOVQ 32(SP), SI
	MOVQ 40(SP), CX
	MOVQ dst_base+0(FP), R8
	MOVQ dst_len+8(FP), R9
	MOVQ R8, R10
	ADDQ R9, R10
	MOVQ src_base+24(FP), R11
	MOVQ src_len+32(FP), R12
	MOVQ R11, R13
	ADDQ R12, R13

	// d += length
	// s += length
	ADDQ CX, DI
	ADDQ CX, SI
	JMP  loop

tagLit60Plus:
	// !!! This fragment does the
	//
	// s += x - 58; if uint(s) > uint(len(src)) { etc }
	//
	// checks. In the asm version, we code it once instead of once per switch case.
	ADDQ CX, SI
	SUBQ $58, SI
	MOVQ SI, BX
	SUBQ R11, BX
	CMPQ BX, R12
	JA   errCorrupt

	// case x == 60:
	CMPL CX, $61
	JEQ  tagLit61
	JA   tagLit62Plus

	// x = uint32(src[s-1])
	MOVBLZX -1(SI), CX
	JMP     doLit

tagLit61:
	// case x == 61:
	// x = uint32(src[s-2]) | uint32(src[s-1])<<8
	MOVWLZX -2(SI), CX
	JMP     doLit

tagLit62Plus:
	CMPL CX, $62
	JA   tagLit63

	// case x == 62:
	// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
	MOVWLZX -3(SI), CX
	MOVBLZX -1(SI), BX
	SHLL    $16, BX
	ORL     BX, CX
	JMP     doLit

tagLit63:
	// case x == 63:
	// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
	MOVL -4(SI), CX
	JMP  doLit

	// The code above handles literal tags.
	// ----------------------------------------
|
||||||
|
// The code below handles copy tags.
|
||||||
|
|
||||||
|
tagCopy4:
|
||||||
|
// case tagCopy4:
|
||||||
|
// s += 5
|
||||||
|
ADDQ $5, SI
|
||||||
|
|
||||||
|
// if uint(s) > uint(len(src)) { etc }
|
||||||
|
MOVQ SI, BX
|
||||||
|
SUBQ R11, BX
|
||||||
|
CMPQ BX, R12
|
||||||
|
JA errCorrupt
|
||||||
|
|
||||||
|
// length = 1 + int(src[s-5])>>2
|
||||||
|
SHRQ $2, CX
|
||||||
|
INCQ CX
|
||||||
|
|
||||||
|
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
|
||||||
|
MOVLQZX -4(SI), DX
|
||||||
|
JMP doCopy
|
||||||
|
|
||||||
|
tagCopy2:
|
||||||
|
// case tagCopy2:
|
||||||
|
// s += 3
|
||||||
|
ADDQ $3, SI
|
||||||
|
|
||||||
|
// if uint(s) > uint(len(src)) { etc }
|
||||||
|
MOVQ SI, BX
|
||||||
|
SUBQ R11, BX
|
||||||
|
CMPQ BX, R12
|
||||||
|
JA errCorrupt
|
||||||
|
|
||||||
|
// length = 1 + int(src[s-3])>>2
|
||||||
|
SHRQ $2, CX
|
||||||
|
INCQ CX
|
||||||
|
|
||||||
|
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
|
||||||
|
MOVWQZX -2(SI), DX
|
||||||
|
JMP doCopy
|
||||||
|
|
||||||
|
tagCopy:
|
||||||
|
// We have a copy tag. We assume that:
|
||||||
|
// - BX == src[s] & 0x03
|
||||||
|
// - CX == src[s]
|
||||||
|
CMPQ BX, $2
|
||||||
|
JEQ tagCopy2
|
||||||
|
JA tagCopy4
|
||||||
|
|
||||||
|
// case tagCopy1:
|
||||||
|
// s += 2
|
||||||
|
ADDQ $2, SI
|
||||||
|
|
||||||
|
// if uint(s) > uint(len(src)) { etc }
|
||||||
|
MOVQ SI, BX
|
||||||
|
SUBQ R11, BX
|
||||||
|
CMPQ BX, R12
|
||||||
|
JA errCorrupt
|
||||||
|
|
||||||
|
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
||||||
|
MOVQ CX, DX
|
||||||
|
ANDQ $0xe0, DX
|
||||||
|
SHLQ $3, DX
|
||||||
|
MOVBQZX -1(SI), BX
|
||||||
|
ORQ BX, DX
|
||||||
|
|
||||||
|
// length = 4 + int(src[s-2])>>2&0x7
|
||||||
|
SHRQ $2, CX
|
||||||
|
ANDQ $7, CX
|
||||||
|
ADDQ $4, CX
|
||||||
|
|
||||||
|
doCopy:
|
||||||
|
// This is the end of the outer "switch", when we have a copy tag.
|
||||||
|
//
|
||||||
|
// We assume that:
|
||||||
|
// - CX == length && CX > 0
|
||||||
|
// - DX == offset
|
||||||
|
|
||||||
|
// if offset <= 0 { etc }
|
||||||
|
CMPQ DX, $0
|
||||||
|
JLE errCorrupt
|
||||||
|
|
||||||
|
// if d < offset { etc }
|
||||||
|
MOVQ DI, BX
|
||||||
|
SUBQ R8, BX
|
||||||
|
CMPQ BX, DX
|
||||||
|
JLT errCorrupt
|
||||||
|
|
||||||
|
// if length > len(dst)-d { etc }
|
||||||
|
MOVQ R10, BX
|
||||||
|
SUBQ DI, BX
|
||||||
|
CMPQ CX, BX
|
||||||
|
JGT errCorrupt
|
||||||
|
|
||||||
|
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
|
||||||
|
//
|
||||||
|
// Set:
|
||||||
|
// - R14 = len(dst)-d
|
||||||
|
// - R15 = &dst[d-offset]
|
||||||
|
MOVQ R10, R14
|
||||||
|
SUBQ DI, R14
|
||||||
|
MOVQ DI, R15
|
||||||
|
SUBQ DX, R15
|
||||||
|
|
||||||
|
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
|
||||||
|
//
|
||||||
|
// First, try using two 8-byte load/stores, similar to the doLit technique
|
||||||
|
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
|
||||||
|
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
|
||||||
|
// and not one 16-byte load/store, and the first store has to be before the
|
||||||
|
// second load, due to the overlap if offset is in the range [8, 16).
|
||||||
|
//
|
||||||
|
// if length > 16 || offset < 8 || len(dst)-d < 16 {
|
||||||
|
// goto slowForwardCopy
|
||||||
|
// }
|
||||||
|
// copy 16 bytes
|
||||||
|
// d += length
|
||||||
|
CMPQ CX, $16
|
||||||
|
JGT slowForwardCopy
|
||||||
|
CMPQ DX, $8
|
||||||
|
JLT slowForwardCopy
|
||||||
|
CMPQ R14, $16
|
||||||
|
JLT slowForwardCopy
|
||||||
|
MOVQ 0(R15), AX
|
||||||
|
MOVQ AX, 0(DI)
|
||||||
|
MOVQ 8(R15), BX
|
||||||
|
MOVQ BX, 8(DI)
|
||||||
|
ADDQ CX, DI
|
||||||
|
JMP loop
|
||||||
|
|
||||||
|
slowForwardCopy:
|
||||||
|
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
|
||||||
|
// can still try 8-byte load stores, provided we can overrun up to 10 extra
|
||||||
|
// bytes. As above, the overrun will be fixed up by subsequent iterations
|
||||||
|
// of the outermost loop.
|
||||||
|
//
|
||||||
|
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
|
||||||
|
// commentary says:
|
||||||
|
//
|
||||||
|
// ----
|
||||||
|
//
|
||||||
|
// The main part of this loop is a simple copy of eight bytes at a time
|
||||||
|
// until we've copied (at least) the requested amount of bytes. However,
|
||||||
|
// if d and d-offset are less than eight bytes apart (indicating a
|
||||||
|
// repeating pattern of length < 8), we first need to expand the pattern in
|
||||||
|
// order to get the correct results. For instance, if the buffer looks like
|
||||||
|
// this, with the eight-byte <d-offset> and <d> patterns marked as
|
||||||
|
// intervals:
|
||||||
|
//
|
||||||
|
// abxxxxxxxxxxxx
|
||||||
|
// [------] d-offset
|
||||||
|
// [------] d
|
||||||
|
//
|
||||||
|
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
|
||||||
|
// once, after which we can move <d> two bytes without moving <d-offset>:
|
||||||
|
//
|
||||||
|
// ababxxxxxxxxxx
|
||||||
|
// [------] d-offset
|
||||||
|
// [------] d
|
||||||
|
//
|
||||||
|
// and repeat the exercise until the two no longer overlap.
|
||||||
|
//
|
||||||
|
// This allows us to do very well in the special case of one single byte
|
||||||
|
// repeated many times, without taking a big hit for more general cases.
|
||||||
|
//
|
||||||
|
// The worst case of extra writing past the end of the match occurs when
|
||||||
|
// offset == 1 and length == 1; the last copy will read from byte positions
|
||||||
|
// [0..7] and write to [4..11], whereas it was only supposed to write to
|
||||||
|
// position 1. Thus, ten excess bytes.
|
||||||
|
//
|
||||||
|
// ----
|
||||||
|
//
|
||||||
|
// That "10 byte overrun" worst case is confirmed by Go's
|
||||||
|
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
|
||||||
|
// and finishSlowForwardCopy algorithm.
|
||||||
|
//
|
||||||
|
// if length > len(dst)-d-10 {
|
||||||
|
// goto verySlowForwardCopy
|
||||||
|
// }
|
||||||
|
SUBQ $10, R14
|
||||||
|
CMPQ CX, R14
|
||||||
|
JGT verySlowForwardCopy
|
||||||
|
|
||||||
|
makeOffsetAtLeast8:
|
||||||
|
// !!! As above, expand the pattern so that offset >= 8 and we can use
|
||||||
|
// 8-byte load/stores.
|
||||||
|
//
|
||||||
|
// for offset < 8 {
|
||||||
|
// copy 8 bytes from dst[d-offset:] to dst[d:]
|
||||||
|
// length -= offset
|
||||||
|
// d += offset
|
||||||
|
// offset += offset
|
||||||
|
// // The two previous lines together mean that d-offset, and therefore
|
||||||
|
// // R15, is unchanged.
|
||||||
|
// }
|
||||||
|
CMPQ DX, $8
|
||||||
|
JGE fixUpSlowForwardCopy
|
||||||
|
MOVQ (R15), BX
|
||||||
|
MOVQ BX, (DI)
|
||||||
|
SUBQ DX, CX
|
||||||
|
ADDQ DX, DI
|
||||||
|
ADDQ DX, DX
|
||||||
|
JMP makeOffsetAtLeast8
|
||||||
|
|
||||||
|
fixUpSlowForwardCopy:
|
||||||
|
// !!! Add length (which might be negative now) to d (implied by DI being
|
||||||
|
// &dst[d]) so that d ends up at the right place when we jump back to the
|
||||||
|
// top of the loop. Before we do that, though, we save DI to AX so that, if
|
||||||
|
// length is positive, copying the remaining length bytes will write to the
|
||||||
|
// right place.
|
||||||
|
MOVQ DI, AX
|
||||||
|
ADDQ CX, DI
|
||||||
|
|
||||||
|
finishSlowForwardCopy:
|
||||||
|
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
|
||||||
|
// length means that we overrun, but as above, that will be fixed up by
|
||||||
|
// subsequent iterations of the outermost loop.
|
||||||
|
CMPQ CX, $0
|
||||||
|
JLE loop
|
||||||
|
MOVQ (R15), BX
|
||||||
|
MOVQ BX, (AX)
|
||||||
|
ADDQ $8, R15
|
||||||
|
ADDQ $8, AX
|
||||||
|
SUBQ $8, CX
|
||||||
|
JMP finishSlowForwardCopy
|
||||||
|
|
||||||
|
verySlowForwardCopy:
|
||||||
|
// verySlowForwardCopy is a simple implementation of forward copy. In C
|
||||||
|
// parlance, this is a do/while loop instead of a while loop, since we know
|
||||||
|
// that length > 0. In Go syntax:
|
||||||
|
//
|
||||||
|
// for {
|
||||||
|
// dst[d] = dst[d - offset]
|
||||||
|
// d++
|
||||||
|
// length--
|
||||||
|
// if length == 0 {
|
||||||
|
// break
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
MOVB (R15), BX
|
||||||
|
MOVB BX, (DI)
|
||||||
|
INCQ R15
|
||||||
|
INCQ DI
|
||||||
|
DECQ CX
|
||||||
|
JNZ verySlowForwardCopy
|
||||||
|
JMP loop
|
||||||
|
|
||||||
|
// The code above handles copy tags.
|
||||||
|
// ----------------------------------------
|
||||||
|
|
||||||
|
end:
|
||||||
|
// This is the end of the "for s < len(src)".
|
||||||
|
//
|
||||||
|
// if d != len(dst) { etc }
|
||||||
|
CMPQ DI, R10
|
||||||
|
JNE errCorrupt
|
||||||
|
|
||||||
|
// return 0
|
||||||
|
MOVQ $0, ret+48(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
errCorrupt:
|
||||||
|
// return decodeErrCodeCorrupt
|
||||||
|
MOVQ $1, ret+48(FP)
|
||||||
|
RET
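// Illustrative sketch (added commentary, not part of the vendored files): the
// slowForwardCopy/makeOffsetAtLeast8 path above repeatedly copies the pattern
// and doubles the offset until 8-byte load/stores become safe. The same idea
// in plain Go, without the deliberate overrun; the function name is an
// assumption for the example, not an API of this package.
func forwardCopySketch(dst []byte, d, offset, length int) int {
	for length > 0 {
		n := offset
		if n > length {
			n = length
		}
		// The source dst[d-offset : d-offset+n] ends exactly where the
		// destination starts, so the copy never reads a byte it has just written.
		copy(dst[d:d+n], dst[d-offset:])
		d += n
		length -= n
		offset += offset // after a full pass the pattern behind d has doubled in length
	}
	return d
}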
|
101 vendor/github.com/golang/snappy/decode_other.go generated vendored Normal file
@@ -0,0 +1,101 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !amd64 appengine !gc noasm

package snappy

// decode writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func decode(dst, src []byte) int {
	var d, s, offset, length int
	for s < len(src) {
		switch src[s] & 0x03 {
		case tagLiteral:
			x := uint32(src[s] >> 2)
			switch {
			case x < 60:
				s++
			case x == 60:
				s += 2
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-1])
			case x == 61:
				s += 3
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-2]) | uint32(src[s-1])<<8
			case x == 62:
				s += 4
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
			case x == 63:
				s += 5
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
			}
			length = int(x) + 1
			if length <= 0 {
				return decodeErrCodeUnsupportedLiteralLength
			}
			if length > len(dst)-d || length > len(src)-s {
				return decodeErrCodeCorrupt
			}
			copy(dst[d:], src[s:s+length])
			d += length
			s += length
			continue

		case tagCopy1:
			s += 2
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 4 + int(src[s-2])>>2&0x7
			offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))

		case tagCopy2:
			s += 3
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-3])>>2
			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)

		case tagCopy4:
			s += 5
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-5])>>2
			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
		}

		if offset <= 0 || d < offset || length > len(dst)-d {
			return decodeErrCodeCorrupt
		}
		// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
		// the built-in copy function, this byte-by-byte copy always runs
		// forwards, even if the slices overlap. Conceptually, this is:
		//
		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
		for end := d + length; d != end; d++ {
			dst[d] = dst[d-offset]
		}
	}
	if d != len(dst) {
		return decodeErrCodeCorrupt
	}
	return 0
}
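// Illustrative sketch (added commentary, not part of the vendored file):
// decode assumes the varint-encoded uncompressed length has already been
// consumed and that dst was sized to it. A caller could recover that length
// with encoding/binary, mirroring the binary.PutUvarint call in encode.go.
// The helper name and the 0xffffffff bound are assumptions for the example;
// it requires importing "encoding/binary".
func decodedLenSketch(src []byte) (blockLen, headerLen int, ok bool) {
	v, n := binary.Uvarint(src)
	if n <= 0 || v > 0xffffffff {
		return 0, 0, false
	}
	return int(v), n, true
}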
|
285 vendor/github.com/golang/snappy/encode.go generated vendored Normal file
@@ -0,0 +1,285 @@
|
|||||||
|
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package snappy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Encode returns the encoded form of src. The returned slice may be a sub-
|
||||||
|
// slice of dst if dst was large enough to hold the entire encoded block.
|
||||||
|
// Otherwise, a newly allocated slice will be returned.
|
||||||
|
//
|
||||||
|
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||||
|
func Encode(dst, src []byte) []byte {
|
||||||
|
if n := MaxEncodedLen(len(src)); n < 0 {
|
||||||
|
panic(ErrTooLarge)
|
||||||
|
} else if len(dst) < n {
|
||||||
|
dst = make([]byte, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The block starts with the varint-encoded length of the decompressed bytes.
|
||||||
|
d := binary.PutUvarint(dst, uint64(len(src)))
|
||||||
|
|
||||||
|
for len(src) > 0 {
|
||||||
|
p := src
|
||||||
|
src = nil
|
||||||
|
if len(p) > maxBlockSize {
|
||||||
|
p, src = p[:maxBlockSize], p[maxBlockSize:]
|
||||||
|
}
|
||||||
|
if len(p) < minNonLiteralBlockSize {
|
||||||
|
d += emitLiteral(dst[d:], p)
|
||||||
|
} else {
|
||||||
|
d += encodeBlock(dst[d:], p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst[:d]
|
||||||
|
}
|
||||||
|
|
||||||
|
// inputMargin is the minimum number of extra input bytes to keep, inside
|
||||||
|
// encodeBlock's inner loop. On some architectures, this margin lets us
|
||||||
|
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
|
||||||
|
// literals can be implemented as a single load to and store from a 16-byte
|
||||||
|
// register. That literal's actual length can be as short as 1 byte, so this
|
||||||
|
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
|
||||||
|
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
|
||||||
|
// that we don't overrun the dst and src buffers.
|
||||||
|
const inputMargin = 16 - 1
|
||||||
|
|
||||||
|
// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
|
||||||
|
// could be encoded with a copy tag. This is the minimum with respect to the
|
||||||
|
// algorithm used by encodeBlock, not a minimum enforced by the file format.
|
||||||
|
//
|
||||||
|
// The encoded output must start with at least a 1 byte literal, as there are
|
||||||
|
// no previous bytes to copy. A minimal (1 byte) copy after that, generated
|
||||||
|
// from an emitCopy call in encodeBlock's main loop, would require at least
|
||||||
|
// another inputMargin bytes, for the reason above: we want any emitLiteral
|
||||||
|
// calls inside encodeBlock's main loop to use the fast path if possible, which
|
||||||
|
// requires being able to overrun by inputMargin bytes. Thus,
|
||||||
|
// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
|
||||||
|
//
|
||||||
|
// The C++ code doesn't use this exact threshold, but it could, as discussed at
|
||||||
|
// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
|
||||||
|
// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
|
||||||
|
// optimization. It should not affect the encoded form. This is tested by
|
||||||
|
// TestSameEncodingAsCppShortCopies.
|
||||||
|
const minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||||
|
|
||||||
|
// MaxEncodedLen returns the maximum length of a snappy block, given its
|
||||||
|
// uncompressed length.
|
||||||
|
//
|
||||||
|
// It will return a negative value if srcLen is too large to encode.
|
||||||
|
func MaxEncodedLen(srcLen int) int {
|
||||||
|
n := uint64(srcLen)
|
||||||
|
if n > 0xffffffff {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
// Compressed data can be defined as:
|
||||||
|
// compressed := item* literal*
|
||||||
|
// item := literal* copy
|
||||||
|
//
|
||||||
|
// The trailing literal sequence has a space blowup of at most 62/60
|
||||||
|
// since a literal of length 60 needs one tag byte + one extra byte
|
||||||
|
// for length information.
|
||||||
|
//
|
||||||
|
// Item blowup is trickier to measure. Suppose the "copy" op copies
|
||||||
|
// 4 bytes of data. Because of a special check in the encoding code,
|
||||||
|
// we produce a 4-byte copy only if the offset is < 65536. Therefore
|
||||||
|
// the copy op takes 3 bytes to encode, and this type of item leads
|
||||||
|
// to at most the 62/60 blowup for representing literals.
|
||||||
|
//
|
||||||
|
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
|
||||||
|
// enough, it will take 5 bytes to encode the copy op. Therefore the
|
||||||
|
// worst case here is a one-byte literal followed by a five-byte copy.
|
||||||
|
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
|
||||||
|
//
|
||||||
|
// This last factor dominates the blowup, so the final estimate is:
|
||||||
|
n = 32 + n + n/6
|
||||||
|
if n > 0xffffffff {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return int(n)
|
||||||
|
}
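// Worked example (added commentary, not part of the vendored file): for a
// maxBlockSize input of 65536 bytes, MaxEncodedLen(65536) returns
// 32 + 65536 + 65536/6 = 76490, so a caller can size dst once up front:
//
//	buf := make([]byte, MaxEncodedLen(len(src)))
//	enc := Encode(buf, src) // enc aliases buf; no further allocation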
|
||||||
|
|
||||||
|
var errClosed = errors.New("snappy: Writer is closed")
|
||||||
|
|
||||||
|
// NewWriter returns a new Writer that compresses to w.
|
||||||
|
//
|
||||||
|
// The Writer returned does not buffer writes. There is no need to Flush or
|
||||||
|
// Close such a Writer.
|
||||||
|
//
|
||||||
|
// Deprecated: the Writer returned is not suitable for many small writes, only
|
||||||
|
// for few large writes. Use NewBufferedWriter instead, which is efficient
|
||||||
|
// regardless of the frequency and shape of the writes, and remember to Close
|
||||||
|
// that Writer when done.
|
||||||
|
func NewWriter(w io.Writer) *Writer {
|
||||||
|
return &Writer{
|
||||||
|
w: w,
|
||||||
|
obuf: make([]byte, obufLen),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBufferedWriter returns a new Writer that compresses to w, using the
|
||||||
|
// framing format described at
|
||||||
|
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||||
|
//
|
||||||
|
// The Writer returned buffers writes. Users must call Close to guarantee all
|
||||||
|
// data has been forwarded to the underlying io.Writer. They may also call
|
||||||
|
// Flush zero or more times before calling Close.
|
||||||
|
func NewBufferedWriter(w io.Writer) *Writer {
|
||||||
|
return &Writer{
|
||||||
|
w: w,
|
||||||
|
ibuf: make([]byte, 0, maxBlockSize),
|
||||||
|
obuf: make([]byte, obufLen),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Writer is an io.Writer that can write Snappy-compressed bytes.
|
||||||
|
type Writer struct {
|
||||||
|
w io.Writer
|
||||||
|
err error
|
||||||
|
|
||||||
|
// ibuf is a buffer for the incoming (uncompressed) bytes.
|
||||||
|
//
|
||||||
|
// Its use is optional. For backwards compatibility, Writers created by the
|
||||||
|
// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
|
||||||
|
// therefore do not need to be Flush'ed or Close'd.
|
||||||
|
ibuf []byte
|
||||||
|
|
||||||
|
// obuf is a buffer for the outgoing (compressed) bytes.
|
||||||
|
obuf []byte
|
||||||
|
|
||||||
|
// wroteStreamHeader is whether we have written the stream header.
|
||||||
|
wroteStreamHeader bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset discards the writer's state and switches the Snappy writer to write to
|
||||||
|
// w. This permits reusing a Writer rather than allocating a new one.
|
||||||
|
func (w *Writer) Reset(writer io.Writer) {
|
||||||
|
w.w = writer
|
||||||
|
w.err = nil
|
||||||
|
if w.ibuf != nil {
|
||||||
|
w.ibuf = w.ibuf[:0]
|
||||||
|
}
|
||||||
|
w.wroteStreamHeader = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write satisfies the io.Writer interface.
|
||||||
|
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
|
||||||
|
if w.ibuf == nil {
|
||||||
|
// Do not buffer incoming bytes. This does not perform or compress well
|
||||||
|
// if the caller of Writer.Write writes many small slices. This
|
||||||
|
// behavior is therefore deprecated, but still supported for backwards
|
||||||
|
// compatibility with code that doesn't explicitly Flush or Close.
|
||||||
|
return w.write(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The remainder of this method is based on bufio.Writer.Write from the
|
||||||
|
// standard library.
|
||||||
|
|
||||||
|
for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
|
||||||
|
var n int
|
||||||
|
if len(w.ibuf) == 0 {
|
||||||
|
// Large write, empty buffer.
|
||||||
|
// Write directly from p to avoid copy.
|
||||||
|
n, _ = w.write(p)
|
||||||
|
} else {
|
||||||
|
n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
|
||||||
|
w.ibuf = w.ibuf[:len(w.ibuf)+n]
|
||||||
|
w.Flush()
|
||||||
|
}
|
||||||
|
nRet += n
|
||||||
|
p = p[n:]
|
||||||
|
}
|
||||||
|
if w.err != nil {
|
||||||
|
return nRet, w.err
|
||||||
|
}
|
||||||
|
n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
|
||||||
|
w.ibuf = w.ibuf[:len(w.ibuf)+n]
|
||||||
|
nRet += n
|
||||||
|
return nRet, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) write(p []byte) (nRet int, errRet error) {
|
||||||
|
if w.err != nil {
|
||||||
|
return 0, w.err
|
||||||
|
}
|
||||||
|
for len(p) > 0 {
|
||||||
|
obufStart := len(magicChunk)
|
||||||
|
if !w.wroteStreamHeader {
|
||||||
|
w.wroteStreamHeader = true
|
||||||
|
copy(w.obuf, magicChunk)
|
||||||
|
obufStart = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
var uncompressed []byte
|
||||||
|
if len(p) > maxBlockSize {
|
||||||
|
uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
|
||||||
|
} else {
|
||||||
|
uncompressed, p = p, nil
|
||||||
|
}
|
||||||
|
checksum := crc(uncompressed)
|
||||||
|
|
||||||
|
// Compress the buffer, discarding the result if the improvement
|
||||||
|
// isn't at least 12.5%.
|
||||||
|
compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
|
||||||
|
chunkType := uint8(chunkTypeCompressedData)
|
||||||
|
chunkLen := 4 + len(compressed)
|
||||||
|
obufEnd := obufHeaderLen + len(compressed)
|
||||||
|
if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
|
||||||
|
chunkType = chunkTypeUncompressedData
|
||||||
|
chunkLen = 4 + len(uncompressed)
|
||||||
|
obufEnd = obufHeaderLen
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fill in the per-chunk header that comes before the body.
|
||||||
|
w.obuf[len(magicChunk)+0] = chunkType
|
||||||
|
w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
|
||||||
|
w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
|
||||||
|
w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
|
||||||
|
w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
|
||||||
|
w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
|
||||||
|
w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
|
||||||
|
w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
|
||||||
|
|
||||||
|
if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
|
||||||
|
w.err = err
|
||||||
|
return nRet, err
|
||||||
|
}
|
||||||
|
if chunkType == chunkTypeUncompressedData {
|
||||||
|
if _, err := w.w.Write(uncompressed); err != nil {
|
||||||
|
w.err = err
|
||||||
|
return nRet, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
nRet += len(uncompressed)
|
||||||
|
}
|
||||||
|
return nRet, nil
|
||||||
|
}
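// Illustrative sketch (added commentary, not part of the vendored file): the
// per-chunk header built above is one type byte, a 3-byte little-endian chunk
// length (which counts the 4 checksum bytes), and the 4-byte little-endian
// CRC of the uncompressed data. A reader-side helper could unpack those 8
// bytes like this; the name and return shape are assumptions for the example.
func parseChunkHeaderSketch(h []byte) (chunkType uint8, chunkLen int, checksum uint32) {
	chunkType = h[0]
	chunkLen = int(h[1]) | int(h[2])<<8 | int(h[3])<<16
	checksum = uint32(h[4]) | uint32(h[5])<<8 | uint32(h[6])<<16 | uint32(h[7])<<24
	return chunkType, chunkLen, checksum
}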
|
||||||
|
|
||||||
|
// Flush flushes the Writer to its underlying io.Writer.
|
||||||
|
func (w *Writer) Flush() error {
|
||||||
|
if w.err != nil {
|
||||||
|
return w.err
|
||||||
|
}
|
||||||
|
if len(w.ibuf) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
w.write(w.ibuf)
|
||||||
|
w.ibuf = w.ibuf[:0]
|
||||||
|
return w.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close calls Flush and then closes the Writer.
|
||||||
|
func (w *Writer) Close() error {
|
||||||
|
w.Flush()
|
||||||
|
ret := w.err
|
||||||
|
if w.err == nil {
|
||||||
|
w.err = errClosed
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
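// Illustrative usage sketch (added commentary, not part of the vendored
// file), following the NewBufferedWriter and Close documentation above. The
// bytes.Buffer destination and the function name are assumptions for the
// example; it requires importing "bytes".
func compressFramesSketch(frames [][]byte) ([]byte, error) {
	var out bytes.Buffer
	w := NewBufferedWriter(&out)
	for _, f := range frames {
		if _, err := w.Write(f); err != nil {
			w.Close()
			return nil, err
		}
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}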
|
29 vendor/github.com/golang/snappy/encode_amd64.go generated vendored Normal file
@@ -0,0 +1,29 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm

package snappy

// emitLiteral has the same semantics as in encode_other.go.
//
//go:noescape
func emitLiteral(dst, lit []byte) int

// emitCopy has the same semantics as in encode_other.go.
//
//go:noescape
func emitCopy(dst []byte, offset, length int) int

// extendMatch has the same semantics as in encode_other.go.
//
//go:noescape
func extendMatch(src []byte, i, j int) int

// encodeBlock has the same semantics as in encode_other.go.
//
//go:noescape
func encodeBlock(dst, src []byte) (d int)
|
730 vendor/github.com/golang/snappy/encode_amd64.s generated vendored Normal file
@@ -0,0 +1,730 @@
|
|||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !appengine
|
||||||
|
// +build gc
|
||||||
|
// +build !noasm
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
|
||||||
|
// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
|
||||||
|
// https://github.com/golang/snappy/issues/29
|
||||||
|
//
|
||||||
|
// As a workaround, the package was built with a known good assembler, and
|
||||||
|
// those instructions were disassembled by "objdump -d" to yield the
|
||||||
|
// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
|
||||||
|
// style comments, in AT&T asm syntax. Note that rsp here is a physical
|
||||||
|
// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
|
||||||
|
// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
|
||||||
|
// fine on Go 1.6.
|
||||||
|
|
||||||
|
// The asm code generally follows the pure Go code in encode_other.go, except
|
||||||
|
// where marked with a "!!!".
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
// func emitLiteral(dst, lit []byte) int
|
||||||
|
//
|
||||||
|
// All local variables fit into registers. The register allocation:
|
||||||
|
// - AX len(lit)
|
||||||
|
// - BX n
|
||||||
|
// - DX return value
|
||||||
|
// - DI &dst[i]
|
||||||
|
// - R10 &lit[0]
|
||||||
|
//
|
||||||
|
// The 24 bytes of stack space is to call runtime·memmove.
|
||||||
|
//
|
||||||
|
// The unusual register allocation of local variables, such as R10 for the
|
||||||
|
// source pointer, matches the allocation used at the call site in encodeBlock,
|
||||||
|
// which makes it easier to manually inline this function.
|
||||||
|
TEXT ·emitLiteral(SB), NOSPLIT, $24-56
|
||||||
|
MOVQ dst_base+0(FP), DI
|
||||||
|
MOVQ lit_base+24(FP), R10
|
||||||
|
MOVQ lit_len+32(FP), AX
|
||||||
|
MOVQ AX, DX
|
||||||
|
MOVL AX, BX
|
||||||
|
SUBL $1, BX
|
||||||
|
|
||||||
|
CMPL BX, $60
|
||||||
|
JLT oneByte
|
||||||
|
CMPL BX, $256
|
||||||
|
JLT twoBytes
|
||||||
|
|
||||||
|
threeBytes:
|
||||||
|
MOVB $0xf4, 0(DI)
|
||||||
|
MOVW BX, 1(DI)
|
||||||
|
ADDQ $3, DI
|
||||||
|
ADDQ $3, DX
|
||||||
|
JMP memmove
|
||||||
|
|
||||||
|
twoBytes:
|
||||||
|
MOVB $0xf0, 0(DI)
|
||||||
|
MOVB BX, 1(DI)
|
||||||
|
ADDQ $2, DI
|
||||||
|
ADDQ $2, DX
|
||||||
|
JMP memmove
|
||||||
|
|
||||||
|
oneByte:
|
||||||
|
SHLB $2, BX
|
||||||
|
MOVB BX, 0(DI)
|
||||||
|
ADDQ $1, DI
|
||||||
|
ADDQ $1, DX
|
||||||
|
|
||||||
|
memmove:
|
||||||
|
MOVQ DX, ret+48(FP)
|
||||||
|
|
||||||
|
// copy(dst[i:], lit)
|
||||||
|
//
|
||||||
|
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
|
||||||
|
// DI, R10 and AX as arguments.
|
||||||
|
MOVQ DI, 0(SP)
|
||||||
|
MOVQ R10, 8(SP)
|
||||||
|
MOVQ AX, 16(SP)
|
||||||
|
CALL runtime·memmove(SB)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
// func emitCopy(dst []byte, offset, length int) int
|
||||||
|
//
|
||||||
|
// All local variables fit into registers. The register allocation:
|
||||||
|
// - AX length
|
||||||
|
// - SI &dst[0]
|
||||||
|
// - DI &dst[i]
|
||||||
|
// - R11 offset
|
||||||
|
//
|
||||||
|
// The unusual register allocation of local variables, such as R11 for the
|
||||||
|
// offset, matches the allocation used at the call site in encodeBlock, which
|
||||||
|
// makes it easier to manually inline this function.
|
||||||
|
TEXT ·emitCopy(SB), NOSPLIT, $0-48
|
||||||
|
MOVQ dst_base+0(FP), DI
|
||||||
|
MOVQ DI, SI
|
||||||
|
MOVQ offset+24(FP), R11
|
||||||
|
MOVQ length+32(FP), AX
|
||||||
|
|
||||||
|
loop0:
|
||||||
|
// for length >= 68 { etc }
|
||||||
|
CMPL AX, $68
|
||||||
|
JLT step1
|
||||||
|
|
||||||
|
// Emit a length 64 copy, encoded as 3 bytes.
|
||||||
|
MOVB $0xfe, 0(DI)
|
||||||
|
MOVW R11, 1(DI)
|
||||||
|
ADDQ $3, DI
|
||||||
|
SUBL $64, AX
|
||||||
|
JMP loop0
|
||||||
|
|
||||||
|
step1:
|
||||||
|
// if length > 64 { etc }
|
||||||
|
CMPL AX, $64
|
||||||
|
JLE step2
|
||||||
|
|
||||||
|
// Emit a length 60 copy, encoded as 3 bytes.
|
||||||
|
MOVB $0xee, 0(DI)
|
||||||
|
MOVW R11, 1(DI)
|
||||||
|
ADDQ $3, DI
|
||||||
|
SUBL $60, AX
|
||||||
|
|
||||||
|
step2:
|
||||||
|
// if length >= 12 || offset >= 2048 { goto step3 }
|
||||||
|
CMPL AX, $12
|
||||||
|
JGE step3
|
||||||
|
CMPL R11, $2048
|
||||||
|
JGE step3
|
||||||
|
|
||||||
|
// Emit the remaining copy, encoded as 2 bytes.
|
||||||
|
MOVB R11, 1(DI)
|
||||||
|
SHRL $8, R11
|
||||||
|
SHLB $5, R11
|
||||||
|
SUBB $4, AX
|
||||||
|
SHLB $2, AX
|
||||||
|
ORB AX, R11
|
||||||
|
ORB $1, R11
|
||||||
|
MOVB R11, 0(DI)
|
||||||
|
ADDQ $2, DI
|
||||||
|
|
||||||
|
// Return the number of bytes written.
|
||||||
|
SUBQ SI, DI
|
||||||
|
MOVQ DI, ret+40(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
step3:
|
||||||
|
// Emit the remaining copy, encoded as 3 bytes.
|
||||||
|
SUBL $1, AX
|
||||||
|
SHLB $2, AX
|
||||||
|
ORB $2, AX
|
||||||
|
MOVB AX, 0(DI)
|
||||||
|
MOVW R11, 1(DI)
|
||||||
|
ADDQ $3, DI
|
||||||
|
|
||||||
|
// Return the number of bytes written.
|
||||||
|
SUBQ SI, DI
|
||||||
|
MOVQ DI, ret+40(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
// func extendMatch(src []byte, i, j int) int
|
||||||
|
//
|
||||||
|
// All local variables fit into registers. The register allocation:
|
||||||
|
// - DX &src[0]
|
||||||
|
// - SI &src[j]
|
||||||
|
// - R13 &src[len(src) - 8]
|
||||||
|
// - R14 &src[len(src)]
|
||||||
|
// - R15 &src[i]
|
||||||
|
//
|
||||||
|
// The unusual register allocation of local variables, such as R15 for a source
|
||||||
|
// pointer, matches the allocation used at the call site in encodeBlock, which
|
||||||
|
// makes it easier to manually inline this function.
|
||||||
|
TEXT ·extendMatch(SB), NOSPLIT, $0-48
|
||||||
|
MOVQ src_base+0(FP), DX
|
||||||
|
MOVQ src_len+8(FP), R14
|
||||||
|
MOVQ i+24(FP), R15
|
||||||
|
MOVQ j+32(FP), SI
|
||||||
|
ADDQ DX, R14
|
||||||
|
ADDQ DX, R15
|
||||||
|
ADDQ DX, SI
|
||||||
|
MOVQ R14, R13
|
||||||
|
SUBQ $8, R13
|
||||||
|
|
||||||
|
cmp8:
|
||||||
|
// As long as we are 8 or more bytes before the end of src, we can load and
|
||||||
|
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
|
||||||
|
CMPQ SI, R13
|
||||||
|
JA cmp1
|
||||||
|
MOVQ (R15), AX
|
||||||
|
MOVQ (SI), BX
|
||||||
|
CMPQ AX, BX
|
||||||
|
JNE bsf
|
||||||
|
ADDQ $8, R15
|
||||||
|
ADDQ $8, SI
|
||||||
|
JMP cmp8
|
||||||
|
|
||||||
|
bsf:
|
||||||
|
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
|
||||||
|
// the index of the first byte that differs. The BSF instruction finds the
|
||||||
|
// least significant 1 bit, the amd64 architecture is little-endian, and
|
||||||
|
// the shift by 3 converts a bit index to a byte index.
|
||||||
|
XORQ AX, BX
|
||||||
|
BSFQ BX, BX
|
||||||
|
SHRQ $3, BX
|
||||||
|
ADDQ BX, SI
|
||||||
|
|
||||||
|
// Convert from &src[ret] to ret.
|
||||||
|
SUBQ DX, SI
|
||||||
|
MOVQ SI, ret+40(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
cmp1:
|
||||||
|
// In src's tail, compare 1 byte at a time.
|
||||||
|
CMPQ SI, R14
|
||||||
|
JAE extendMatchEnd
|
||||||
|
MOVB (R15), AX
|
||||||
|
MOVB (SI), BX
|
||||||
|
CMPB AX, BX
|
||||||
|
JNE extendMatchEnd
|
||||||
|
ADDQ $1, R15
|
||||||
|
ADDQ $1, SI
|
||||||
|
JMP cmp1
|
||||||
|
|
||||||
|
extendMatchEnd:
|
||||||
|
// Convert from &src[ret] to ret.
|
||||||
|
SUBQ DX, SI
|
||||||
|
MOVQ SI, ret+40(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
// func encodeBlock(dst, src []byte) (d int)
|
||||||
|
//
|
||||||
|
// All local variables fit into registers, other than "var table". The register
|
||||||
|
// allocation:
|
||||||
|
// - AX . .
|
||||||
|
// - BX . .
|
||||||
|
// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
|
||||||
|
// - DX 64 &src[0], tableSize
|
||||||
|
// - SI 72 &src[s]
|
||||||
|
// - DI 80 &dst[d]
|
||||||
|
// - R9 88 sLimit
|
||||||
|
// - R10 . &src[nextEmit]
|
||||||
|
// - R11 96 prevHash, currHash, nextHash, offset
|
||||||
|
// - R12 104 &src[base], skip
|
||||||
|
// - R13 . &src[nextS], &src[len(src) - 8]
|
||||||
|
// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
|
||||||
|
// - R15 112 candidate
|
||||||
|
//
|
||||||
|
// The second column (56, 64, etc) is the stack offset to spill the registers
|
||||||
|
// when calling other functions. We could pack this slightly tighter, but it's
|
||||||
|
// simpler to have a dedicated spill map independent of the function called.
|
||||||
|
//
|
||||||
|
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
|
||||||
|
// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
|
||||||
|
// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
|
||||||
|
TEXT ·encodeBlock(SB), 0, $32888-56
|
||||||
|
MOVQ dst_base+0(FP), DI
|
||||||
|
MOVQ src_base+24(FP), SI
|
||||||
|
MOVQ src_len+32(FP), R14
|
||||||
|
|
||||||
|
// shift, tableSize := uint32(32-8), 1<<8
|
||||||
|
MOVQ $24, CX
|
||||||
|
MOVQ $256, DX
|
||||||
|
|
||||||
|
calcShift:
|
||||||
|
// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
|
||||||
|
// shift--
|
||||||
|
// }
|
||||||
|
CMPQ DX, $16384
|
||||||
|
JGE varTable
|
||||||
|
CMPQ DX, R14
|
||||||
|
JGE varTable
|
||||||
|
SUBQ $1, CX
|
||||||
|
SHLQ $1, DX
|
||||||
|
JMP calcShift
|
||||||
|
|
||||||
|
varTable:
|
||||||
|
// var table [maxTableSize]uint16
|
||||||
|
//
|
||||||
|
// In the asm code, unlike the Go code, we can zero-initialize only the
|
||||||
|
// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
|
||||||
|
// writes 16 bytes, so we can do only tableSize/8 writes instead of the
|
||||||
|
// 2048 writes that would zero-initialize all of table's 32768 bytes.
|
||||||
|
SHRQ $3, DX
|
||||||
|
LEAQ table-32768(SP), BX
|
||||||
|
PXOR X0, X0
|
||||||
|
|
||||||
|
memclr:
|
||||||
|
MOVOU X0, 0(BX)
|
||||||
|
ADDQ $16, BX
|
||||||
|
SUBQ $1, DX
|
||||||
|
JNZ memclr
|
||||||
|
|
||||||
|
// !!! DX = &src[0]
|
||||||
|
MOVQ SI, DX
|
||||||
|
|
||||||
|
// sLimit := len(src) - inputMargin
|
||||||
|
MOVQ R14, R9
|
||||||
|
SUBQ $15, R9
|
||||||
|
|
||||||
|
// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
|
||||||
|
// change for the rest of the function.
|
||||||
|
MOVQ CX, 56(SP)
|
||||||
|
MOVQ DX, 64(SP)
|
||||||
|
MOVQ R9, 88(SP)
|
||||||
|
|
||||||
|
// nextEmit := 0
|
||||||
|
MOVQ DX, R10
|
||||||
|
|
||||||
|
// s := 1
|
||||||
|
ADDQ $1, SI
|
||||||
|
|
||||||
|
// nextHash := hash(load32(src, s), shift)
|
||||||
|
MOVL 0(SI), R11
|
||||||
|
IMULL $0x1e35a7bd, R11
|
||||||
|
SHRL CX, R11
|
||||||
|
|
||||||
|
outer:
|
||||||
|
// for { etc }
|
||||||
|
|
||||||
|
// skip := 32
|
||||||
|
MOVQ $32, R12
|
||||||
|
|
||||||
|
// nextS := s
|
||||||
|
MOVQ SI, R13
|
||||||
|
|
||||||
|
// candidate := 0
|
||||||
|
MOVQ $0, R15
|
||||||
|
|
||||||
|
inner0:
|
||||||
|
// for { etc }
|
||||||
|
|
||||||
|
// s := nextS
|
||||||
|
MOVQ R13, SI
|
||||||
|
|
||||||
|
// bytesBetweenHashLookups := skip >> 5
|
||||||
|
MOVQ R12, R14
|
||||||
|
SHRQ $5, R14
|
||||||
|
|
||||||
|
// nextS = s + bytesBetweenHashLookups
|
||||||
|
ADDQ R14, R13
|
||||||
|
|
||||||
|
// skip += bytesBetweenHashLookups
|
||||||
|
ADDQ R14, R12
|
||||||
|
|
||||||
|
// if nextS > sLimit { goto emitRemainder }
|
||||||
|
MOVQ R13, AX
|
||||||
|
SUBQ DX, AX
|
||||||
|
CMPQ AX, R9
|
||||||
|
JA emitRemainder
|
||||||
|
|
||||||
|
// candidate = int(table[nextHash])
|
||||||
|
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
|
||||||
|
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
|
||||||
|
BYTE $0x4e
|
||||||
|
BYTE $0x0f
|
||||||
|
BYTE $0xb7
|
||||||
|
BYTE $0x7c
|
||||||
|
BYTE $0x5c
|
||||||
|
BYTE $0x78
|
||||||
|
|
||||||
|
// table[nextHash] = uint16(s)
|
||||||
|
MOVQ SI, AX
|
||||||
|
SUBQ DX, AX
|
||||||
|
|
||||||
|
// XXX: MOVW AX, table-32768(SP)(R11*2)
|
||||||
|
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
|
||||||
|
BYTE $0x66
|
||||||
|
BYTE $0x42
|
||||||
|
BYTE $0x89
|
||||||
|
BYTE $0x44
|
||||||
|
BYTE $0x5c
|
||||||
|
BYTE $0x78
|
||||||
|
|
||||||
|
// nextHash = hash(load32(src, nextS), shift)
|
||||||
|
MOVL 0(R13), R11
|
||||||
|
IMULL $0x1e35a7bd, R11
|
||||||
|
SHRL CX, R11
|
||||||
|
|
||||||
|
// if load32(src, s) != load32(src, candidate) { continue } break
|
||||||
|
MOVL 0(SI), AX
|
||||||
|
MOVL (DX)(R15*1), BX
|
||||||
|
CMPL AX, BX
|
||||||
|
JNE inner0
|
||||||
|
|
||||||
|
fourByteMatch:
|
||||||
|
// As per the encode_other.go code:
|
||||||
|
//
|
||||||
|
// A 4-byte match has been found. We'll later see etc.
|
||||||
|
|
||||||
|
// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
|
||||||
|
// on inputMargin in encode.go.
|
||||||
|
MOVQ SI, AX
|
||||||
|
SUBQ R10, AX
|
||||||
|
CMPQ AX, $16
|
||||||
|
JLE emitLiteralFastPath
|
||||||
|
|
||||||
|
// ----------------------------------------
|
||||||
|
// Begin inline of the emitLiteral call.
|
||||||
|
//
|
||||||
|
// d += emitLiteral(dst[d:], src[nextEmit:s])
|
||||||
|
|
||||||
|
MOVL AX, BX
|
||||||
|
SUBL $1, BX
|
||||||
|
|
||||||
|
CMPL BX, $60
|
||||||
|
JLT inlineEmitLiteralOneByte
|
||||||
|
CMPL BX, $256
|
||||||
|
JLT inlineEmitLiteralTwoBytes
|
||||||
|
|
||||||
|
inlineEmitLiteralThreeBytes:
|
||||||
|
MOVB $0xf4, 0(DI)
|
||||||
|
MOVW BX, 1(DI)
|
||||||
|
ADDQ $3, DI
|
||||||
|
JMP inlineEmitLiteralMemmove
|
||||||
|
|
||||||
|
inlineEmitLiteralTwoBytes:
|
||||||
|
MOVB $0xf0, 0(DI)
|
||||||
|
MOVB BX, 1(DI)
|
||||||
|
ADDQ $2, DI
|
||||||
|
JMP inlineEmitLiteralMemmove
|
||||||
|
|
||||||
|
inlineEmitLiteralOneByte:
|
||||||
|
SHLB $2, BX
|
||||||
|
MOVB BX, 0(DI)
|
||||||
|
ADDQ $1, DI
|
||||||
|
|
||||||
|
inlineEmitLiteralMemmove:
|
||||||
|
// Spill local variables (registers) onto the stack; call; unspill.
|
||||||
|
//
|
||||||
|
// copy(dst[i:], lit)
|
||||||
|
//
|
||||||
|
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
|
||||||
|
// DI, R10 and AX as arguments.
|
||||||
|
MOVQ DI, 0(SP)
|
||||||
|
MOVQ R10, 8(SP)
|
||||||
|
MOVQ AX, 16(SP)
|
||||||
|
ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
|
||||||
|
MOVQ SI, 72(SP)
|
||||||
|
MOVQ DI, 80(SP)
|
||||||
|
MOVQ R15, 112(SP)
|
||||||
|
CALL runtime·memmove(SB)
|
||||||
|
MOVQ 56(SP), CX
|
||||||
|
MOVQ 64(SP), DX
|
||||||
|
MOVQ 72(SP), SI
|
||||||
|
MOVQ 80(SP), DI
|
||||||
|
MOVQ 88(SP), R9
|
||||||
|
MOVQ 112(SP), R15
|
||||||
|
JMP inner1
|
||||||
|
|
||||||
|
inlineEmitLiteralEnd:
|
||||||
|
// End inline of the emitLiteral call.
|
||||||
|
// ----------------------------------------
|
||||||
|
|
||||||
|
emitLiteralFastPath:
|
||||||
|
// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
|
||||||
|
MOVB AX, BX
|
||||||
|
SUBB $1, BX
|
||||||
|
SHLB $2, BX
|
||||||
|
MOVB BX, (DI)
|
||||||
|
ADDQ $1, DI
|
||||||
|
|
||||||
|
// !!! Implement the copy from lit to dst as a 16-byte load and store.
|
||||||
|
// (Encode's documentation says that dst and src must not overlap.)
|
||||||
|
//
|
||||||
|
// This always copies 16 bytes, instead of only len(lit) bytes, but that's
|
||||||
|
// OK. Subsequent iterations will fix up the overrun.
|
||||||
|
//
|
||||||
|
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
|
||||||
|
// 16-byte loads and stores. This technique probably wouldn't be as
|
||||||
|
// effective on architectures that are fussier about alignment.
|
||||||
|
MOVOU 0(R10), X0
|
||||||
|
MOVOU X0, 0(DI)
|
||||||
|
ADDQ AX, DI
|
||||||
|
|
||||||
|
inner1:
|
||||||
|
// for { etc }
|
||||||
|
|
||||||
|
// base := s
|
||||||
|
MOVQ SI, R12
|
||||||
|
|
||||||
|
// !!! offset := base - candidate
|
||||||
|
MOVQ R12, R11
|
||||||
|
SUBQ R15, R11
|
||||||
|
SUBQ DX, R11
|
||||||
|
|
||||||
|
// ----------------------------------------
|
||||||
|
// Begin inline of the extendMatch call.
|
||||||
|
//
|
||||||
|
// s = extendMatch(src, candidate+4, s+4)
|
||||||
|
|
||||||
|
// !!! R14 = &src[len(src)]
|
||||||
|
MOVQ src_len+32(FP), R14
|
||||||
|
ADDQ DX, R14
|
||||||
|
|
||||||
|
// !!! R13 = &src[len(src) - 8]
|
||||||
|
MOVQ R14, R13
|
||||||
|
SUBQ $8, R13
|
||||||
|
|
||||||
|
// !!! R15 = &src[candidate + 4]
|
||||||
|
ADDQ $4, R15
|
||||||
|
ADDQ DX, R15
|
||||||
|
|
||||||
|
// !!! s += 4
|
||||||
|
ADDQ $4, SI
|
||||||
|
|
||||||
|
inlineExtendMatchCmp8:
|
||||||
|
// As long as we are 8 or more bytes before the end of src, we can load and
|
||||||
|
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
|
||||||
|
CMPQ SI, R13
|
||||||
|
JA inlineExtendMatchCmp1
|
||||||
|
MOVQ (R15), AX
|
||||||
|
MOVQ (SI), BX
|
||||||
|
CMPQ AX, BX
|
||||||
|
JNE inlineExtendMatchBSF
|
||||||
|
ADDQ $8, R15
|
||||||
|
ADDQ $8, SI
|
||||||
|
JMP inlineExtendMatchCmp8
|
||||||
|
|
||||||
|
inlineExtendMatchBSF:
|
||||||
|
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
|
||||||
|
// the index of the first byte that differs. The BSF instruction finds the
|
||||||
|
// least significant 1 bit, the amd64 architecture is little-endian, and
|
||||||
|
// the shift by 3 converts a bit index to a byte index.
|
||||||
|
XORQ AX, BX
|
||||||
|
BSFQ BX, BX
|
||||||
|
SHRQ $3, BX
|
||||||
|
ADDQ BX, SI
|
||||||
|
JMP inlineExtendMatchEnd
|
||||||
|
|
||||||
|
inlineExtendMatchCmp1:
|
||||||
|
// In src's tail, compare 1 byte at a time.
|
||||||
|
CMPQ SI, R14
|
||||||
|
JAE inlineExtendMatchEnd
|
||||||
|
MOVB (R15), AX
|
||||||
|
MOVB (SI), BX
|
||||||
|
CMPB AX, BX
|
||||||
|
JNE inlineExtendMatchEnd
|
||||||
|
ADDQ $1, R15
|
||||||
|
ADDQ $1, SI
|
||||||
|
JMP inlineExtendMatchCmp1
|
||||||
|
|
||||||
|
inlineExtendMatchEnd:
|
||||||
|
// End inline of the extendMatch call.
|
||||||
|
// ----------------------------------------
|
||||||
|
|
||||||
|
// ----------------------------------------
|
||||||
|
// Begin inline of the emitCopy call.
|
||||||
|
//
|
||||||
|
// d += emitCopy(dst[d:], base-candidate, s-base)
|
||||||
|
|
||||||
|
// !!! length := s - base
|
||||||
|
MOVQ SI, AX
|
||||||
|
SUBQ R12, AX
|
||||||
|
|
||||||
|
inlineEmitCopyLoop0:
|
||||||
|
// for length >= 68 { etc }
|
||||||
|
CMPL AX, $68
|
||||||
|
JLT inlineEmitCopyStep1
|
||||||
|
|
||||||
|
// Emit a length 64 copy, encoded as 3 bytes.
|
||||||
|
MOVB $0xfe, 0(DI)
|
||||||
|
MOVW R11, 1(DI)
|
||||||
|
ADDQ $3, DI
|
||||||
|
SUBL $64, AX
|
||||||
|
JMP inlineEmitCopyLoop0
|
||||||
|
|
||||||
|
inlineEmitCopyStep1:
|
||||||
|
// if length > 64 { etc }
|
||||||
|
CMPL AX, $64
|
||||||
|
JLE inlineEmitCopyStep2
|
||||||
|
|
||||||
|
// Emit a length 60 copy, encoded as 3 bytes.
|
||||||
|
MOVB $0xee, 0(DI)
|
||||||
|
MOVW R11, 1(DI)
|
||||||
|
ADDQ $3, DI
|
||||||
|
SUBL $60, AX
|
||||||
|
|
||||||
|
inlineEmitCopyStep2:
|
||||||
|
// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
|
||||||
|
CMPL AX, $12
|
||||||
|
JGE inlineEmitCopyStep3
|
||||||
|
CMPL R11, $2048
|
||||||
|
JGE inlineEmitCopyStep3
|
||||||
|
|
||||||
|
// Emit the remaining copy, encoded as 2 bytes.
|
||||||
|
MOVB R11, 1(DI)
|
||||||
|
SHRL $8, R11
|
||||||
|
SHLB $5, R11
|
||||||
|
SUBB $4, AX
|
||||||
|
SHLB $2, AX
|
||||||
|
ORB AX, R11
|
||||||
|
ORB $1, R11
|
||||||
|
MOVB R11, 0(DI)
|
||||||
|
ADDQ $2, DI
|
||||||
|
JMP inlineEmitCopyEnd
|
||||||
|
|
||||||
|
inlineEmitCopyStep3:
|
||||||
|
// Emit the remaining copy, encoded as 3 bytes.
|
||||||
|
SUBL $1, AX
|
||||||
|
SHLB $2, AX
|
||||||
|
ORB $2, AX
|
||||||
|
MOVB AX, 0(DI)
|
||||||
|
MOVW R11, 1(DI)
|
||||||
|
ADDQ $3, DI
|
||||||
|
|
||||||
|
inlineEmitCopyEnd:
|
||||||
|
// End inline of the emitCopy call.
|
||||||
|
// ----------------------------------------
|
||||||
|
|
||||||
|
// nextEmit = s
|
||||||
|
MOVQ SI, R10
|
||||||
|
|
||||||
|
// if s >= sLimit { goto emitRemainder }
|
||||||
|
MOVQ SI, AX
|
||||||
|
SUBQ DX, AX
|
||||||
|
CMPQ AX, R9
|
||||||
|
JAE emitRemainder
|
||||||
|
|
||||||
|
// As per the encode_other.go code:
|
||||||
|
//
|
||||||
|
// We could immediately etc.
|
||||||
|
|
||||||
|
// x := load64(src, s-1)
|
||||||
|
MOVQ -1(SI), R14
|
||||||
|
|
||||||
|
// prevHash := hash(uint32(x>>0), shift)
|
||||||
|
MOVL R14, R11
|
||||||
|
IMULL $0x1e35a7bd, R11
|
||||||
|
SHRL CX, R11
|
||||||
|
|
||||||
|
// table[prevHash] = uint16(s-1)
|
||||||
|
MOVQ SI, AX
|
||||||
|
SUBQ DX, AX
|
||||||
|
SUBQ $1, AX
|
||||||
|
|
||||||
|
// XXX: MOVW AX, table-32768(SP)(R11*2)
|
||||||
|
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
|
||||||
|
BYTE $0x66
|
||||||
|
BYTE $0x42
|
||||||
|
BYTE $0x89
|
||||||
|
BYTE $0x44
|
||||||
|
BYTE $0x5c
|
||||||
|
BYTE $0x78
|
||||||
|
|
||||||
|
// currHash := hash(uint32(x>>8), shift)
|
||||||
|
SHRQ $8, R14
|
||||||
|
MOVL R14, R11
|
||||||
|
IMULL $0x1e35a7bd, R11
|
||||||
|
SHRL CX, R11
|
||||||
|
|
||||||
|
// candidate = int(table[currHash])
|
||||||
|
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
|
||||||
|
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
|
||||||
|
BYTE $0x4e
|
||||||
|
BYTE $0x0f
|
||||||
|
BYTE $0xb7
|
||||||
|
BYTE $0x7c
|
||||||
|
BYTE $0x5c
|
||||||
|
BYTE $0x78
|
||||||
|
|
||||||
|
// table[currHash] = uint16(s)
|
||||||
|
ADDQ $1, AX
|
||||||
|
|
||||||
|
// XXX: MOVW AX, table-32768(SP)(R11*2)
|
||||||
|
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
|
||||||
|
BYTE $0x66
|
||||||
|
BYTE $0x42
|
||||||
|
BYTE $0x89
|
||||||
|
BYTE $0x44
|
||||||
|
BYTE $0x5c
|
||||||
|
BYTE $0x78
|
||||||
|
|
||||||
|
// if uint32(x>>8) == load32(src, candidate) { continue }
|
||||||
|
MOVL (DX)(R15*1), BX
|
||||||
|
CMPL R14, BX
|
||||||
|
JEQ inner1
|
||||||
|
|
||||||
|
// nextHash = hash(uint32(x>>16), shift)
|
||||||
|
SHRQ $8, R14
|
||||||
|
MOVL R14, R11
|
||||||
|
IMULL $0x1e35a7bd, R11
|
||||||
|
SHRL CX, R11
|
||||||
|
|
||||||
|
// s++
|
||||||
|
ADDQ $1, SI
|
||||||
|
|
||||||
|
// break out of the inner1 for loop, i.e. continue the outer loop.
|
||||||
|
JMP outer
|
||||||
|
|
||||||
|
emitRemainder:
|
||||||
|
// if nextEmit < len(src) { etc }
|
||||||
|
MOVQ src_len+32(FP), AX
|
||||||
|
ADDQ DX, AX
|
||||||
|
CMPQ R10, AX
|
||||||
|
JEQ encodeBlockEnd
|
||||||
|
|
||||||
|
// d += emitLiteral(dst[d:], src[nextEmit:])
|
||||||
|
//
|
||||||
|
// Push args.
|
||||||
|
MOVQ DI, 0(SP)
|
||||||
|
MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
|
||||||
|
MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
|
||||||
|
MOVQ R10, 24(SP)
|
||||||
|
SUBQ R10, AX
|
||||||
|
MOVQ AX, 32(SP)
|
||||||
|
MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
|
||||||
|
|
||||||
|
// Spill local variables (registers) onto the stack; call; unspill.
|
||||||
|
MOVQ DI, 80(SP)
|
||||||
|
CALL ·emitLiteral(SB)
|
||||||
|
MOVQ 80(SP), DI
|
||||||
|
|
||||||
|
// Finish the "d +=" part of "d += emitLiteral(etc)".
|
||||||
|
ADDQ 48(SP), DI
|
||||||
|
|
||||||
|
encodeBlockEnd:
|
||||||
|
MOVQ dst_base+0(FP), AX
|
||||||
|
SUBQ AX, DI
|
||||||
|
MOVQ DI, d+48(FP)
|
||||||
|
RET
|
238 vendor/github.com/golang/snappy/encode_other.go generated vendored Normal file
@@ -0,0 +1,238 @@
|
|||||||
|
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !amd64 appengine !gc noasm
|
||||||
|
|
||||||
|
package snappy
|
||||||
|
|
||||||
|
func load32(b []byte, i int) uint32 {
|
||||||
|
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
|
||||||
|
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
|
||||||
|
}
|
||||||
|
|
||||||
|
func load64(b []byte, i int) uint64 {
|
||||||
|
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
|
||||||
|
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
|
||||||
|
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
|
||||||
|
}
|
||||||
|
|
||||||
|
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
||||||
|
//
|
||||||
|
// It assumes that:
|
||||||
|
// dst is long enough to hold the encoded bytes
|
||||||
|
// 1 <= len(lit) && len(lit) <= 65536
|
||||||
|
func emitLiteral(dst, lit []byte) int {
|
||||||
|
i, n := 0, uint(len(lit)-1)
|
||||||
|
switch {
|
||||||
|
case n < 60:
|
||||||
|
dst[0] = uint8(n)<<2 | tagLiteral
|
||||||
|
i = 1
|
||||||
|
case n < 1<<8:
|
||||||
|
dst[0] = 60<<2 | tagLiteral
|
||||||
|
dst[1] = uint8(n)
|
||||||
|
i = 2
|
||||||
|
default:
|
||||||
|
dst[0] = 61<<2 | tagLiteral
|
||||||
|
dst[1] = uint8(n)
|
||||||
|
dst[2] = uint8(n >> 8)
|
||||||
|
i = 3
|
||||||
|
}
|
||||||
|
return i + copy(dst[i:], lit)
|
||||||
|
}
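// Worked example (added commentary, not part of the vendored file): for a
// 100-byte literal, n = 99, so the "n < 1<<8" case above emits
//
//	dst[0] = 60<<2 | tagLiteral // 0xf0
//	dst[1] = uint8(99)          // 0x63
//
// followed by the 100 literal bytes, for 102 bytes in total.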
|
||||||
|
|
||||||
|
// emitCopy writes a copy chunk and returns the number of bytes written.
|
||||||
|
//
|
||||||
|
// It assumes that:
|
||||||
|
// dst is long enough to hold the encoded bytes
|
||||||
|
// 1 <= offset && offset <= 65535
|
||||||
|
// 4 <= length && length <= 65535
|
||||||
|
func emitCopy(dst []byte, offset, length int) int {
|
||||||
|
i := 0
|
||||||
|
// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
|
||||||
|
// threshold for this loop is a little higher (at 68 = 64 + 4), and the
|
||||||
|
// length emitted down below is a little lower (at 60 = 64 - 4), because
|
||||||
|
// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
|
||||||
|
// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
|
||||||
|
// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
|
||||||
|
// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
|
||||||
|
// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
|
||||||
|
// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
|
||||||
|
for length >= 68 {
|
||||||
|
// Emit a length 64 copy, encoded as 3 bytes.
|
||||||
|
dst[i+0] = 63<<2 | tagCopy2
|
||||||
|
dst[i+1] = uint8(offset)
|
||||||
|
dst[i+2] = uint8(offset >> 8)
|
||||||
|
i += 3
|
||||||
|
length -= 64
|
||||||
|
}
|
||||||
|
if length > 64 {
|
||||||
|
// Emit a length 60 copy, encoded as 3 bytes.
|
||||||
|
dst[i+0] = 59<<2 | tagCopy2
|
||||||
|
dst[i+1] = uint8(offset)
|
||||||
|
dst[i+2] = uint8(offset >> 8)
|
||||||
|
i += 3
|
||||||
|
length -= 60
|
||||||
|
}
|
||||||
|
if length >= 12 || offset >= 2048 {
|
||||||
|
// Emit the remaining copy, encoded as 3 bytes.
|
||||||
|
dst[i+0] = uint8(length-1)<<2 | tagCopy2
|
||||||
|
dst[i+1] = uint8(offset)
|
||||||
|
dst[i+2] = uint8(offset >> 8)
|
||||||
|
return i + 3
|
||||||
|
}
|
||||||
|
// Emit the remaining copy, encoded as 2 bytes.
|
||||||
|
dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
|
||||||
|
dst[i+1] = uint8(offset)
|
||||||
|
return i + 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// extendMatch returns the largest k such that k <= len(src) and that
|
||||||
|
// src[i:i+k-j] and src[j:k] have the same contents.
|
||||||
|
//
|
||||||
|
// It assumes that:
|
||||||
|
// 0 <= i && i < j && j <= len(src)
|
||||||
|
func extendMatch(src []byte, i, j int) int {
|
||||||
|
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
|
||||||
|
}
|
||||||
|
return j
|
||||||
|
}
|
||||||
|
|
||||||
|
func hash(u, shift uint32) uint32 {
|
||||||
|
return (u * 0x1e35a7bd) >> shift
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||||
|
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||||
|
// been written.
|
||||||
|
//
|
||||||
|
// It also assumes that:
|
||||||
|
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||||
|
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||||
|
func encodeBlock(dst, src []byte) (d int) {
|
||||||
|
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
|
||||||
|
// The table element type is uint16, as s < sLimit and sLimit < len(src)
|
||||||
|
// and len(src) <= maxBlockSize and maxBlockSize == 65536.
|
||||||
|
const (
|
||||||
|
maxTableSize = 1 << 14
|
||||||
|
// tableMask is redundant, but helps the compiler eliminate bounds
|
||||||
|
// checks.
|
||||||
|
tableMask = maxTableSize - 1
|
||||||
|
)
|
||||||
|
shift := uint32(32 - 8)
|
||||||
|
for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
|
||||||
|
shift--
|
||||||
|
}
|
||||||
|
// In Go, all array elements are zero-initialized, so there is no advantage
|
||||||
|
// to a smaller tableSize per se. However, it matches the C++ algorithm,
|
||||||
|
// and in the asm versions of this code, we can get away with zeroing only
|
||||||
|
// the first tableSize elements.
|
||||||
|
var table [maxTableSize]uint16
|
||||||
|
|
||||||
|
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||||
|
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||||
|
// looking for copies.
|
||||||
|
sLimit := len(src) - inputMargin
|
||||||
|
|
||||||
|
// nextEmit is where in src the next emitLiteral should start from.
|
||||||
|
nextEmit := 0
|
||||||
|
|
||||||
|
// The encoded form must start with a literal, as there are no previous
|
||||||
|
// bytes to copy, so we start looking for hash matches at s == 1.
|
||||||
|
s := 1
|
||||||
|
nextHash := hash(load32(src, s), shift)
|
||||||
|
|
||||||
|
for {
|
||||||
|
// Copied from the C++ snappy implementation:
|
||||||
|
//
|
||||||
|
// Heuristic match skipping: If 32 bytes are scanned with no matches
|
||||||
|
// found, start looking only at every other byte. If 32 more bytes are
|
||||||
|
// scanned (or skipped), look at every third byte, etc.. When a match
|
||||||
|
// is found, immediately go back to looking at every byte. This is a
|
||||||
|
// small loss (~5% performance, ~0.1% density) for compressible data
|
||||||
|
// due to more bookkeeping, but for non-compressible data (such as
|
||||||
|
// JPEG) it's a huge win since the compressor quickly "realizes" the
|
||||||
|
// data is incompressible and doesn't bother looking for matches
|
||||||
|
// everywhere.
|
||||||
|
//
|
||||||
|
// The "skip" variable keeps track of how many bytes there are since
|
||||||
|
// the last match; dividing it by 32 (ie. right-shifting by five) gives
|
||||||
|
// the number of bytes to move ahead for each iteration.
|
||||||
|
skip := 32
|
||||||
|
|
||||||
|
nextS := s
|
||||||
|
candidate := 0
|
||||||
|
for {
|
||||||
|
s = nextS
|
||||||
|
bytesBetweenHashLookups := skip >> 5
|
||||||
|
nextS = s + bytesBetweenHashLookups
|
||||||
|
skip += bytesBetweenHashLookups
|
||||||
|
if nextS > sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
candidate = int(table[nextHash&tableMask])
|
||||||
|
table[nextHash&tableMask] = uint16(s)
|
||||||
|
nextHash = hash(load32(src, nextS), shift)
|
||||||
|
if load32(src, s) == load32(src, candidate) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||||
|
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||||
|
// them as literal bytes.
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:s])
|
||||||
|
|
||||||
|
// Call emitCopy, and then see if another emitCopy could be our next
|
||||||
|
// move. Repeat until we find no match for the input immediately after
|
||||||
|
// what was consumed by the last emitCopy call.
|
||||||
|
//
|
||||||
|
// If we exit this loop normally then we need to call emitLiteral next,
|
||||||
|
// though we don't yet know how big the literal will be. We handle that
|
||||||
|
// by proceeding to the next iteration of the main loop. We also can
|
||||||
|
// exit this loop via goto if we get close to exhausting the input.
|
||||||
|
for {
|
||||||
|
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||||
|
// literal bytes prior to s.
|
||||||
|
base := s
|
||||||
|
|
||||||
|
// Extend the 4-byte match as long as possible.
|
||||||
|
//
|
||||||
|
// This is an inlined version of:
|
||||||
|
// s = extendMatch(src, candidate+4, s+4)
|
||||||
|
s += 4
|
||||||
|
for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
|
||||||
|
}
|
||||||
|
|
||||||
|
d += emitCopy(dst[d:], base-candidate, s-base)
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
|
||||||
|
// We could immediately start working at s now, but to improve
|
||||||
|
// compression we first update the hash table at s-1 and at s. If
|
||||||
|
// another emitCopy is not our next move, also calculate nextHash
|
||||||
|
// at s+1. At least on GOARCH=amd64, these three hash calculations
|
||||||
|
// are faster as one load64 call (with some shifts) instead of
|
||||||
|
// three load32 calls.
|
||||||
|
x := load64(src, s-1)
|
||||||
|
prevHash := hash(uint32(x>>0), shift)
|
||||||
|
table[prevHash&tableMask] = uint16(s - 1)
|
||||||
|
currHash := hash(uint32(x>>8), shift)
|
||||||
|
candidate = int(table[currHash&tableMask])
|
||||||
|
table[currHash&tableMask] = uint16(s)
|
||||||
|
if uint32(x>>8) != load32(src, candidate) {
|
||||||
|
nextHash = hash(uint32(x>>16), shift)
|
||||||
|
s++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
emitRemainder:
|
||||||
|
if nextEmit < len(src) {
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:])
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
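For reference while reading the encoder above: emitLiteral and emitCopy only ever emit the tag layouts described in snappy.go. The short standalone sketch below is an editorial illustration (not part of the vendored code) that mirrors the short-literal branch and the 2-byte tagCopy1 branch so the bit layout can be checked by hand.

package main

import "fmt"

// literalTag mirrors the n < 60 branch of emitLiteral: the literal length
// minus one goes in the upper six bits of the tag byte, low two bits are 00.
func literalTag(litLen int) byte {
	return byte(litLen-1)<<2 | 0x00 // tagLiteral
}

// copy1Tag mirrors the 2-byte emitCopy branch (offset < 2048, 4 <= length < 12):
// bits 5-7 carry offset bits 8-10, bits 2-4 carry length-4, low bits are 01.
func copy1Tag(offset, length int) [2]byte {
	return [2]byte{
		byte(offset>>8)<<5 | byte(length-4)<<2 | 0x01, // tagCopy1
		byte(offset),
	}
}

func main() {
	fmt.Printf("%#02x\n", literalTag(5))     // 0x10: a 5-byte literal follows
	fmt.Printf("%#02x\n", copy1Tag(1, 4)[0]) // 0x01: copy 4 bytes from offset 1
}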
1 vendor/github.com/golang/snappy/go.mod generated vendored Normal file
@ -0,0 +1 @@
module github.com/golang/snappy
98 vendor/github.com/golang/snappy/snappy.go generated vendored Normal file
@ -0,0 +1,98 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package snappy implements the Snappy compression format. It aims for very
// high speeds and reasonable compression.
//
// There are actually two Snappy formats: block and stream. They are related,
// but different: trying to decompress block-compressed data as a Snappy stream
// will fail, and vice versa. The block format is the Decode and Encode
// functions and the stream format is the Reader and Writer types.
//
// The block format, the more common case, is used when the complete size (the
// number of bytes) of the original data is known upfront, at the time
// compression starts. The stream format, also known as the framing format, is
// for when that isn't always true.
//
// The canonical, C++ implementation is at https://github.com/google/snappy and
// it only implements the block format.
package snappy // import "github.com/golang/snappy"

import (
	"hash/crc32"
)

/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.

For literal tags:
  - If m < 60, the next 1 + m bytes are literal bytes.
  - Otherwise, let n be the little-endian unsigned integer denoted by the next
    m - 59 bytes. The next 1 + n bytes after that are literal bytes.

For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
    of the offset. The next byte is bits 0-7 of the offset.
  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
    The length is 1 + m. The offset is the little-endian unsigned integer
    denoted by the next 2 bytes.
  - For l == 3, this tag is a legacy format that is no longer issued by most
    encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
    integer denoted by the next 4 bytes.
*/
const (
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
	tagCopy4   = 0x03
)

const (
	checksumSize    = 4
	chunkHeaderSize = 4
	magicChunk      = "\xff\x06\x00\x00" + magicBody
	magicBody       = "sNaPpY"

	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
	// part of the wire format per se, but some parts of the encoder assume
	// that an offset fits into a uint16.
	//
	// Also, for the framing format (Writer type instead of Encode function),
	// https://github.com/google/snappy/blob/master/framing_format.txt says
	// that "the uncompressed data in a chunk must be no longer than 65536
	// bytes".
	maxBlockSize = 65536

	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
	// hard coded to be a const instead of a variable, so that obufLen can also
	// be a const. Their equivalence is confirmed by
	// TestMaxEncodedLenOfMaxBlockSize.
	maxEncodedLenOfMaxBlockSize = 76490

	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
)

const (
	chunkTypeCompressedData   = 0x00
	chunkTypeUncompressedData = 0x01
	chunkTypePadding          = 0xfe
	chunkTypeStreamIdentifier = 0xff
)

var crcTable = crc32.MakeTable(crc32.Castagnoli)

// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
	c := crc32.Update(0, crcTable, b)
	return uint32(c>>15|c<<17) + 0xa282ead8
}
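The crc function above is the masked CRC-32C from the Snappy framing format: the plain Castagnoli checksum is rotated right by 15 bits and offset by a magic constant. A minimal standalone check of that relationship, as an editorial sketch rather than part of the vendored code:

package main

import (
	"fmt"
	"hash/crc32"
)

// maskedCRC reproduces the masking step used by the vendored crc function:
// rotate the CRC-32C value right by 15 bits, then add 0xa282ead8.
func maskedCRC(b []byte) uint32 {
	c := crc32.Checksum(b, crc32.MakeTable(crc32.Castagnoli))
	return uint32(c>>15|c<<17) + 0xa282ead8
}

func main() {
	fmt.Printf("%#08x\n", maskedCRC([]byte("sNaPpY")))
}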
83 vendor/github.com/google/go-cmp/cmp/compare.go generated vendored
@ -6,6 +6,10 @@
//
// This package is intended to be a more powerful and safer alternative to
// reflect.DeepEqual for comparing whether two values are semantically equal.
// It is intended to only be used in tests, as performance is not a goal and
// it may panic if it cannot compare the values. Its propensity towards
// panicking means that its unsuitable for production environments where a
// spurious panic may be fatal.
//
// The primary features of cmp are:
//
@ -86,6 +90,52 @@ import (
// If there is a cycle, then the pointed at values are considered equal
// only if both addresses were previously visited in the same path step.
func Equal(x, y interface{}, opts ...Option) bool {
	s := newState(opts)
	s.compareAny(rootStep(x, y))
	return s.result.Equal()
}

// Diff returns a human-readable report of the differences between two values.
// It returns an empty string if and only if Equal returns true for the same
// input values and options.
//
// The output is displayed as a literal in pseudo-Go syntax.
// At the start of each line, a "-" prefix indicates an element removed from x,
// a "+" prefix to indicates an element added to y, and the lack of a prefix
// indicates an element common to both x and y. If possible, the output
// uses fmt.Stringer.String or error.Error methods to produce more humanly
// readable outputs. In such cases, the string is prefixed with either an
// 's' or 'e' character, respectively, to indicate that the method was called.
//
// Do not depend on this output being stable. If you need the ability to
// programmatically interpret the difference, consider using a custom Reporter.
func Diff(x, y interface{}, opts ...Option) string {
	s := newState(opts)

	// Optimization: If there are no other reporters, we can optimize for the
	// common case where the result is equal (and thus no reported difference).
	// This avoids the expensive construction of a difference tree.
	if len(s.reporters) == 0 {
		s.compareAny(rootStep(x, y))
		if s.result.Equal() {
			return ""
		}
		s.result = diff.Result{} // Reset results
	}

	r := new(defaultReporter)
	s.reporters = append(s.reporters, reporter{r})
	s.compareAny(rootStep(x, y))
	d := r.String()
	if (d == "") != s.result.Equal() {
		panic("inconsistent difference and equality results")
	}
	return d
}

// rootStep constructs the first path step. If x and y have differing types,
// then they are stored within an empty interface type.
func rootStep(x, y interface{}) PathStep {
	vx := reflect.ValueOf(x)
	vy := reflect.ValueOf(y)

@ -108,33 +158,7 @@ func Equal(x, y interface{}, opts ...Option) bool {
		t = vx.Type()
	}

	s := newState(opts)
	s.compareAny(&pathStep{t, vx, vy})
	return s.result.Equal()
}

// Diff returns a human-readable report of the differences between two values.
// It returns an empty string if and only if Equal returns true for the same
// input values and options.
//
// The output is displayed as a literal in pseudo-Go syntax.
// At the start of each line, a "-" prefix indicates an element removed from x,
// a "+" prefix to indicates an element added to y, and the lack of a prefix
// indicates an element common to both x and y. If possible, the output
// uses fmt.Stringer.String or error.Error methods to produce more humanly
// readable outputs. In such cases, the string is prefixed with either an
// 's' or 'e' character, respectively, to indicate that the method was called.
//
// Do not depend on this output being stable. If you need the ability to
// programmatically interpret the difference, consider using a custom Reporter.
func Diff(x, y interface{}, opts ...Option) string {
	r := new(defaultReporter)
	eq := Equal(x, y, Options(opts), Reporter(r))
	d := r.String()
	if (d == "") != eq {
		panic("inconsistent difference and equality results")
	}
	return d
	return &pathStep{t, vx, vy}
}

type state struct {
@ -352,7 +376,7 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
// assuming that T is assignable to R.
// Otherwise, it returns the input value as is.
func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
	// TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143).
	// TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143).
	if !flags.AtLeastGo110 {
		if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
			return reflect.New(t).Elem()
@ -362,6 +386,7 @@ func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
}

func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
	var addr bool
	var vax, vay reflect.Value // Addressable versions of vx and vy

	var mayForce, mayForceInit bool
@ -383,6 +408,7 @@ func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
			// For retrieveUnexportedField to work, the parent struct must
			// be addressable. Create a new copy of the values if
			// necessary to make them addressable.
			addr = vx.CanAddr() || vy.CanAddr()
			vax = makeAddressable(vx)
			vay = makeAddressable(vy)
		}
@ -393,6 +419,7 @@ func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
			mayForceInit = true
		}
		step.mayForce = mayForce
		step.paddr = addr
		step.pvx = vax
		step.pvy = vay
		step.field = t.Field(i)
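The compare.go changes above restructure Equal and Diff around a shared rootStep helper and let Diff skip building a report tree when the values are already equal. From the caller's side the public API is unchanged; a typical use, shown here as an editorial sketch with a made-up struct, still looks like this:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// pod is a hypothetical value type used only for this illustration.
type pod struct {
	Name     string
	Replicas int
}

func main() {
	x := pod{Name: "csi-rbdplugin", Replicas: 3}
	y := pod{Name: "csi-rbdplugin", Replicas: 4}

	// Diff returns "" if and only if Equal reports true for the same inputs.
	if diff := cmp.Diff(x, y); diff != "" {
		fmt.Printf("unexpected difference (-x +y):\n%s", diff)
	}
}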
2 vendor/github.com/google/go-cmp/cmp/export_panic.go generated vendored
@ -10,6 +10,6 @@ import "reflect"

const supportExporters = false

func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value {
func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value {
	panic("no support for forcibly accessing unexported fields")
}
20 vendor/github.com/google/go-cmp/cmp/export_unsafe.go generated vendored
@ -17,9 +17,19 @@ const supportExporters = true
// a struct such that the value has read-write permissions.
//
// The parent struct, v, must be addressable, while f must be a StructField
// describing the field to retrieve.
func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value {
	// See https://github.com/google/go-cmp/issues/167 for discussion of the
	// following expression.
	return reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
// describing the field to retrieve. If addr is false,
// then the returned value will be shallowed copied to be non-addressable.
func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
	ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
	if !addr {
		// A field is addressable if and only if the struct is addressable.
		// If the original parent value was not addressable, shallow copy the
		// value to make it non-addressable to avoid leaking an implementation
		// detail of how forcibly exporting a field works.
		if ve.Kind() == reflect.Interface && ve.IsNil() {
			return reflect.Zero(f.Type)
		}
		return reflect.ValueOf(ve.Interface()).Convert(f.Type)
	}
	return ve
}
22 vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go generated vendored
@ -12,6 +12,13 @@
// is more important than obtaining a minimal Levenshtein distance.
package diff

import (
	"math/rand"
	"time"

	"github.com/google/go-cmp/cmp/internal/flags"
)

// EditType represents a single operation within an edit-script.
type EditType uint8

@ -112,6 +119,8 @@ func (r Result) Similar() bool {
	return r.NumSame+1 >= r.NumDiff
}

var randInt = rand.New(rand.NewSource(time.Now().Unix())).Intn(2)

// Difference reports whether two lists of lengths nx and ny are equal
// given the definition of equality provided as f.
//
@ -159,6 +168,17 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
	// A vertical edge is equivalent to inserting a symbol from list Y.
	// A diagonal edge is equivalent to a matching symbol between both X and Y.

	// To ensure flexibility in changing the algorithm in the future,
	// introduce some degree of deliberate instability.
	// This is achieved by fiddling the zigzag iterator to start searching
	// the graph starting from the bottom-right versus than the top-left.
	// The result may differ depending on the starting search location,
	// but still produces a valid edit script.
	zigzagInit := randInt // either 0 or 1
	if flags.Deterministic {
		zigzagInit = 0
	}

	// Invariants:
	// • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
	// • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
@ -209,7 +229,7 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
			break
		}
		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
		for stop1, stop2, i := false, false, zigzagInit; !(stop1 && stop2) && searchBudget > 0; i++ {
			// Search in a diagonal pattern for a match.
			z := zigzag(i)
			p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
157 vendor/github.com/google/go-cmp/cmp/internal/value/name.go generated vendored Normal file
@ -0,0 +1,157 @@
// Copyright 2020, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

package value

import (
	"reflect"
	"strconv"
)

// TypeString is nearly identical to reflect.Type.String,
// but has an additional option to specify that full type names be used.
func TypeString(t reflect.Type, qualified bool) string {
	return string(appendTypeName(nil, t, qualified, false))
}

func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte {
	// BUG: Go reflection provides no way to disambiguate two named types
	// of the same name and within the same package,
	// but declared within the namespace of different functions.

	// Named type.
	if t.Name() != "" {
		if qualified && t.PkgPath() != "" {
			b = append(b, '"')
			b = append(b, t.PkgPath()...)
			b = append(b, '"')
			b = append(b, '.')
			b = append(b, t.Name()...)
		} else {
			b = append(b, t.String()...)
		}
		return b
	}

	// Unnamed type.
	switch k := t.Kind(); k {
	case reflect.Bool, reflect.String, reflect.UnsafePointer,
		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
		reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
		b = append(b, k.String()...)
	case reflect.Chan:
		if t.ChanDir() == reflect.RecvDir {
			b = append(b, "<-"...)
		}
		b = append(b, "chan"...)
		if t.ChanDir() == reflect.SendDir {
			b = append(b, "<-"...)
		}
		b = append(b, ' ')
		b = appendTypeName(b, t.Elem(), qualified, false)
	case reflect.Func:
		if !elideFunc {
			b = append(b, "func"...)
		}
		b = append(b, '(')
		for i := 0; i < t.NumIn(); i++ {
			if i > 0 {
				b = append(b, ", "...)
			}
			if i == t.NumIn()-1 && t.IsVariadic() {
				b = append(b, "..."...)
				b = appendTypeName(b, t.In(i).Elem(), qualified, false)
			} else {
				b = appendTypeName(b, t.In(i), qualified, false)
			}
		}
		b = append(b, ')')
		switch t.NumOut() {
		case 0:
			// Do nothing
		case 1:
			b = append(b, ' ')
			b = appendTypeName(b, t.Out(0), qualified, false)
		default:
			b = append(b, " ("...)
			for i := 0; i < t.NumOut(); i++ {
				if i > 0 {
					b = append(b, ", "...)
				}
				b = appendTypeName(b, t.Out(i), qualified, false)
			}
			b = append(b, ')')
		}
	case reflect.Struct:
		b = append(b, "struct{ "...)
		for i := 0; i < t.NumField(); i++ {
			if i > 0 {
				b = append(b, "; "...)
			}
			sf := t.Field(i)
			if !sf.Anonymous {
				if qualified && sf.PkgPath != "" {
					b = append(b, '"')
					b = append(b, sf.PkgPath...)
					b = append(b, '"')
					b = append(b, '.')
				}
				b = append(b, sf.Name...)
				b = append(b, ' ')
			}
			b = appendTypeName(b, sf.Type, qualified, false)
			if sf.Tag != "" {
				b = append(b, ' ')
				b = strconv.AppendQuote(b, string(sf.Tag))
			}
		}
		if b[len(b)-1] == ' ' {
			b = b[:len(b)-1]
		} else {
			b = append(b, ' ')
		}
		b = append(b, '}')
	case reflect.Slice, reflect.Array:
		b = append(b, '[')
		if k == reflect.Array {
			b = strconv.AppendUint(b, uint64(t.Len()), 10)
		}
		b = append(b, ']')
		b = appendTypeName(b, t.Elem(), qualified, false)
	case reflect.Map:
		b = append(b, "map["...)
		b = appendTypeName(b, t.Key(), qualified, false)
		b = append(b, ']')
		b = appendTypeName(b, t.Elem(), qualified, false)
	case reflect.Ptr:
		b = append(b, '*')
		b = appendTypeName(b, t.Elem(), qualified, false)
	case reflect.Interface:
		b = append(b, "interface{ "...)
		for i := 0; i < t.NumMethod(); i++ {
			if i > 0 {
				b = append(b, "; "...)
			}
			m := t.Method(i)
			if qualified && m.PkgPath != "" {
				b = append(b, '"')
				b = append(b, m.PkgPath...)
				b = append(b, '"')
				b = append(b, '.')
			}
			b = append(b, m.Name...)
			b = appendTypeName(b, m.Type, qualified, true)
		}
		if b[len(b)-1] == ' ' {
			b = b[:len(b)-1]
		} else {
			b = append(b, ' ')
		}
		b = append(b, '}')
	default:
		panic("invalid kind: " + k.String())
	}
	return b
}
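name.go gives the reporter a way to print fully qualified type names when two types share the same short name. The helper below is an editorial sketch built only on the standard reflect API, not the vendored function; it shows the `"pkgpath".Name` shape that TypeString produces when qualified is true.

package main

import (
	"fmt"
	"net/url"
	"reflect"
)

// qualifiedName renders a named type as "pkgpath".Name, the same shape the
// vendored TypeString emits in qualified mode; unnamed types fall back to
// reflect's default String output.
func qualifiedName(t reflect.Type) string {
	if t.Name() != "" && t.PkgPath() != "" {
		return fmt.Sprintf("%q.%s", t.PkgPath(), t.Name())
	}
	return t.String()
}

func main() {
	fmt.Println(qualifiedName(reflect.TypeOf(url.URL{}))) // "net/url".URL
}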
10 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go generated vendored
@ -21,3 +21,13 @@ func PointerOf(v reflect.Value) Pointer {
	// assumes that the GC implementation does not use a moving collector.
	return Pointer{v.Pointer(), v.Type()}
}

// IsNil reports whether the pointer is nil.
func (p Pointer) IsNil() bool {
	return p.p == 0
}

// Uintptr returns the pointer as a uintptr.
func (p Pointer) Uintptr() uintptr {
	return p.p
}
10 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go generated vendored
@ -24,3 +24,13 @@ func PointerOf(v reflect.Value) Pointer {
	// which is necessary if the GC ever uses a moving collector.
	return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
}

// IsNil reports whether the pointer is nil.
func (p Pointer) IsNil() bool {
	return p.p == nil
}

// Uintptr returns the pointer as a uintptr.
func (p Pointer) Uintptr() uintptr {
	return uintptr(p.p)
}
7 vendor/github.com/google/go-cmp/cmp/path.go generated vendored
@ -177,7 +177,8 @@ type structField struct {
	// pvx, pvy, and field are only valid if unexported is true.
	unexported bool
	mayForce   bool // Forcibly allow visibility
	pvx, pvy   reflect.Value // Parent values
	paddr      bool // Was parent addressable?
	pvx, pvy   reflect.Value // Parent values (always addressible)
	field      reflect.StructField // Field information
}

@ -189,8 +190,8 @@ func (sf StructField) Values() (vx, vy reflect.Value) {

	// Forcibly obtain read-write access to an unexported struct field.
	if sf.mayForce {
		vx = retrieveUnexportedField(sf.pvx, sf.field)
		vy = retrieveUnexportedField(sf.pvy, sf.field)
		vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr)
		vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr)
		return vx, vy // CanInterface reports true
	}
	return sf.vx, sf.vy // CanInterface reports false
5 vendor/github.com/google/go-cmp/cmp/report.go generated vendored
@ -41,7 +41,10 @@ func (r *defaultReporter) String() string {
	if r.root.NumDiff == 0 {
		return ""
	}
	return formatOptions{}.FormatDiff(r.root).String()
	ptrs := new(pointerReferences)
	text := formatOptions{}.FormatDiff(r.root, ptrs)
	resolveReferences(text)
	return text.String()
}

func assert(ok bool) {
200 vendor/github.com/google/go-cmp/cmp/report_compare.go generated vendored
@ -11,14 +11,6 @@ import (
	"github.com/google/go-cmp/cmp/internal/value"
)

// TODO: Enforce limits?
// * Enforce maximum number of records to print per node?
// * Enforce maximum size in bytes allowed?
// * As a heuristic, use less verbosity for equal nodes than unequal nodes.
// TODO: Enforce unique outputs?
// * Avoid Stringer methods if it results in same output?
// * Print pointer address if outputs still equal?

// numContextRecords is the number of surrounding equal records to print.
const numContextRecords = 2

@ -71,24 +63,66 @@ func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
	opts.TypeMode = t
	return opts
}

func (opts formatOptions) WithVerbosity(level int) formatOptions {
	opts.VerbosityLevel = level
	opts.LimitVerbosity = true
	return opts
}

func (opts formatOptions) verbosity() uint {
	switch {
	case opts.VerbosityLevel < 0:
		return 0
	case opts.VerbosityLevel > 16:
		return 16 // some reasonable maximum to avoid shift overflow
	default:
		return uint(opts.VerbosityLevel)
	}
}

const maxVerbosityPreset = 3

// verbosityPreset modifies the verbosity settings given an index
// between 0 and maxVerbosityPreset, inclusive.
func verbosityPreset(opts formatOptions, i int) formatOptions {
	opts.VerbosityLevel = int(opts.verbosity()) + 2*i
	if i > 0 {
		opts.AvoidStringer = true
	}
	if i >= maxVerbosityPreset {
		opts.PrintAddresses = true
		opts.QualifiedNames = true
	}
	return opts
}

// FormatDiff converts a valueNode tree into a textNode tree, where the later
// is a textual representation of the differences detected in the former.
func (opts formatOptions) FormatDiff(v *valueNode) textNode {
func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
	if opts.DiffMode == diffIdentical {
		opts = opts.WithVerbosity(1)
	} else {
		opts = opts.WithVerbosity(3)
	}

	// Check whether we have specialized formatting for this node.
	// This is not necessary, but helpful for producing more readable outputs.
	if opts.CanFormatDiffSlice(v) {
		return opts.FormatDiffSlice(v)
	}

	var parentKind reflect.Kind
	if v.parent != nil && v.parent.TransformerName == "" {
		parentKind = v.parent.Type.Kind()
	}

	// For leaf nodes, format the value based on the reflect.Values alone.
	if v.MaxDepth == 0 {
		switch opts.DiffMode {
		case diffUnknown, diffIdentical:
			// Format Equal.
			if v.NumDiff == 0 {
				outx := opts.FormatValue(v.ValueX, visitedPointers{})
				outy := opts.FormatValue(v.ValueY, visitedPointers{})
				outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
				outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
				if v.NumIgnored > 0 && v.NumSame == 0 {
					return textEllipsis
				} else if outx.Len() < outy.Len() {
@ -101,8 +135,13 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode {
			// Format unequal.
			assert(opts.DiffMode == diffUnknown)
			var list textList
			outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{})
			outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{})
			outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
			outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
			for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
				opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
				outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
				outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
			}
			if outx != nil {
				list = append(list, textRecord{Diff: '-', Value: outx})
			}
@ -111,34 +150,57 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode {
			}
			return opts.WithTypeMode(emitType).FormatType(v.Type, list)
		case diffRemoved:
			return opts.FormatValue(v.ValueX, visitedPointers{})
			return opts.FormatValue(v.ValueX, parentKind, ptrs)
		case diffInserted:
			return opts.FormatValue(v.ValueY, visitedPointers{})
			return opts.FormatValue(v.ValueY, parentKind, ptrs)
		default:
			panic("invalid diff mode")
		}
	}

	// Register slice element to support cycle detection.
	if parentKind == reflect.Slice {
		ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true)
		defer ptrs.Pop()
		defer func() { out = wrapTrunkReferences(ptrRefs, out) }()
	}

	// Descend into the child value node.
	if v.TransformerName != "" {
		out := opts.WithTypeMode(emitType).FormatDiff(v.Value)
		out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"}
		out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
		out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"}
		return opts.FormatType(v.Type, out)
	} else {
		switch k := v.Type.Kind(); k {
		case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map:
			return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k))
		case reflect.Struct, reflect.Array, reflect.Slice:
			out = opts.formatDiffList(v.Records, k, ptrs)
			out = opts.FormatType(v.Type, out)
		case reflect.Map:
			// Register map to support cycle detection.
			ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
			defer ptrs.Pop()

			out = opts.formatDiffList(v.Records, k, ptrs)
			out = wrapTrunkReferences(ptrRefs, out)
			out = opts.FormatType(v.Type, out)
		case reflect.Ptr:
			return textWrap{"&", opts.FormatDiff(v.Value), ""}
			// Register pointer to support cycle detection.
			ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
			defer ptrs.Pop()

			out = opts.FormatDiff(v.Value, ptrs)
			out = wrapTrunkReferences(ptrRefs, out)
			out = &textWrap{Prefix: "&", Value: out}
		case reflect.Interface:
			return opts.WithTypeMode(emitType).FormatDiff(v.Value)
			out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
		default:
			panic(fmt.Sprintf("%v cannot have children", k))
		}
		return out
	}
}

func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode {
func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode {
	// Derive record name based on the data structure kind.
	var name string
	var formatKey func(reflect.Value) string
@ -154,7 +216,17 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te
	case reflect.Map:
		name = "entry"
		opts = opts.WithTypeMode(elideType)
		formatKey = formatMapKey
		formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) }
	}

	maxLen := -1
	if opts.LimitVerbosity {
		if opts.DiffMode == diffIdentical {
			maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
		} else {
			maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc...
		}
		opts.VerbosityLevel--
	}

	// Handle unification.
@ -163,6 +235,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te
		var list textList
		var deferredEllipsis bool // Add final "..." to indicate records were dropped
		for _, r := range recs {
			if len(list) == maxLen {
				deferredEllipsis = true
				break
			}

			// Elide struct fields that are zero value.
			if k == reflect.Struct {
				var isZero bool
@ -186,23 +263,31 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te
				}
				continue
			}
			if out := opts.FormatDiff(r.Value); out != nil {
			if out := opts.FormatDiff(r.Value, ptrs); out != nil {
				list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
			}
		}
		if deferredEllipsis {
			list.AppendEllipsis(diffStats{})
		}
		return textWrap{"{", list, "}"}
		return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
	case diffUnknown:
	default:
		panic("invalid diff mode")
	}

	// Handle differencing.
	var numDiffs int
	var list textList
	var keys []reflect.Value // invariant: len(list) == len(keys)
	groups := coalesceAdjacentRecords(name, recs)
	maxGroup := diffStats{Name: name}
	for i, ds := range groups {
		if maxLen >= 0 && numDiffs >= maxLen {
			maxGroup = maxGroup.Append(ds)
			continue
		}

		// Handle equal records.
		if ds.NumDiff() == 0 {
			// Compute the number of leading and trailing records to print.
@ -226,16 +311,21 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te

			// Format the equal values.
			for _, r := range recs[:numLo] {
				out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value)
				out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
				list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
				keys = append(keys, r.Key)
			}
			if numEqual > numLo+numHi {
				ds.NumIdentical -= numLo + numHi
				list.AppendEllipsis(ds)
				for len(keys) < len(list) {
					keys = append(keys, reflect.Value{})
				}
			}
			for _, r := range recs[numEqual-numHi : numEqual] {
				out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value)
				out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
				list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
				keys = append(keys, r.Key)
			}
			recs = recs[numEqual:]
			continue
@ -247,24 +337,70 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te
			case opts.CanFormatDiffSlice(r.Value):
				out := opts.FormatDiffSlice(r.Value)
				list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
				keys = append(keys, r.Key)
			case r.Value.NumChildren == r.Value.MaxDepth:
				outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value)
				outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value)
				outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
				outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
				for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
					opts2 := verbosityPreset(opts, i)
					outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
					outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
				}
				if outx != nil {
					list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
					keys = append(keys, r.Key)
				}
				if outy != nil {
					list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
					keys = append(keys, r.Key)
				}
			default:
				out := opts.FormatDiff(r.Value)
				out := opts.FormatDiff(r.Value, ptrs)
				list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
				keys = append(keys, r.Key)
			}
		}
		recs = recs[ds.NumDiff():]
		numDiffs += ds.NumDiff()
	}
	assert(len(recs) == 0)
	return textWrap{"{", list, "}"}
	if maxGroup.IsZero() {
		assert(len(recs) == 0)
	} else {
		list.AppendEllipsis(maxGroup)
		for len(keys) < len(list) {
			keys = append(keys, reflect.Value{})
		}
	}
	assert(len(list) == len(keys))

	// For maps, the default formatting logic uses fmt.Stringer which may
	// produce ambiguous output. Avoid calling String to disambiguate.
	if k == reflect.Map {
		var ambiguous bool
		seenKeys := map[string]reflect.Value{}
		for i, currKey := range keys {
			if currKey.IsValid() {
				strKey := list[i].Key
				prevKey, seen := seenKeys[strKey]
				if seen && prevKey.CanInterface() && currKey.CanInterface() {
					ambiguous = prevKey.Interface() != currKey.Interface()
					if ambiguous {
						break
					}
				}
				seenKeys[strKey] = currKey
			}
		}
		if ambiguous {
			for i, k := range keys {
				if k.IsValid() {
					list[i].Key = formatMapKey(k, true, ptrs)
				}
			}
		}
	}

	return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
}

// coalesceAdjacentRecords coalesces the list of records into groups of
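The new verbosity plumbing in report_compare.go caps how many records a single node may print: with LimitVerbosity set, identical nodes get a budget of ((1 << verbosity) >> 1) << 2 records and differing nodes get (1 << verbosity) << 1, exactly as in the formatDiffList change above. A quick standalone table of those budgets, as an editorial sketch:

package main

import "fmt"

func main() {
	for v := uint(0); v <= 4; v++ {
		identical := ((1 << v) >> 1) << 2 // 0, 4, 8, 16, 32, ...
		differing := (1 << v) << 1        // 2, 4, 8, 16, 32, ...
		fmt.Printf("verbosity %d: identical=%2d differing=%2d\n", v, identical, differing)
	}
}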
264  vendor/github.com/google/go-cmp/cmp/report_references.go  (generated, vendored, new file)
@@ -0,0 +1,264 @@
// Copyright 2020, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

package cmp

import (
    "fmt"
    "reflect"
    "strings"

    "github.com/google/go-cmp/cmp/internal/flags"
    "github.com/google/go-cmp/cmp/internal/value"
)

const (
    pointerDelimPrefix = "⟪"
    pointerDelimSuffix = "⟫"
)

// formatPointer prints the address of the pointer.
func formatPointer(p value.Pointer, withDelims bool) string {
    v := p.Uintptr()
    if flags.Deterministic {
        v = 0xdeadf00f // Only used for stable testing purposes
    }
    if withDelims {
        return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix
    }
    return formatHex(uint64(v))
}

// pointerReferences is a stack of pointers visited so far.
type pointerReferences [][2]value.Pointer

func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
    if deref && vx.IsValid() {
        vx = vx.Addr()
    }
    if deref && vy.IsValid() {
        vy = vy.Addr()
    }
    switch d {
    case diffUnknown, diffIdentical:
        pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
    case diffRemoved:
        pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
    case diffInserted:
        pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
    }
    *ps = append(*ps, pp)
    return pp
}

func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
    p = value.PointerOf(v)
    for _, pp := range *ps {
        if p == pp[0] || p == pp[1] {
            return p, true
        }
    }
    *ps = append(*ps, [2]value.Pointer{p, p})
    return p, false
}

func (ps *pointerReferences) Pop() {
    *ps = (*ps)[:len(*ps)-1]
}

// trunkReferences is metadata for a textNode indicating that the sub-tree
// represents the value for either pointer in a pair of references.
type trunkReferences struct{ pp [2]value.Pointer }

// trunkReference is metadata for a textNode indicating that the sub-tree
// represents the value for the given pointer reference.
type trunkReference struct{ p value.Pointer }

// leafReference is metadata for a textNode indicating that the value is
// truncated as it refers to another part of the tree (i.e., a trunk).
type leafReference struct{ p value.Pointer }

func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
    switch {
    case pp[0].IsNil():
        return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
    case pp[1].IsNil():
        return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
    case pp[0] == pp[1]:
        return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
    default:
        return &textWrap{Value: s, Metadata: trunkReferences{pp}}
    }
}
func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
    var prefix string
    if printAddress {
        prefix = formatPointer(p, true)
    }
    return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}}
}
func makeLeafReference(p value.Pointer, printAddress bool) textNode {
    out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"}
    var prefix string
    if printAddress {
        prefix = formatPointer(p, true)
    }
    return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}}
}

// resolveReferences walks the textNode tree searching for any leaf reference
// metadata and resolves each against the corresponding trunk references.
// Since pointer addresses in memory are not particularly readable to the user,
// it replaces each pointer value with an arbitrary and unique reference ID.
func resolveReferences(s textNode) {
    var walkNodes func(textNode, func(textNode))
    walkNodes = func(s textNode, f func(textNode)) {
        f(s)
        switch s := s.(type) {
        case *textWrap:
            walkNodes(s.Value, f)
        case textList:
            for _, r := range s {
                walkNodes(r.Value, f)
            }
        }
    }

    // Collect all trunks and leaves with reference metadata.
    var trunks, leaves []*textWrap
    walkNodes(s, func(s textNode) {
        if s, ok := s.(*textWrap); ok {
            switch s.Metadata.(type) {
            case leafReference:
                leaves = append(leaves, s)
            case trunkReference, trunkReferences:
                trunks = append(trunks, s)
            }
        }
    })

    // No leaf references to resolve.
    if len(leaves) == 0 {
        return
    }

    // Collect the set of all leaf references to resolve.
    leafPtrs := make(map[value.Pointer]bool)
    for _, leaf := range leaves {
        leafPtrs[leaf.Metadata.(leafReference).p] = true
    }

    // Collect the set of trunk pointers that are always paired together.
    // This allows us to assign a single ID to both pointers for brevity.
    // If a pointer in a pair ever occurs by itself or as a different pair,
    // then the pair is broken.
    pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
    unpair := func(p value.Pointer) {
        if !pairedTrunkPtrs[p].IsNil() {
            pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
        }
        pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
    }
    for _, trunk := range trunks {
        switch p := trunk.Metadata.(type) {
        case trunkReference:
            unpair(p.p) // standalone pointer cannot be part of a pair
        case trunkReferences:
            p0, ok0 := pairedTrunkPtrs[p.pp[0]]
            p1, ok1 := pairedTrunkPtrs[p.pp[1]]
            switch {
            case !ok0 && !ok1:
                // Register the newly seen pair.
                pairedTrunkPtrs[p.pp[0]] = p.pp[1]
                pairedTrunkPtrs[p.pp[1]] = p.pp[0]
            case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
                // Exact pair already seen; do nothing.
            default:
                // Pair conflicts with some other pair; break all pairs.
                unpair(p.pp[0])
                unpair(p.pp[1])
            }
        }
    }

    // Correlate each pointer referenced by leaves to a unique identifier,
    // and print the IDs for each trunk that matches those pointers.
    var nextID uint
    ptrIDs := make(map[value.Pointer]uint)
    newID := func() uint {
        id := nextID
        nextID++
        return id
    }
    for _, trunk := range trunks {
        switch p := trunk.Metadata.(type) {
        case trunkReference:
            if print := leafPtrs[p.p]; print {
                id, ok := ptrIDs[p.p]
                if !ok {
                    id = newID()
                    ptrIDs[p.p] = id
                }
                trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
            }
        case trunkReferences:
            print0 := leafPtrs[p.pp[0]]
            print1 := leafPtrs[p.pp[1]]
            if print0 || print1 {
                id0, ok0 := ptrIDs[p.pp[0]]
                id1, ok1 := ptrIDs[p.pp[1]]
                isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
                if isPair {
                    var id uint
                    assert(ok0 == ok1) // must be seen together or not at all
                    if ok0 {
                        assert(id0 == id1) // must have the same ID
                        id = id0
                    } else {
                        id = newID()
                        ptrIDs[p.pp[0]] = id
                        ptrIDs[p.pp[1]] = id
                    }
                    trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
                } else {
                    if print0 && !ok0 {
                        id0 = newID()
                        ptrIDs[p.pp[0]] = id0
                    }
                    if print1 && !ok1 {
                        id1 = newID()
                        ptrIDs[p.pp[1]] = id1
                    }
                    switch {
                    case print0 && print1:
                        trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
                    case print0:
                        trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
                    case print1:
                        trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
                    }
                }
            }
        }
    }

    // Update all leaf references with the unique identifier.
    for _, leaf := range leaves {
        if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
            leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
        }
    }
}

func formatReference(id uint) string {
    return fmt.Sprintf("ref#%d", id)
}

func updateReferencePrefix(prefix, ref string) string {
    if prefix == "" {
        return pointerDelimPrefix + ref + pointerDelimSuffix
    }
    suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
    return pointerDelimPrefix + ref + ": " + suffix
}
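The reference machinery in report_references.go is what lets the reporter print cyclic values: a revisited pointer becomes a ref#N marker instead of being expanded again. A minimal sketch of how this surfaces through the public cmp.Diff entry point (the node type below is hypothetical and used only for illustration; it is not part of this repository):

package main

import (
    "fmt"

    "github.com/google/go-cmp/cmp"
)

// node is a hypothetical self-referential type used only for illustration.
type node struct {
    Name string
    Next *node
}

func main() {
    x := &node{Name: "a"}
    x.Next = x // x points back at itself
    y := &node{Name: "b"}
    y.Next = y

    // cmp.Diff follows the pointers; with the cycle support in this vendored
    // version the repeated pointer should be rendered as a reference ID
    // instead of recursing forever. The exact marker text is an
    // implementation detail and may differ between go-cmp releases.
    fmt.Println(cmp.Diff(x, y))
}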
241  vendor/github.com/google/go-cmp/cmp/report_reflect.go  (generated, vendored)
@@ -10,8 +10,8 @@ import (
     "strconv"
     "strings"
     "unicode"
+    "unicode/utf8"
 
-    "github.com/google/go-cmp/cmp/internal/flags"
     "github.com/google/go-cmp/cmp/internal/value"
 )
@@ -20,14 +20,22 @@ type formatValueOptions struct {
     // methods like error.Error or fmt.Stringer.String.
     AvoidStringer bool
 
-    // ShallowPointers controls whether to avoid descending into pointers.
-    // Useful when printing map keys, where pointer comparison is performed
-    // on the pointer address rather than the pointed-at value.
-    ShallowPointers bool
-
     // PrintAddresses controls whether to print the address of all pointers,
     // slice elements, and maps.
     PrintAddresses bool
+
+    // QualifiedNames controls whether FormatType uses the fully qualified name
+    // (including the full package path as opposed to just the package name).
+    QualifiedNames bool
+
+    // VerbosityLevel controls the amount of output to produce.
+    // A higher value produces more output. A value of zero or lower produces
+    // no output (represented using an ellipsis).
+    // If LimitVerbosity is false, then the level is treated as infinite.
+    VerbosityLevel int
+
+    // LimitVerbosity specifies that formatting should respect VerbosityLevel.
+    LimitVerbosity bool
 }
 
 // FormatType prints the type as if it were wrapping s.
@@ -44,12 +52,15 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
         default:
             return s
         }
+        if opts.DiffMode == diffIdentical {
+            return s // elide type for identical nodes
+        }
     case elideType:
         return s
     }
 
     // Determine the type label, applying special handling for unnamed types.
-    typeName := t.String()
+    typeName := value.TypeString(t, opts.QualifiedNames)
     if t.Name() == "" {
         // According to Go grammar, certain type literals contain symbols that
         // do not strongly bind to the next lexicographical token (e.g., *T).
@@ -57,39 +68,78 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
         case reflect.Chan, reflect.Func, reflect.Ptr:
             typeName = "(" + typeName + ")"
         }
-        typeName = strings.Replace(typeName, "struct {", "struct{", -1)
-        typeName = strings.Replace(typeName, "interface {", "interface{", -1)
     }
+    return &textWrap{Prefix: typeName, Value: wrapParens(s)}
+}
 
-    // Avoid wrap the value in parenthesis if unnecessary.
-    if s, ok := s.(textWrap); ok {
-        hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")")
-        hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}")
+// wrapParens wraps s with a set of parenthesis, but avoids it if the
+// wrapped node itself is already surrounded by a pair of parenthesis or braces.
+// It handles unwrapping one level of pointer-reference nodes.
+func wrapParens(s textNode) textNode {
+    var refNode *textWrap
+    if s2, ok := s.(*textWrap); ok {
+        // Unwrap a single pointer reference node.
+        switch s2.Metadata.(type) {
+        case leafReference, trunkReference, trunkReferences:
+            refNode = s2
+            if s3, ok := refNode.Value.(*textWrap); ok {
+                s2 = s3
+            }
+        }
+
+        // Already has delimiters that make parenthesis unnecessary.
+        hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
+        hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
         if hasParens || hasBraces {
-            return textWrap{typeName, s, ""}
+            return s
         }
     }
-    return textWrap{typeName + "(", s, ")"}
+    if refNode != nil {
+        refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
+        return s
+    }
+    return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
 }
 
 // FormatValue prints the reflect.Value, taking extra care to avoid descending
-// into pointers already in m. As pointers are visited, m is also updated.
-func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) {
+// into pointers already in ptrs. As pointers are visited, ptrs is also updated.
+func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
     if !v.IsValid() {
         return nil
     }
     t := v.Type()
 
+    // Check slice element for cycles.
+    if parentKind == reflect.Slice {
+        ptrRef, visited := ptrs.Push(v.Addr())
+        if visited {
+            return makeLeafReference(ptrRef, false)
+        }
+        defer ptrs.Pop()
+        defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
+    }
+
     // Check whether there is an Error or String method to call.
     if !opts.AvoidStringer && v.CanInterface() {
         // Avoid calling Error or String methods on nil receivers since many
         // implementations crash when doing so.
         if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
+            var prefix, strVal string
             switch v := v.Interface().(type) {
             case error:
-                return textLine("e" + formatString(v.Error()))
+                prefix, strVal = "e", v.Error()
             case fmt.Stringer:
-                return textLine("s" + formatString(v.String()))
+                prefix, strVal = "s", v.String()
+            }
+            if prefix != "" {
+                maxLen := len(strVal)
+                if opts.LimitVerbosity {
+                    maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
+                }
+                if len(strVal) > maxLen+len(textEllipsis) {
+                    return textLine(prefix + formatString(strVal[:maxLen]) + string(textEllipsis))
+                }
+                return textLine(prefix + formatString(strVal))
             }
         }
     }
@@ -102,94 +152,136 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t
         }
     }()
 
-    var ptr string
     switch t.Kind() {
     case reflect.Bool:
         return textLine(fmt.Sprint(v.Bool()))
     case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
         return textLine(fmt.Sprint(v.Int()))
-    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-        // Unnamed uints are usually bytes or words, so use hexadecimal.
-        if t.PkgPath() == "" || t.Kind() == reflect.Uintptr {
+    case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+        return textLine(fmt.Sprint(v.Uint()))
+    case reflect.Uint8:
+        if parentKind == reflect.Slice || parentKind == reflect.Array {
             return textLine(formatHex(v.Uint()))
         }
         return textLine(fmt.Sprint(v.Uint()))
+    case reflect.Uintptr:
+        return textLine(formatHex(v.Uint()))
     case reflect.Float32, reflect.Float64:
         return textLine(fmt.Sprint(v.Float()))
     case reflect.Complex64, reflect.Complex128:
         return textLine(fmt.Sprint(v.Complex()))
     case reflect.String:
+        maxLen := v.Len()
+        if opts.LimitVerbosity {
+            maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
+        }
+        if v.Len() > maxLen+len(textEllipsis) {
+            return textLine(formatString(v.String()[:maxLen]) + string(textEllipsis))
+        }
         return textLine(formatString(v.String()))
     case reflect.UnsafePointer, reflect.Chan, reflect.Func:
-        return textLine(formatPointer(v))
+        return textLine(formatPointer(value.PointerOf(v), true))
     case reflect.Struct:
         var list textList
+        v := makeAddressable(v) // needed for retrieveUnexportedField
+        maxLen := v.NumField()
+        if opts.LimitVerbosity {
+            maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+            opts.VerbosityLevel--
+        }
         for i := 0; i < v.NumField(); i++ {
             vv := v.Field(i)
             if value.IsZero(vv) {
                 continue // Elide fields with zero values
             }
-            s := opts.WithTypeMode(autoType).FormatValue(vv, m)
-            list = append(list, textRecord{Key: t.Field(i).Name, Value: s})
+            if len(list) == maxLen {
+                list.AppendEllipsis(diffStats{})
+                break
+            }
+            sf := t.Field(i)
+            if supportExporters && !isExported(sf.Name) {
+                vv = retrieveUnexportedField(v, sf, true)
+            }
+            s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
+            list = append(list, textRecord{Key: sf.Name, Value: s})
         }
-        return textWrap{"{", list, "}"}
+        return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
     case reflect.Slice:
         if v.IsNil() {
             return textNil
         }
-        if opts.PrintAddresses {
-            ptr = formatPointer(v)
-        }
         fallthrough
     case reflect.Array:
+        maxLen := v.Len()
+        if opts.LimitVerbosity {
+            maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+            opts.VerbosityLevel--
+        }
         var list textList
         for i := 0; i < v.Len(); i++ {
-            vi := v.Index(i)
-            if vi.CanAddr() { // Check for cyclic elements
-                p := vi.Addr()
-                if m.Visit(p) {
-                    var out textNode
-                    out = textLine(formatPointer(p))
-                    out = opts.WithTypeMode(emitType).FormatType(p.Type(), out)
-                    out = textWrap{"*", out, ""}
-                    list = append(list, textRecord{Value: out})
-                    continue
-                }
+            if len(list) == maxLen {
+                list.AppendEllipsis(diffStats{})
+                break
             }
-            s := opts.WithTypeMode(elideType).FormatValue(vi, m)
+            s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs)
             list = append(list, textRecord{Value: s})
         }
-        return textWrap{ptr + "{", list, "}"}
+        out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+        if t.Kind() == reflect.Slice && opts.PrintAddresses {
+            header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap())
+            out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out}
+        }
+        return out
     case reflect.Map:
         if v.IsNil() {
             return textNil
         }
-        if m.Visit(v) {
-            return textLine(formatPointer(v))
-        }
+
+        // Check pointer for cycles.
+        ptrRef, visited := ptrs.Push(v)
+        if visited {
+            return makeLeafReference(ptrRef, opts.PrintAddresses)
+        }
+        defer ptrs.Pop()
+
+        maxLen := v.Len()
+        if opts.LimitVerbosity {
+            maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+            opts.VerbosityLevel--
+        }
         var list textList
         for _, k := range value.SortKeys(v.MapKeys()) {
-            sk := formatMapKey(k)
-            sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m)
+            if len(list) == maxLen {
+                list.AppendEllipsis(diffStats{})
+                break
+            }
+            sk := formatMapKey(k, false, ptrs)
+            sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs)
             list = append(list, textRecord{Key: sk, Value: sv})
         }
-        if opts.PrintAddresses {
-            ptr = formatPointer(v)
-        }
-        return textWrap{ptr + "{", list, "}"}
+        out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+        out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
+        return out
     case reflect.Ptr:
         if v.IsNil() {
             return textNil
         }
-        if m.Visit(v) || opts.ShallowPointers {
-            return textLine(formatPointer(v))
-        }
-        if opts.PrintAddresses {
-            ptr = formatPointer(v)
-        }
+
+        // Check pointer for cycles.
+        ptrRef, visited := ptrs.Push(v)
+        if visited {
+            out = makeLeafReference(ptrRef, opts.PrintAddresses)
+            return &textWrap{Prefix: "&", Value: out}
+        }
+        defer ptrs.Pop()
+
         skipType = true // Let the underlying value print the type instead
-        return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""}
+        out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
+        out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
+        out = &textWrap{Prefix: "&", Value: out}
+        return out
     case reflect.Interface:
         if v.IsNil() {
             return textNil
@@ -197,7 +289,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t
         // Interfaces accept different concrete types,
         // so configure the underlying value to explicitly print the type.
         skipType = true // Print the concrete type instead
-        return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m)
+        return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
     default:
         panic(fmt.Sprintf("%v kind not handled", v.Kind()))
     }
@@ -205,11 +297,14 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t
 
 // formatMapKey formats v as if it were a map key.
 // The result is guaranteed to be a single line.
-func formatMapKey(v reflect.Value) string {
+func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
     var opts formatOptions
+    opts.DiffMode = diffIdentical
     opts.TypeMode = elideType
-    opts.ShallowPointers = true
-    s := opts.FormatValue(v, visitedPointers{}).String()
+    opts.PrintAddresses = disambiguate
+    opts.AvoidStringer = disambiguate
+    opts.QualifiedNames = disambiguate
+    s := opts.FormatValue(v, reflect.Map, ptrs).String()
     return strings.TrimSpace(s)
 }
 
@@ -227,7 +322,7 @@ func formatString(s string) string {
     rawInvalid := func(r rune) bool {
         return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
     }
-    if strings.IndexFunc(s, rawInvalid) < 0 {
+    if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
         return "`" + s + "`"
     }
     return qs
@@ -256,23 +351,3 @@ func formatHex(u uint64) string {
     }
     return fmt.Sprintf(f, u)
 }
-
-// formatPointer prints the address of the pointer.
-func formatPointer(v reflect.Value) string {
-    p := v.Pointer()
-    if flags.Deterministic {
-        p = 0xdeadf00f // Only used for stable testing purposes
-    }
-    return fmt.Sprintf("⟪0x%x⟫", p)
-}
-
-type visitedPointers map[value.Pointer]struct{}
-
-// Visit inserts pointer v into the visited map and reports whether it had
-// already been visited before.
-func (m visitedPointers) Visit(v reflect.Value) bool {
-    p := value.PointerOf(v)
-    _, visited := m[p]
-    m[p] = struct{}{}
-    return visited
-}
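One user-visible effect of the FormatValue changes above is that byte elements inside slices and arrays are formatted as hexadecimal and long values can be elided. A small, hedged sketch via the public cmp.Diff API (illustrative only; the exact report layout depends on the go-cmp version):

package main

import (
    "fmt"

    "github.com/google/go-cmp/cmp"
)

func main() {
    x := []byte{0x01, 0x02, 0x03}
    y := []byte{0x01, 0xff, 0x03}

    // The differing byte should be reported in hex form (e.g. 0xff) rather
    // than decimal, per the reflect.Uint8 handling in the vendored code above.
    fmt.Println(cmp.Diff(x, y))
}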
135  vendor/github.com/google/go-cmp/cmp/report_slices.go  (generated, vendored)
@@ -8,6 +8,7 @@ import (
     "bytes"
     "fmt"
     "reflect"
+    "strconv"
     "strings"
     "unicode"
     "unicode/utf8"
@@ -23,11 +24,25 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
         return false // Must be formatting in diff mode
     case v.NumDiff == 0:
         return false // No differences detected
-    case v.NumIgnored+v.NumCompared+v.NumTransformed > 0:
-        // TODO: Handle the case where someone uses bytes.Equal on a large slice.
-        return false // Some custom option was used to determined equality
     case !v.ValueX.IsValid() || !v.ValueY.IsValid():
         return false // Both values must be valid
+    case v.Type.Kind() == reflect.Slice && (v.ValueX.Len() == 0 || v.ValueY.Len() == 0):
+        return false // Both slice values have to be non-empty
+    case v.NumIgnored > 0:
+        return false // Some ignore option was used
+    case v.NumTransformed > 0:
+        return false // Some transform option was used
+    case v.NumCompared > 1:
+        return false // More than one comparison was used
+    case v.NumCompared == 1 && v.Type.Name() != "":
+        // The need for cmp to check applicability of options on every element
+        // in a slice is a significant performance detriment for large []byte.
+        // The workaround is to specify Comparer(bytes.Equal),
+        // which enables cmp to compare []byte more efficiently.
+        // If they differ, we still want to provide batched diffing.
+        // The logic disallows named types since they tend to have their own
+        // String method, with nicer formatting than what this provides.
+        return false
     }
 
     switch t := v.Type; t.Kind() {
@@ -82,7 +97,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
     }
     if isText || isBinary {
         var numLines, lastLineIdx, maxLineLen int
-        isBinary = false
+        isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy)
         for i, r := range sx + sy {
             if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError {
                 isBinary = true
@@ -97,7 +112,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
             }
         }
         isText = !isBinary
-        isLinedText = isText && numLines >= 4 && maxLineLen <= 256
+        isLinedText = isText && numLines >= 4 && maxLineLen <= 1024
     }
 
     // Format the string into printable records.
@@ -117,6 +132,83 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
             },
         )
         delim = "\n"
+
+        // If possible, use a custom triple-quote (""") syntax for printing
+        // differences in a string literal. This format is more readable,
+        // but has edge-cases where differences are visually indistinguishable.
+        // This format is avoided under the following conditions:
+        //    • A line starts with `"""`
+        //    • A line starts with "..."
+        //    • A line contains non-printable characters
+        //    • Adjacent different lines differ only by whitespace
+        //
+        // For example:
+        //        """
+        //        ... // 3 identical lines
+        //        foo
+        //        bar
+        // -      baz
+        // +      BAZ
+        //        """
+        isTripleQuoted := true
+        prevRemoveLines := map[string]bool{}
+        prevInsertLines := map[string]bool{}
+        var list2 textList
+        list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
+        for _, r := range list {
+            if !r.Value.Equal(textEllipsis) {
+                line, _ := strconv.Unquote(string(r.Value.(textLine)))
+                line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
+                normLine := strings.Map(func(r rune) rune {
+                    if unicode.IsSpace(r) {
+                        return -1 // drop whitespace to avoid visually indistinguishable output
+                    }
+                    return r
+                }, line)
+                isPrintable := func(r rune) bool {
+                    return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
+                }
+                isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == ""
+                switch r.Diff {
+                case diffRemoved:
+                    isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine]
+                    prevRemoveLines[normLine] = true
+                case diffInserted:
+                    isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine]
+                    prevInsertLines[normLine] = true
+                }
+                if !isTripleQuoted {
+                    break
+                }
+                r.Value = textLine(line)
+                r.ElideComma = true
+            }
+            if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group
+                prevRemoveLines = map[string]bool{}
+                prevInsertLines = map[string]bool{}
+            }
+            list2 = append(list2, r)
+        }
+        if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 {
+            list2 = list2[:len(list2)-1] // elide single empty line at the end
+        }
+        list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
+        if isTripleQuoted {
+            var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
+            switch t.Kind() {
+            case reflect.String:
+                if t != reflect.TypeOf(string("")) {
+                    out = opts.FormatType(t, out)
+                }
+            case reflect.Slice:
+                // Always emit type for slices since the triple-quote syntax
+                // looks like a string (not a slice).
+                opts = opts.WithTypeMode(emitType)
+                out = opts.FormatType(t, out)
+            }
+            return out
+        }
 
     // If the text appears to be single-lined text,
     // then perform differencing in approximately fixed-sized chunks.
     // The output is printed as quoted strings.
@@ -129,6 +221,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
             },
         )
         delim = ""
+
     // If the text appears to be binary data,
     // then perform differencing in approximately fixed-sized chunks.
     // The output is inspired by hexdump.
@@ -145,6 +238,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
                 return textRecord{Diff: d, Value: textLine(s), Comment: comment}
             },
         )
+
     // For all other slices of primitive types,
     // then perform differencing in approximately fixed-sized chunks.
     // The size of each chunk depends on the width of the element kind.
@@ -172,7 +266,9 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
             switch t.Elem().Kind() {
             case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
                 ss = append(ss, fmt.Sprint(v.Index(i).Int()))
-            case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+            case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+                ss = append(ss, fmt.Sprint(v.Index(i).Uint()))
+            case reflect.Uint8, reflect.Uintptr:
                 ss = append(ss, formatHex(v.Index(i).Uint()))
             case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
                 ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
@@ -185,7 +281,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
     }
 
     // Wrap the output with appropriate type information.
-    var out textNode = textWrap{"{", list, "}"}
+    var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
     if !isText {
         // The "{...}" byte-sequence literal is not valid Go syntax for strings.
         // Emit the type for extra clarity (e.g. "string{...}").
@@ -196,12 +292,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
     }
     switch t.Kind() {
     case reflect.String:
-        out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)}
+        out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
         if t != reflect.TypeOf(string("")) {
             out = opts.FormatType(t, out)
         }
     case reflect.Slice:
-        out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)}
+        out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
         if t != reflect.TypeOf([]byte(nil)) {
             out = opts.FormatType(t, out)
         }
@@ -242,9 +338,22 @@ func (opts formatOptions) formatDiffSlice(
         return n0 - v.Len()
     }
 
+    var numDiffs int
+    maxLen := -1
+    if opts.LimitVerbosity {
+        maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
+        opts.VerbosityLevel--
+    }
+
     groups := coalesceAdjacentEdits(name, es)
     groups = coalesceInterveningIdentical(groups, chunkSize/4)
+    maxGroup := diffStats{Name: name}
     for i, ds := range groups {
+        if maxLen >= 0 && numDiffs >= maxLen {
+            maxGroup = maxGroup.Append(ds)
+            continue
+        }
+
         // Print equal.
         if ds.NumDiff() == 0 {
             // Compute the number of leading and trailing equal bytes to print.
@@ -273,12 +382,18 @@ func (opts formatOptions) formatDiffSlice(
         }
 
         // Print unequal.
+        len0 := len(list)
         nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
         vx = vx.Slice(nx, vx.Len())
         ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
         vy = vy.Slice(ny, vy.Len())
+        numDiffs += len(list) - len0
+    }
+    if maxGroup.IsZero() {
+        assert(vx.Len() == 0 && vy.Len() == 0)
+    } else {
+        list.AppendEllipsis(maxGroup)
     }
-    assert(vx.Len() == 0 && vy.Len() == 0)
     return list
 }
 
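The block above introduces the triple-quoted layout for multi-line string diffs. A short sketch of when it can kick in, again through the public cmp.Diff API (illustrative only; the formatter may still fall back to single quoted strings for short or non-printable input):

package main

import (
    "fmt"

    "github.com/google/go-cmp/cmp"
)

func main() {
    want := "alpha\nbravo\ncharlie\ndelta\n"
    got := "alpha\nbravo\nCHARLIE\ndelta\n"

    // With at least four lines of mostly printable text, the vendored
    // formatter may choose the triple-quoted ("""...""") layout, marking
    // removed and inserted lines individually rather than quoting the
    // whole string on one line.
    fmt.Println(cmp.Diff(want, got))
}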
86  vendor/github.com/google/go-cmp/cmp/report_text.go  (generated, vendored)
@@ -10,12 +10,15 @@ import (
     "math/rand"
     "strings"
     "time"
+    "unicode/utf8"
 
     "github.com/google/go-cmp/cmp/internal/flags"
 )
 
 var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
 
+const maxColumnLength = 80
+
 type indentMode int
 
 func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
@@ -91,21 +94,22 @@ type textNode interface {
 // textWrap is a wrapper that concatenates a prefix and/or a suffix
 // to the underlying node.
 type textWrap struct {
     Prefix string   // e.g., "bytes.Buffer{"
     Value  textNode // textWrap | textList | textLine
     Suffix string   // e.g., "}"
+    Metadata interface{} // arbitrary metadata; has no effect on formatting
 }
 
-func (s textWrap) Len() int {
+func (s *textWrap) Len() int {
     return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
 }
-func (s1 textWrap) Equal(s2 textNode) bool {
-    if s2, ok := s2.(textWrap); ok {
+func (s1 *textWrap) Equal(s2 textNode) bool {
+    if s2, ok := s2.(*textWrap); ok {
         return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
     }
     return false
 }
-func (s textWrap) String() string {
+func (s *textWrap) String() string {
     var d diffMode
     var n indentMode
     _, s2 := s.formatCompactTo(nil, d)
@@ -114,7 +118,7 @@ func (s textWrap) String() string {
     b = append(b, '\n') // Trailing newline
     return string(b)
 }
-func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
     n0 := len(b) // Original buffer length
     b = append(b, s.Prefix...)
     b, s.Value = s.Value.formatCompactTo(b, d)
@@ -124,7 +128,7 @@ func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
     }
     return b, s
 }
-func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
     b = append(b, s.Prefix...)
     b = s.Value.formatExpandedTo(b, d, n)
     b = append(b, s.Suffix...)
@@ -136,22 +140,23 @@ func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
 // of the textList.formatCompactTo method.
 type textList []textRecord
 type textRecord struct {
     Diff  diffMode // e.g., 0 or '-' or '+'
     Key   string   // e.g., "MyField"
     Value textNode // textWrap | textLine
-    Comment fmt.Stringer // e.g., "6 identical fields"
+    ElideComma bool // avoid trailing comma
+    Comment fmt.Stringer // e.g., "6 identical fields"
 }
 
 // AppendEllipsis appends a new ellipsis node to the list if none already
 // exists at the end. If cs is non-zero it coalesces the statistics with the
 // previous diffStats.
 func (s *textList) AppendEllipsis(ds diffStats) {
-    hasStats := ds != diffStats{}
+    hasStats := !ds.IsZero()
     if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
         if hasStats {
-            *s = append(*s, textRecord{Value: textEllipsis, Comment: ds})
+            *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds})
         } else {
-            *s = append(*s, textRecord{Value: textEllipsis})
+            *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true})
        }
         return
     }
@@ -191,7 +196,7 @@ func (s1 textList) Equal(s2 textNode) bool {
 }
 
 func (s textList) String() string {
-    return textWrap{"{", s, "}"}.String()
+    return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String()
 }
 
 func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
@@ -221,7 +226,7 @@ func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
     }
     // Force multi-lined output when printing a removed/inserted node that
     // is sufficiently long.
-    if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 {
+    if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength {
         multiLine = true
     }
     if !multiLine {
@@ -236,16 +241,50 @@ func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
             _, isLine := r.Value.(textLine)
             return r.Key == "" || !isLine
         },
-        func(r textRecord) int { return len(r.Key) },
+        func(r textRecord) int { return utf8.RuneCountInString(r.Key) },
     )
     alignValueLens := s.alignLens(
         func(r textRecord) bool {
             _, isLine := r.Value.(textLine)
             return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
         },
-        func(r textRecord) int { return len(r.Value.(textLine)) },
+        func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) },
     )
 
+    // Format lists of simple lists in a batched form.
+    // If the list is sequence of only textLine values,
+    // then batch multiple values on a single line.
+    var isSimple bool
+    for _, r := range s {
+        _, isLine := r.Value.(textLine)
+        isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil
+        if !isSimple {
+            break
+        }
+    }
+    if isSimple {
+        n++
+        var batch []byte
+        emitBatch := func() {
+            if len(batch) > 0 {
+                b = n.appendIndent(append(b, '\n'), d)
+                b = append(b, bytes.TrimRight(batch, " ")...)
+                batch = batch[:0]
+            }
+        }
+        for _, r := range s {
+            line := r.Value.(textLine)
+            if len(batch)+len(line)+len(", ") > maxColumnLength {
+                emitBatch()
+            }
+            batch = append(batch, line...)
+            batch = append(batch, ", "...)
        }
+        emitBatch()
+        n--
+        return n.appendIndent(append(b, '\n'), d)
+    }
+
     // Format the list as a multi-lined output.
     n++
     for i, r := range s {
@@ -256,7 +295,7 @@ func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
             b = alignKeyLens[i].appendChar(b, ' ')
 
         b = r.Value.formatExpandedTo(b, d|r.Diff, n)
-        if !r.Value.Equal(textEllipsis) {
+        if !r.ElideComma {
             b = append(b, ',')
         }
         b = alignValueLens[i].appendChar(b, ' ')
@@ -332,6 +371,11 @@ type diffStats struct {
     NumModified int
 }
 
+func (s diffStats) IsZero() bool {
+    s.Name = ""
+    return s == diffStats{}
+}
+
 func (s diffStats) NumDiff() int {
     return s.NumRemoved + s.NumInserted + s.NumModified
 }
2
vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore
generated
vendored
2
vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore
generated
vendored
@ -200,5 +200,3 @@ coverage.txt
|
|||||||
|
|
||||||
#vendor
|
#vendor
|
||||||
vendor/
|
vendor/
|
||||||
|
|
||||||
.envrc
|
|
16
vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
generated
vendored
16
vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
generated
vendored
@ -1,12 +1,18 @@
|
|||||||
sudo: false
|
sudo: false
|
||||||
language: go
|
language: go
|
||||||
go:
|
go:
|
||||||
- 1.11.x
|
- 1.8.x
|
||||||
- 1.12.x
|
|
||||||
|
|
||||||
env:
|
env:
|
||||||
global:
|
- DEP_VERSION="0.3.2"
|
||||||
- GO111MODULE=on
|
|
||||||
|
before_install:
|
||||||
|
# Download the binary to bin folder in $GOPATH
|
||||||
|
- curl -L -s https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 -o $GOPATH/bin/dep
|
||||||
|
# Make the binary executable
|
||||||
|
- chmod +x $GOPATH/bin/dep
|
||||||
|
|
||||||
|
install:
|
||||||
|
- dep ensure
|
||||||
|
|
||||||
script:
|
script:
|
||||||
- make test
|
- make test
|
||||||
|
27
vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
generated
vendored
27
vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
generated
vendored
@ -13,30 +13,10 @@ Types of changes:
|
|||||||
- `Security` in case of vulnerabilities.
|
- `Security` in case of vulnerabilities.
|
||||||
|
|
||||||
## [Unreleased]
|
## [Unreleased]
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
|
- This CHANGELOG file to keep track of changes.
|
||||||
|
|
||||||
- [#223](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/223) Add go-kit logging middleware - [adrien-f](https://github.com/adrien-f)
|
## 1.0.0 - 2018-05-08
|
||||||
|
|
||||||
## [v1.1.0] - 2019-09-12
|
|
||||||
### Added
|
|
||||||
- [#226](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/226) Support for go modules.
|
|
||||||
- [#221](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/221) logging/zap add support for gRPC LoggerV2 - [kush-patel-hs](https://github.com/kush-patel-hs)
|
|
||||||
- [#181](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/181) Rate Limit support - [ceshihao](https://github.com/ceshihao)
|
|
||||||
- [#161](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/161) Retry on server stream call - [lonnblad](https://github.com/lonnblad)
|
|
||||||
- [#152](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/152) Exponential backoff functions - [polyfloyd](https://github.com/polyfloyd)
|
|
||||||
- [#147](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/147) Jaeger support for ctxtags extraction - [vporoshok](https://github.com/vporoshok)
|
|
||||||
- [#184](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/184) ctxTags identifies if the call was sampled
|
|
||||||
|
|
||||||
### Deprecated
|
|
||||||
- [#201](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/201) `golang.org/x/net/context` - [houz42](https://github.com/houz42)
|
|
||||||
- [#183](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/183) Documentation Generation in favour of <godoc.org>.
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- [172](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/172) Passing ctx into retry and recover - [johanbrandhorst](https://github.com/johanbrandhorst)
|
|
||||||
- Numerious documentation fixes.
|
|
||||||
|
|
||||||
## v1.0.0 - 2018-05-08
|
|
||||||
### Added
|
### Added
|
||||||
- grpc_auth
|
- grpc_auth
|
||||||
- grpc_ctxtags
|
- grpc_ctxtags
|
||||||
@ -47,5 +27,4 @@ Types of changes:
|
|||||||
- grpc_validator
|
- grpc_validator
|
||||||
- grpc_recovery
|
- grpc_recovery
|
||||||
|
|
||||||
[Unreleased]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.1.0...HEAD
|
[Unreleased]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.0.0...HEAD
|
||||||
[v1.1.0]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.0.0...v1.1.0
|
|
||||||
|
123
123 vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.lock generated vendored Normal file
@@ -0,0 +1,123 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.

[[projects]]
  name = "cloud.google.com/go"
  packages = ["compute/metadata"]
  revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
  version = "v0.16.0"

[[projects]]
  name = "github.com/davecgh/go-spew"
  packages = ["spew"]
  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
  version = "v1.1.0"

[[projects]]
  name = "github.com/gogo/protobuf"
  packages = ["gogoproto","proto","protoc-gen-gogo/descriptor"]
  revision = "342cbe0a04158f6dcb03ca0079991a51a4248c02"
  version = "v0.5"

[[projects]]
  branch = "master"
  name = "github.com/golang/protobuf"
  packages = ["jsonpb","proto","ptypes","ptypes/any","ptypes/duration","ptypes/struct","ptypes/timestamp"]
  revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"

[[projects]]
  name = "github.com/opentracing/opentracing-go"
  packages = [".","ext","log","mocktracer"]
  revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
  version = "v1.0.2"

[[projects]]
  name = "github.com/pmezard/go-difflib"
  packages = ["difflib"]
  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
  version = "v1.0.0"

[[projects]]
  name = "github.com/sirupsen/logrus"
  packages = ["."]
  revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
  version = "v1.0.3"

[[projects]]
  name = "github.com/stretchr/testify"
  packages = ["assert","require","suite"]
  revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
  version = "v1.1.4"

[[projects]]
  name = "go.uber.org/atomic"
  packages = ["."]
  revision = "8474b86a5a6f79c443ce4b2992817ff32cf208b8"
  version = "v1.3.1"

[[projects]]
  name = "go.uber.org/multierr"
  packages = ["."]
  revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
  version = "v1.1.0"

[[projects]]
  name = "go.uber.org/zap"
  packages = [".","buffer","internal/bufferpool","internal/color","internal/exit","zapcore"]
  revision = "35aad584952c3e7020db7b839f6b102de6271f89"
  version = "v1.7.1"

[[projects]]
  branch = "master"
  name = "golang.org/x/crypto"
  packages = ["ssh/terminal"]
  revision = "94eea52f7b742c7cbe0b03b22f0c4c8631ece122"

[[projects]]
  branch = "master"
  name = "golang.org/x/net"
  packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
  revision = "a8b9294777976932365dabb6640cf1468d95c70f"

[[projects]]
  branch = "master"
  name = "golang.org/x/oauth2"
  packages = [".","google","internal","jws","jwt"]
  revision = "f95fa95eaa936d9d87489b15d1d18b97c1ba9c28"

[[projects]]
  branch = "master"
  name = "golang.org/x/sys"
  packages = ["unix","windows"]
  revision = "13fcbd661c8ececa8807a29b48407d674b1d8ed8"

[[projects]]
  branch = "master"
  name = "golang.org/x/text"
  packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
  revision = "75cc3cad82b5f47d3fb229ddda8c5167da14f294"

[[projects]]
  name = "google.golang.org/appengine"
  packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
  revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
  version = "v1.0.0"

[[projects]]
  branch = "master"
  name = "google.golang.org/genproto"
  packages = ["googleapis/rpc/status"]
  revision = "7f0da29060c682909f650ad8ed4e515bd74fa12a"

[[projects]]
  name = "google.golang.org/grpc"
  packages = [".","balancer","balancer/roundrobin","codes","connectivity","credentials","credentials/oauth","encoding","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
  revision = "5a9f7b402fe85096d2e1d0383435ee1876e863d0"
  version = "v1.8.0"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "b24c6670412eb0bc44ed1db77fecc52333f8725f3e3272bdc568f5683a63031f"
  solver-name = "gps-cdcl"
  solver-version = 1
35 vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.toml generated vendored Normal file
@@ -0,0 +1,35 @@
[[constraint]]
  name = "github.com/gogo/protobuf"
  version = "0.5.0"

[[constraint]]
  branch = "master"
  name = "github.com/golang/protobuf"

[[constraint]]
  name = "github.com/opentracing/opentracing-go"
  version = "1.0.2"

[[constraint]]
  name = "github.com/sirupsen/logrus"
  version = "1.0.3"

[[constraint]]
  name = "github.com/stretchr/testify"
  version = "1.1.4"

[[constraint]]
  name = "go.uber.org/zap"
  version = "1.7.1"

[[constraint]]
  branch = "master"
  name = "golang.org/x/net"

[[constraint]]
  branch = "master"
  name = "golang.org/x/oauth2"

[[constraint]]
  name = "google.golang.org/grpc"
  version = "1.8.0"
5 vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md generated vendored
@@ -7,7 +7,7 @@
 [![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware)
 [![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
 [![quality: production](https://img.shields.io/badge/quality-production-orange.svg)](#status)
-[![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://slack.com/share/IRUQCFC23/9Tm7hxRFVKKNoajQfMOcUiIk/enQtODc4ODI4NTIyMDcxLWM5NDA0ZTE4Njg5YjRjYWZkMTI5MzQwNDY3YzBjMzE1YzdjOGM5ZjI1NDNiM2JmNzI2YjM5ODE5OTRiNTEyOWE)
+[![Slack](slack.png)](https://join.slack.com/t/improbable-eng/shared_invite/enQtMzQ1ODcyMzQ5MjM4LWY5ZWZmNGM2ODc5MmViNmQ3ZTA3ZTY3NzQwOTBlMTkzZmIxZTIxODk0OWU3YjZhNWVlNDU3MDlkZGViZjhkMjc)
 
 [gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities.
 
@@ -58,7 +58,7 @@ myServer := grpc.NewServer(
 * [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body
 * [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers.
 * [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers.
-* [`grpc_kit`](logging/kit/) - integration of [go-kit](https://github.com/go-kit/kit/tree/master/log) logging library into gRPC handlers.
 
 #### Monitoring
 * [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware
@@ -71,7 +71,6 @@ myServer := grpc.NewServer(
 #### Server
 * [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options
 * [`grpc_recovery`](recovery/) - turn panics into gRPC errors
-* [`ratelimit`](ratelimit/) - grpc rate limiting by your own limiter
 
 
 ## Status
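The README hunks above only touch badges and trim the interceptor list; the exported chaining API is unchanged by the version swap. For orientation, here is a minimal sketch of how these helpers are typically wired into a gRPC server. The combination of recovery and Prometheus interceptors is an assumption for illustration, not something taken from this diff.

```go
package main

import (
	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)

func newServer() *grpc.Server {
	// grpc.NewServer accepts only one unary and one stream interceptor,
	// so multiple middlewares are combined with the chaining helpers first.
	return grpc.NewServer(
		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
			grpc_prometheus.UnaryServerInterceptor,
			grpc_recovery.UnaryServerInterceptor(),
		)),
		grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
			grpc_prometheus.StreamServerInterceptor,
			grpc_recovery.StreamServerInterceptor(),
		)),
	)
}
```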
139 vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go generated vendored
@@ -6,8 +6,7 @@
 package grpc_middleware
 
 import (
-	"context"
-
+	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 )
 
@@ -19,19 +18,35 @@ import (
 func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
 	n := len(interceptors)
 
-	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
-		chainer := func(currentInter grpc.UnaryServerInterceptor, currentHandler grpc.UnaryHandler) grpc.UnaryHandler {
-			return func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
-				return currentInter(currentCtx, currentReq, info, currentHandler)
-			}
-		}
-
-		chainedHandler := handler
-		for i := n - 1; i >= 0; i-- {
-			chainedHandler = chainer(interceptors[i], chainedHandler)
-		}
-
-		return chainedHandler(ctx, req)
-	}
-}
+	if n > 1 {
+		lastI := n - 1
+		return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+			var (
+				chainHandler grpc.UnaryHandler
+				curI         int
+			)
+
+			chainHandler = func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
+				if curI == lastI {
+					return handler(currentCtx, currentReq)
+				}
+				curI++
+				resp, err := interceptors[curI](currentCtx, currentReq, info, chainHandler)
+				curI--
+				return resp, err
+			}
+
+			return interceptors[0](ctx, req, info, chainHandler)
+		}
+	}
+
+	if n == 1 {
+		return interceptors[0]
+	}
+
+	// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
+	return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+		return handler(ctx, req)
+	}
+}
 
@@ -43,19 +58,35 @@ func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnarySer
 func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor {
 	n := len(interceptors)
 
-	return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
-		chainer := func(currentInter grpc.StreamServerInterceptor, currentHandler grpc.StreamHandler) grpc.StreamHandler {
-			return func(currentSrv interface{}, currentStream grpc.ServerStream) error {
-				return currentInter(currentSrv, currentStream, info, currentHandler)
-			}
-		}
-
-		chainedHandler := handler
-		for i := n - 1; i >= 0; i-- {
-			chainedHandler = chainer(interceptors[i], chainedHandler)
-		}
-
-		return chainedHandler(srv, ss)
-	}
-}
+	if n > 1 {
+		lastI := n - 1
+		return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+			var (
+				chainHandler grpc.StreamHandler
+				curI         int
+			)
+
+			chainHandler = func(currentSrv interface{}, currentStream grpc.ServerStream) error {
+				if curI == lastI {
+					return handler(currentSrv, currentStream)
+				}
+				curI++
+				err := interceptors[curI](currentSrv, currentStream, info, chainHandler)
+				curI--
+				return err
+			}
+
+			return interceptors[0](srv, stream, info, chainHandler)
+		}
+	}
+
+	if n == 1 {
+		return interceptors[0]
+	}
+
+	// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
+	return func(srv interface{}, stream grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+		return handler(srv, stream)
+	}
+}
 
@@ -66,19 +97,35 @@ func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.Stream
 func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor {
 	n := len(interceptors)
 
-	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
-		chainer := func(currentInter grpc.UnaryClientInterceptor, currentInvoker grpc.UnaryInvoker) grpc.UnaryInvoker {
-			return func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
-				return currentInter(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentInvoker, currentOpts...)
-			}
-		}
-
-		chainedInvoker := invoker
-		for i := n - 1; i >= 0; i-- {
-			chainedInvoker = chainer(interceptors[i], chainedInvoker)
-		}
-
-		return chainedInvoker(ctx, method, req, reply, cc, opts...)
-	}
-}
+	if n > 1 {
+		lastI := n - 1
+		return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+			var (
+				chainHandler grpc.UnaryInvoker
+				curI         int
+			)
+
+			chainHandler = func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
+				if curI == lastI {
+					return invoker(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentOpts...)
+				}
+				curI++
+				err := interceptors[curI](currentCtx, currentMethod, currentReq, currentRepl, currentConn, chainHandler, currentOpts...)
+				curI--
+				return err
+			}
+
+			return interceptors[0](ctx, method, req, reply, cc, chainHandler, opts...)
+		}
+	}
+
+	if n == 1 {
+		return interceptors[0]
+	}
+
+	// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
+	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+		return invoker(ctx, method, req, reply, cc, opts...)
+	}
+}
 
@@ -89,19 +136,35 @@ func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryCli
 func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor {
 	n := len(interceptors)
 
-	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
-		chainer := func(currentInter grpc.StreamClientInterceptor, currentStreamer grpc.Streamer) grpc.Streamer {
-			return func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
-				return currentInter(currentCtx, currentDesc, currentConn, currentMethod, currentStreamer, currentOpts...)
-			}
-		}
-
-		chainedStreamer := streamer
-		for i := n - 1; i >= 0; i-- {
-			chainedStreamer = chainer(interceptors[i], chainedStreamer)
-		}
-
-		return chainedStreamer(ctx, desc, cc, method, opts...)
-	}
-}
+	if n > 1 {
+		lastI := n - 1
+		return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+			var (
+				chainHandler grpc.Streamer
+				curI         int
+			)
+
+			chainHandler = func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
+				if curI == lastI {
+					return streamer(currentCtx, currentDesc, currentConn, currentMethod, currentOpts...)
+				}
+				curI++
+				stream, err := interceptors[curI](currentCtx, currentDesc, currentConn, currentMethod, chainHandler, currentOpts...)
+				curI--
+				return stream, err
+			}
+
+			return interceptors[0](ctx, desc, cc, method, chainHandler, opts...)
+		}
+	}
+
+	if n == 1 {
+		return interceptors[0]
+	}
+
+	// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
+	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+		return streamer(ctx, desc, cc, method, opts...)
+	}
+}
 
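Both chain.go variants do the same job from the caller's point of view: the first interceptor in the slice runs outermost, the last one wraps the real handler, and a single interceptor is returned unchanged. The newer code builds the chain with closures in a loop on every call, while the version being vendored back in walks the slice recursively through a shared `curI` index. A minimal sketch (not from the repository, handler and messages invented) that makes the invocation order visible:

```go
package main

import (
	"context"
	"fmt"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	"google.golang.org/grpc"
)

// tag returns a unary interceptor that prints before and after calling the
// next element in the chain, so ChainUnaryServer's ordering is observable.
func tag(name string) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		fmt.Println(name, "before")
		resp, err := handler(ctx, req)
		fmt.Println(name, "after")
		return resp, err
	}
}

func main() {
	chained := grpc_middleware.ChainUnaryServer(tag("outer"), tag("inner"))

	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		fmt.Println("handler")
		return "ok", nil
	}

	// Prints: outer before, inner before, handler, inner after, outer after.
	_, _ = chained(context.Background(), "req", &grpc.UnaryServerInfo{FullMethod: "/demo"}, handler)
}
```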
22 vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.mod generated vendored
@@ -1,22 +0,0 @@
module github.com/grpc-ecosystem/go-grpc-middleware

require (
	github.com/go-kit/kit v0.9.0
	github.com/go-logfmt/logfmt v0.4.0 // indirect
	github.com/go-stack/stack v1.8.0 // indirect
	github.com/gogo/protobuf v1.2.1
	github.com/golang/protobuf v1.3.2
	github.com/opentracing/opentracing-go v1.1.0
	github.com/pkg/errors v0.8.1 // indirect
	github.com/sirupsen/logrus v1.4.2
	github.com/stretchr/testify v1.4.0
	go.uber.org/atomic v1.4.0 // indirect
	go.uber.org/multierr v1.1.0 // indirect
	go.uber.org/zap v1.10.0
	golang.org/x/net v0.0.0-20190311183353-d8887717615a
	golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
	google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 // indirect
	google.golang.org/grpc v1.19.0
)

go 1.13
86 vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.sum generated vendored
@@ -1,86 +0,0 @@
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
3 vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile generated vendored
@@ -8,8 +8,7 @@ fmt:
 	go fmt $(GOFILES_NOVENDOR)
 
 vet:
-	# do not check lostcancel, they are intentional.
-	go vet -lostcancel=false $(GOFILES_NOVENDOR)
+	go vet $(GOFILES_NOVENDOR)
 
 test: vet
 	./scripts/test_all.sh
3 vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go generated vendored
@@ -4,8 +4,7 @@
 package grpc_middleware
 
 import (
-	"context"
-
+	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 )
 
354 vendor/github.com/hashicorp/errwrap/LICENSE generated vendored Normal file
@@ -0,0 +1,354 @@
[standard text of the Mozilla Public License, version 2.0]
89 vendor/github.com/hashicorp/errwrap/README.md generated vendored Normal file
@@ -0,0 +1,89 @@
# errwrap

`errwrap` is a package for Go that formalizes the pattern of wrapping errors
and checking if an error contains another error.

There is a common pattern in Go of taking a returned `error` value and
then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
with this pattern is that you completely lose the original `error` structure.

Arguably the _correct_ approach is that you should make a custom structure
implementing the `error` interface, and have the original error as a field
on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
This is a good approach, but you have to know the entire chain of possible
rewrapping that happens, when you might just care about one.

`errwrap` formalizes this pattern (it doesn't matter what approach you use
above) by giving a single interface for wrapping errors, checking if a specific
error is wrapped, and extracting that error.

## Installation and Docs

Install using `go get github.com/hashicorp/errwrap`.

Full documentation is available at
http://godoc.org/github.com/hashicorp/errwrap

## Usage

#### Basic Usage

Below is a very basic example of its usage:

```go
// A function that always returns an error, but wraps it, like a real
// function might.
func tryOpen() error {
	_, err := os.Open("/i/dont/exist")
	if err != nil {
		return errwrap.Wrapf("Doesn't exist: {{err}}", err)
	}

	return nil
}

func main() {
	err := tryOpen()

	// We can use the Contains helpers to check if an error contains
	// another error. It is safe to do this with a nil error, or with
	// an error that doesn't even use the errwrap package.
	if errwrap.Contains(err, "does not exist") {
		// Do something
	}
	if errwrap.ContainsType(err, new(os.PathError)) {
		// Do something
	}

	// Or we can use the associated `Get` functions to just extract
	// a specific error. This would return nil if that specific error doesn't
	// exist.
	perr := errwrap.GetType(err, new(os.PathError))
}
```

#### Custom Types

If you're already making custom types that properly wrap errors, then
you can get all the functionality of `errwraps.Contains` and such by
implementing the `Wrapper` interface with just one function. Example:

```go
type AppError {
	Code ErrorCode
	Err  error
}

func (e *AppError) WrappedErrors() []error {
	return []error{e.Err}
}
```

Now this works:

```go
err := &AppError{Err: fmt.Errorf("an error")}
if errwrap.ContainsType(err, fmt.Errorf("")) {
	// This will work!
}
```
169 vendor/github.com/hashicorp/errwrap/errwrap.go generated vendored Normal file
@@ -0,0 +1,169 @@
// Package errwrap implements methods to formalize error wrapping in Go.
//
// All of the top-level functions that take an `error` are built to be able
// to take any error, not just wrapped errors. This allows you to use errwrap
// without having to type-check and type-cast everywhere.
package errwrap

import (
	"errors"
	"reflect"
	"strings"
)

// WalkFunc is the callback called for Walk.
type WalkFunc func(error)

// Wrapper is an interface that can be implemented by custom types to
// have all the Contains, Get, etc. functions in errwrap work.
//
// When Walk reaches a Wrapper, it will call the callback for every
// wrapped error in addition to the wrapper itself. Since all the top-level
// functions in errwrap use Walk, this means that all those functions work
// with your custom type.
type Wrapper interface {
	WrappedErrors() []error
}

// Wrap defines that outer wraps inner, returning an error type that
// can be cleanly used with the other methods in this package, such as
// Contains, GetAll, etc.
//
// This function won't modify the error message at all (the outer message
// will be used).
func Wrap(outer, inner error) error {
	return &wrappedError{
		Outer: outer,
		Inner: inner,
	}
}

// Wrapf wraps an error with a formatting message. This is similar to using
// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
// errors, you should replace it with this.
//
// format is the format of the error message. The string '{{err}}' will
// be replaced with the original error message.
func Wrapf(format string, err error) error {
	outerMsg := "<nil>"
	if err != nil {
		outerMsg = err.Error()
	}

	outer := errors.New(strings.Replace(
		format, "{{err}}", outerMsg, -1))

	return Wrap(outer, err)
}

// Contains checks if the given error contains an error with the
// message msg. If err is not a wrapped error, this will always return
// false unless the error itself happens to match this msg.
func Contains(err error, msg string) bool {
	return len(GetAll(err, msg)) > 0
}

// ContainsType checks if the given error contains an error with
// the same concrete type as v. If err is not a wrapped error, this will
// check the err itself.
func ContainsType(err error, v interface{}) bool {
	return len(GetAllType(err, v)) > 0
}

// Get is the same as GetAll but returns the deepest matching error.
func Get(err error, msg string) error {
	es := GetAll(err, msg)
	if len(es) > 0 {
		return es[len(es)-1]
	}

	return nil
}

// GetType is the same as GetAllType but returns the deepest matching error.
func GetType(err error, v interface{}) error {
	es := GetAllType(err, v)
	if len(es) > 0 {
		return es[len(es)-1]
	}

	return nil
}

// GetAll gets all the errors that might be wrapped in err with the
// given message. The order of the errors is such that the outermost
// matching error (the most recent wrap) is index zero, and so on.
func GetAll(err error, msg string) []error {
	var result []error

	Walk(err, func(err error) {
		if err.Error() == msg {
			result = append(result, err)
		}
	})

	return result
}

// GetAllType gets all the errors that are the same type as v.
//
// The order of the return value is the same as described in GetAll.
func GetAllType(err error, v interface{}) []error {
	var result []error

	var search string
	if v != nil {
		search = reflect.TypeOf(v).String()
	}
	Walk(err, func(err error) {
		var needle string
		if err != nil {
			needle = reflect.TypeOf(err).String()
		}

		if needle == search {
			result = append(result, err)
		}
	})

	return result
}

// Walk walks all the wrapped errors in err and calls the callback. If
// err isn't a wrapped error, this will be called once for err. If err
// is a wrapped error, the callback will be called for both the wrapper
// that implements error as well as the wrapped error itself.
func Walk(err error, cb WalkFunc) {
	if err == nil {
		return
	}

	switch e := err.(type) {
	case *wrappedError:
		cb(e.Outer)
		Walk(e.Inner, cb)
	case Wrapper:
		cb(err)

		for _, err := range e.WrappedErrors() {
			Walk(err, cb)
		}
	default:
		cb(err)
	}
}

// wrappedError is an implementation of error that has both the
// outer and inner errors.
type wrappedError struct {
	Outer error
	Inner error
}

func (w *wrappedError) Error() string {
	return w.Outer.Error()
}

func (w *wrappedError) WrappedErrors() []error {
	return []error{w.Outer, w.Inner}
}
1 vendor/github.com/hashicorp/errwrap/go.mod generated vendored Normal file
@@ -0,0 +1 @@
module github.com/hashicorp/errwrap
363
vendor/github.com/hashicorp/go-cleanhttp/LICENSE
generated
vendored
Normal file
363
vendor/github.com/hashicorp/go-cleanhttp/LICENSE
generated
vendored
Normal file
@ -0,0 +1,363 @@
Mozilla Public License, version 2.0

1. Definitions

1.1. "Contributor"

means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.

1.2. "Contributor Version"

means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.

1.3. "Contribution"

means Covered Software of a particular Contributor.

1.4. "Covered Software"

means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.

1.5. "Incompatible With Secondary Licenses"
means

a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or

b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.

1.6. "Executable Form"

means any form of the work other than Source Code Form.

1.7. "Larger Work"

means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.

1.8. "License"

means this document.

1.9. "Licensable"

means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.

1.10. "Modifications"

means any of the following:

a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or

b. any new file in Source Code Form that contains any Covered Software.

1.11. "Patent Claims" of a Contributor

means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.

1.12. "Secondary License"

means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.

1.13. "Source Code Form"

means the form of the work preferred for making modifications.

1.14. "You" (or "Your")

means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.


2. License Grants and Conditions

2.1. Grants

Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:

a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and

b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.

2.2. Effective Date

The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.

2.3. Limitations on Grant Scope

The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:

a. for any code that a Contributor has removed from Covered Software; or

b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or

c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.

This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).

2.4. Subsequent Licenses

No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).

2.5. Representation

Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.

2.6. Fair Use

This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.

2.7. Conditions

Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.


3. Responsibilities

3.1. Distribution of Source Form

All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.

3.2. Distribution of Executable Form

If You distribute Covered Software in Executable Form then:

a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and

b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.

3.3. Distribution of a Larger Work

You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).

3.4. Notices

You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.

4. Inability to Comply Due to Statute or Regulation

If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.

5. Termination

5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.

6. Disclaimer of Warranty

Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.

7. Limitation of Liability

Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.

8. Litigation

Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.

9. Miscellaneous

This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.


10. Versions of the License

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.

Exhibit A - Source Code Form License Notice

This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - "Incompatible With Secondary Licenses" Notice

This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.
30
vendor/github.com/hashicorp/go-cleanhttp/README.md
generated
vendored
Normal file
@ -0,0 +1,30 @@
# cleanhttp

Functions for accessing "clean" Go http.Client values

-------------

The Go standard library contains a default `http.Client` called
`http.DefaultClient`. It is a common idiom in Go code to start with
`http.DefaultClient` and tweak it as necessary, and in fact, this is
encouraged; from the `http` package documentation:

> The Client's Transport typically has internal state (cached TCP connections),
so Clients should be reused instead of created as needed. Clients are safe for
concurrent use by multiple goroutines.

Unfortunately, this is a shared value, and it is not uncommon for libraries to
assume that they are free to modify it at will. With enough dependencies, it
can be very easy to encounter strange problems and race conditions due to
manipulation of this shared value across libraries and goroutines (clients are
safe for concurrent use, but writing values to the client struct itself is not
protected).

Making things worse is the fact that a bare `http.Client` will use a default
`http.Transport` called `http.DefaultTransport`, which is another global value
that behaves the same way. So it is not simply enough to replace
`http.DefaultClient` with `&http.Client{}`.

This repository provides some simple functions to get a "clean" `http.Client`
-- one that uses the same default values as the Go standard library, but
returns a client that does not share any state with other clients.
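To illustrate the point of the README above (this sketch is not part of the vendored files): a client obtained from this package can be tweaked freely without touching the shared http.DefaultClient or http.DefaultTransport. The URL and timeout below are placeholders.

package main

import (
	"fmt"
	"time"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

func main() {
	// A private client with stdlib-like defaults; modifying it does not
	// affect http.DefaultClient or any other client in the process.
	client := cleanhttp.DefaultClient()
	client.Timeout = 10 * time.Second

	resp, err := client.Get("https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}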
57
vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
package cleanhttp

import (
	"net"
	"net/http"
	"runtime"
	"time"
)

// DefaultTransport returns a new http.Transport with similar default values to
// http.DefaultTransport, but with idle connections and keepalives disabled.
func DefaultTransport() *http.Transport {
	transport := DefaultPooledTransport()
	transport.DisableKeepAlives = true
	transport.MaxIdleConnsPerHost = -1
	return transport
}

// DefaultPooledTransport returns a new http.Transport with similar default
// values to http.DefaultTransport. Do not use this for transient transports as
// it can leak file descriptors over time. Only use this for transports that
// will be re-used for the same host(s).
func DefaultPooledTransport() *http.Transport {
	transport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
			DualStack: true,
		}).DialContext,
		MaxIdleConns:          100,
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
		MaxIdleConnsPerHost:   runtime.GOMAXPROCS(0) + 1,
	}
	return transport
}

// DefaultClient returns a new http.Client with similar default values to
// http.Client, but with a non-shared Transport, idle connections disabled, and
// keepalives disabled.
func DefaultClient() *http.Client {
	return &http.Client{
		Transport: DefaultTransport(),
	}
}

// DefaultPooledClient returns a new http.Client with similar default values to
// http.Client, but with a shared Transport. Do not use this function for
// transient clients as it can leak file descriptors over time. Only use this
// for clients that will be re-used for the same host(s).
func DefaultPooledClient() *http.Client {
	return &http.Client{
		Transport: DefaultPooledTransport(),
	}
}
20
vendor/github.com/hashicorp/go-cleanhttp/doc.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
// Package cleanhttp offers convenience utilities for acquiring "clean"
// http.Transport and http.Client structs.
//
// Values set on http.DefaultClient and http.DefaultTransport affect all
// callers. This can have detrimental effects, especially in TLS contexts,
// where client or root certificates set to talk to multiple endpoints can end
// up displacing each other, leading to hard-to-debug issues. This package
// provides non-shared http.Client and http.Transport structs to ensure that
// the configuration will not be overwritten by other parts of the application
// or dependencies.
//
// The DefaultClient and DefaultTransport functions disable idle connections
// and keepalives. Without ensuring that idle connections are closed before
// garbage collection, short-term clients/transports can leak file descriptors,
// eventually leading to "too many open files" errors. If you will be
// connecting to the same hosts repeatedly from the same client, you can use
// DefaultPooledClient to receive a client that has connection pooling
// semantics similar to http.DefaultClient.
//
package cleanhttp
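As a small, non-authoritative sketch of the guidance in the package documentation above (not part of the vendored diff): repeated requests to the same host are better served by DefaultPooledClient, while DefaultClient suits one-off, short-lived use. The URL and request count are illustrative.

package main

import (
	"fmt"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

// fetch issues several requests against the same host, so a pooled
// client (keepalives and idle connections enabled) is the appropriate
// choice per the doc comment above.
func fetch() error {
	client := cleanhttp.DefaultPooledClient()
	for i := 0; i < 3; i++ {
		resp, err := client.Get("https://example.com/health")
		if err != nil {
			return err
		}
		resp.Body.Close()
		fmt.Println("attempt", i, "status:", resp.Status)
	}
	return nil
}

func main() {
	if err := fetch(); err != nil {
		fmt.Println("fetch failed:", err)
	}
}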
1
vendor/github.com/hashicorp/go-cleanhttp/go.mod
generated
vendored
Normal file
@ -0,0 +1 @@
module github.com/hashicorp/go-cleanhttp
48
vendor/github.com/hashicorp/go-cleanhttp/handlers.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
package cleanhttp

import (
	"net/http"
	"strings"
	"unicode"
)

// HandlerInput provides input options to cleanhttp's handlers
type HandlerInput struct {
	ErrStatus int
}

// PrintablePathCheckHandler is a middleware that ensures the request path
// contains only printable runes.
func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler {
	// Nil-check on input to make it optional
	if input == nil {
		input = &HandlerInput{
			ErrStatus: http.StatusBadRequest,
		}
	}

	// Default to http.StatusBadRequest on error
	if input.ErrStatus == 0 {
		input.ErrStatus = http.StatusBadRequest
	}

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r != nil {
			// Check URL path for non-printable characters
			idx := strings.IndexFunc(r.URL.Path, func(c rune) bool {
				return !unicode.IsPrint(c)
			})

			if idx != -1 {
				w.WriteHeader(input.ErrStatus)
				return
			}

			if next != nil {
				next.ServeHTTP(w, r)
			}
		}

		return
	})
}
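A brief sketch, not part of the vendored diff, of how PrintablePathCheckHandler above is typically wired into a server; the mux handler and listen address are illustrative only.

package main

import (
	"log"
	"net/http"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Requests whose URL path contains non-printable runes are rejected
	// with the default status (400 Bad Request) before reaching mux.
	handler := cleanhttp.PrintablePathCheckHandler(mux, nil)

	log.Fatal(http.ListenAndServe("127.0.0.1:8080", handler))
}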
Some files were not shown because too many files have changed in this diff.