Mirror of https://github.com/ceph/ceph-csi.git, synced 2024-11-09 16:00:22 +00:00

rebase: bump k8s.io/kubernetes from 1.26.2 to 1.27.2

Bumps [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes) from 1.26.2 to 1.27.2.
- [Release notes](https://github.com/kubernetes/kubernetes/releases)
- [Commits](https://github.com/kubernetes/kubernetes/compare/v1.26.2...v1.27.2)

---
updated-dependencies:
- dependency-name: k8s.io/kubernetes
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

parent 0e79135419
commit 07b05616a0
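Dependabot generates commits like this mechanically; the equivalent manual update is a short Go-toolchain sequence. A sketch (any repo-specific Makefile targets ceph-csi may use for this are not shown in the commit):

```bash
# Fetch the new version and rewrite go.mod/go.sum accordingly.
go get k8s.io/kubernetes@v1.27.2

# Re-resolve and prune the dependency graph.
go mod tidy

# Refresh vendored sources; this commit adds
# vendor/github.com/NYTimes/gziphandler, among others.
go mod vendor
```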
go.mod (30 lines changed)

@@ -40,16 +40,18 @@ require (
k8s.io/client-go v12.0.0+incompatible
k8s.io/cloud-provider v0.26.2
k8s.io/klog/v2 v2.100.1
k8s.io/kubernetes v1.26.2
k8s.io/kubernetes v1.27.2
k8s.io/mount-utils v0.26.2
k8s.io/pod-security-admission v0.0.0
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
k8s.io/utils v0.0.0-20230209194617-a36077c30491
sigs.k8s.io/controller-runtime v0.14.6
)

require (
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/ansel1/merry v1.6.2 // indirect
github.com/ansel1/merry/v2 v2.0.1 // indirect
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
github.com/armon/go-metrics v0.3.10 // indirect
github.com/aws/aws-sdk-go-v2 v1.18.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33 // indirect
@@ -61,6 +63,8 @@ require (
github.com/cenkalti/backoff/v3 v3.2.2 // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.4.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
@@ -75,12 +79,13 @@ require (
github.com/go-jose/go-jose/v3 v3.0.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.1 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/cel-go v0.12.6 // indirect
github.com/google/gnostic v0.6.9 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect
@@ -129,7 +134,12 @@ require (
github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/spf13/cobra v1.6.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/v3 v3.5.7 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
go.opentelemetry.io/otel v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
@@ -142,6 +152,7 @@ require (
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/oauth2 v0.6.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/term v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.3.0 // indirect
@@ -150,17 +161,20 @@ require (
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.26.2 // indirect
k8s.io/apiserver v0.26.2 // indirect
k8s.io/component-base v0.26.2 // indirect
k8s.io/component-helpers v0.26.2 // indirect
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
k8s.io/controller-manager v0.26.2 // indirect
k8s.io/kms v0.26.2 // indirect
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect
k8s.io/kubectl v0.0.0 // indirect
k8s.io/kubelet v0.0.0 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
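Note the `k8s.io/kubectl v0.0.0`, `k8s.io/kubelet v0.0.0`, and `k8s.io/pod-security-admission v0.0.0` entries above: Kubernetes staging modules are not importable at their own semantic versions, so a go.mod that depends on k8s.io/kubernetes must pin them with replace directives. Those directives fall outside the hunks shown here; a hedged sketch of how such pins are typically updated alongside a bump to v1.27.2 (the exact directives in ceph-csi's go.mod are not shown in this diff):

```bash
# Staging modules track the main Kubernetes release as v0.<minor>.<patch>;
# for k8s.io/kubernetes v1.27.2 that is v0.27.2.
go mod edit -replace k8s.io/kubectl=k8s.io/kubectl@v0.27.2
go mod edit -replace k8s.io/kubelet=k8s.io/kubelet@v0.27.2
go mod edit -replace k8s.io/pod-security-admission=k8s.io/pod-security-admission@v0.27.2
```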
go.sum (71 lines changed)

@@ -134,6 +134,7 @@ github.com/ansel1/merry/v2 v2.0.1/go.mod h1:dD5OhpiPrVkvgseRYd+xgYlx7s6ytU3v9BTT
github.com/ansel1/vespucci/v4 v4.1.1/go.mod h1:zzdrO4IgBfgcGMbGTk/qNGL8JPslmW3nPpcBHKReFYY=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves=
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U=
@@ -240,11 +241,14 @@ github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc
github.com/coreos/go-oidc v2.0.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU=
github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
@@ -281,6 +285,7 @@ github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdf
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M=
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
@@ -339,7 +344,7 @@ github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH
github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.1.1/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs=
github.com/go-errors/errors v1.4.1 h1:IvVlgbzSsaUNudsw5dcXSzF3EWyXTi5XrAdngnuhRyg=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -371,11 +376,13 @@ github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AE
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8=
github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
@@ -399,7 +406,7 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog=
github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
@@ -450,7 +457,9 @@ github.com/gomodules/jsonpatch/v2 v2.2.0 h1:QBjDK/nX43P4z/Os3gnk8VeFdLDgBuMns1Wl
github.com/gomodules/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M=
github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw=
github.com/google/fscrypt v0.3.4 h1:XGSVMIsQFooj82aRRfYn3JpgU/4fOTnzXPnjhxC8uH8=
github.com/google/fscrypt v0.3.4/go.mod h1:BRpw7vaeDitXGRvXa281i/ivQszAdBIiUYDWHjVTkcs=
@@ -524,6 +533,7 @@ github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/z
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@@ -536,6 +546,7 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk=
@@ -738,6 +749,7 @@ github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHW
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
@@ -1046,8 +1058,8 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/smartystreets/assertions v0.0.0-20180725160413-e900ae048470/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
@@ -1055,6 +1067,7 @@ github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:s
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
@@ -1071,12 +1084,14 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -1086,6 +1101,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
@@ -1096,12 +1112,13 @@ github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vmware/govmomi v0.20.3 h1:gpw/0Ku+6RgF3jsi7fnCLmlcikBHfKBCUcu1qgc16OU=
github.com/vmware/govmomi v0.30.0 h1:Fm8ugPnnlMSTSceDKY9goGvjmqc6eQLPUSUeNXdpeXA=
github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
@@ -1109,6 +1126,7 @@ github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -1122,14 +1140,25 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 h1:lWF4f9Nypl1ZqSb4gLeh/DGvBYVaUYHuiB93teOmwgc=
go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8=
go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY=
go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA=
go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg=
go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY=
go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4=
go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU=
go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c=
go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4=
go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw=
go.etcd.io/etcd/pkg/v3 v3.5.5/go.mod h1:6ksYFxttiUGzC2uxyqiyOEvhAiD0tuIqSZkX3TyPdaE=
go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0=
go.etcd.io/etcd/raft/v3 v3.5.5/go.mod h1:76TA48q03g1y1VpTue92jZLr9lIHKUNcYdZOOGyx8rI=
go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc=
go.etcd.io/etcd/server/v3 v3.5.5/go.mod h1:rZ95vDw/jrvsbj9XpTqPrTAB9/kzchVdhRirySPkUBc=
go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk=
go.mongodb.org/mongo-driver v1.2.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A=
go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M=
@@ -1143,9 +1172,11 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 h1:Ajldaqhxqw/gNzQA45IKFWLdG7jZuXX/wBW1d5qvbUI=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c=
go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU=
go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM=
go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4=
@@ -1179,7 +1210,7 @@ go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
@@ -1350,6 +1381,7 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1722,6 +1754,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -1736,6 +1769,7 @@ gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
gopkg.in/ldap.v3 v3.0.3/go.mod h1:oxD7NyBuxchC+SgJDE1Q5Od05eGt29SDQVBmV+HYbzw=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek=
gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=
@@ -1793,6 +1827,9 @@ k8s.io/component-base v0.26.2 h1:IfWgCGUDzrD6wLLgXEstJKYZKAFS2kO+rBRi0p3LqcI=
k8s.io/component-base v0.26.2/go.mod h1:DxbuIe9M3IZPRxPIzhch2m1eT7uFrSBJUBuVCQEBivs=
k8s.io/component-helpers v0.26.2 h1:+JJ1gwyVsqSwZCJVLJotx/IPq2pMpo0kifeAzfo6i3U=
k8s.io/component-helpers v0.26.2/go.mod h1:PRvoduZ5/IeKGGbZRki3J2cTQVwZLD+EUxIEbvvX0W4=
k8s.io/controller-manager v0.26.2 h1:Y4g50VqaXkr02v5FNTWDQ47ZPFNM1ls00F0+FoKKaTM=
k8s.io/controller-manager v0.26.2/go.mod h1:h8yv0MO3jjo9px49uResC9laZekvOmQRmrRLwe9n6Zw=
k8s.io/csi-translation-lib v0.26.2 h1:WWnJjYKBlnoMzwc77ioT9bppJPFYNAZ0uhzert/3QJ4=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
@@ -1802,16 +1839,18 @@ k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kms v0.26.2 h1:GM1gg3tFK3OUU/QQFi93yGjG3lJT8s8l3Wkn2+VxBLM=
k8s.io/kms v0.26.2/go.mod h1:69qGnf1NsFOQP07fBYqNLZklqEHSJF024JqYCaeVxHg=
k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg=
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=
k8s.io/kubectl v0.26.2 h1:SMPB4j48eVFxsYluBq3VLyqXtE6b72YnszkbTAtFye4=
k8s.io/kubectl v0.26.2/go.mod h1:KYWOXSwp2BrDn3kPeoU/uKzKtdqvhK1dgZGd0+no4cM=
k8s.io/kubelet v0.26.2 h1:egg7YfhCpH9wvLwQdL2Mzuy4/kC6hO91azY0jgdYPWA=
k8s.io/kubelet v0.26.2/go.mod h1:IXthU5hcJQE6+K33LuaYYO0wUcYO8glhl/ip1Hzux44=
k8s.io/kubernetes v1.26.2 h1:6Ve0nzlF2noVXf9jMHSJgbRZC0EkyOV22GYEv1K7MZI=
k8s.io/kubernetes v1.26.2/go.mod h1:cv07eVU5+kF6ibpVtAvOGjIBsrfgevQL4ORK85/oqWc=
k8s.io/kubernetes v1.27.2 h1:g4v9oY6u7vBUDEuq4FvC50Bbw2K7GZuvM00IIESWVf4=
k8s.io/kubernetes v1.27.2/go.mod h1:U8ZXeKBAPxeb4J4/HOaxjw1A9K6WfSH+fY2SS7CR6IM=
k8s.io/mount-utils v0.26.2 h1:KoRKqCAAK2l37l71YMvKx6vaLToh52RkNx1RU/dSLGQ=
k8s.io/mount-utils v0.26.2/go.mod h1:95yx9K6N37y8YZ0/lUh9U6ITosMODNaW0/v4wvaa0Xw=
k8s.io/pod-security-admission v0.26.2 h1:R41JH34lRsqThGUCi1XdDFhG+UoRK4ZFzQ89FxgWDP8=
@@ -1819,18 +1858,20 @@ k8s.io/pod-security-admission v0.26.2/go.mod h1:tb7Huh4QpEZZets79N8QQOtbvRBARSU0
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY=
k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 h1:+xBL5uTc+BkPBwmMi3vYfUJjq+N3K+H6PXeETwf5cPI=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35/go.mod h1:WxjusMwXlKzfAs4p9km6XJRndVt2FROgMVCE4cdohFo=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0=
sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I=
sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA=
sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U=
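Each module version in go.sum appears as a pair of entries: an `h1:` hash over the module's whole file tree, and a `/go.mod h1:` hash over its go.mod file alone, which is why the bump touches two lines per version. After a bump like this, the recorded checksums can be checked against the local module cache:

```bash
# Recompute hashes for every downloaded dependency and
# compare them with the entries recorded in go.sum.
go mod verify
```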
vendor/github.com/NYTimes/gziphandler/.gitignore (generated, vendored, new file, 1 line)

@@ -0,0 +1 @@
*.swp
vendor/github.com/NYTimes/gziphandler/.travis.yml (generated, vendored, new file, 10 lines)

@@ -0,0 +1,10 @@
language: go
go:
- 1.x
- tip
env:
- GO111MODULE=on
install:
- go mod download
script:
- go test -race -v
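The same checks can be reproduced locally before pushing; a sketch mirroring the script above (the GO111MODULE=on setting only matters on pre-1.16 toolchains, where module mode was not yet the default):

```bash
# Fetch dependencies, then run the tests with the race detector, as CI does.
go mod download
GO111MODULE=on go test -race -v
```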
vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md (generated, vendored, new file, 75 lines)

@@ -0,0 +1,75 @@
---
layout: code-of-conduct
version: v1.0
---

This code of conduct outlines our expectations for participants within the **NYTimes/gziphandler** community, as well as steps for reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all and expect our code of conduct to be honored. Anyone who violates this code of conduct may be banned from the community.

Our open source community strives to:

* **Be friendly and patient.**
* **Be welcoming**: We strive to be a community that welcomes and supports people of all backgrounds and identities. This includes, but is not limited to members of any race, ethnicity, culture, national origin, colour, immigration status, social and economic class, educational level, sex, sexual orientation, gender identity and expression, age, size, family status, political belief, religion, and mental and physical ability.
* **Be considerate**: Your work will be used by other people, and you in turn will depend on the work of others. Any decision you take will affect users and colleagues, and you should take those consequences into account when making decisions. Remember that we're a world-wide community, so you might not be communicating in someone else's primary language.
* **Be respectful**: Not all of us will agree all the time, but disagreement is no excuse for poor behavior and poor manners. We might all experience some frustration now and then, but we cannot allow that frustration to turn into a personal attack. It’s important to remember that a community where people feel uncomfortable or threatened is not a productive one.
* **Be careful in the words that we choose**: we are a community of professionals, and we conduct ourselves professionally. Be kind to others. Do not insult or put down other participants. Harassment and other exclusionary behavior aren't acceptable.
* **Try to understand why we disagree**: Disagreements, both social and technical, happen all the time. It is important that we resolve disagreements and differing views constructively. Remember that we’re different. The strength of our community comes from its diversity, people from a wide range of backgrounds. Different people have different perspectives on issues. Being unable to understand why someone holds a viewpoint doesn’t mean that they’re wrong. Don’t forget that it is human to err and blaming each other doesn’t get us anywhere. Instead, focus on helping to resolve issues and learning from mistakes.

## Definitions

Harassment includes, but is not limited to:

- Offensive comments related to gender, gender identity and expression, sexual orientation, disability, mental illness, neuro(a)typicality, physical appearance, body size, race, age, regional discrimination, political or religious affiliation
- Unwelcome comments regarding a person’s lifestyle choices and practices, including those related to food, health, parenting, drugs, and employment
- Deliberate misgendering. This includes deadnaming or persistently using a pronoun that does not correctly reflect a person's gender identity. You must address people by the name they give you when not addressing them by their username or handle
- Physical contact and simulated physical contact (eg, textual descriptions like “*hug*” or “*backrub*”) without consent or after a request to stop
- Threats of violence, both physical and psychological
- Incitement of violence towards any individual, including encouraging a person to commit suicide or to engage in self-harm
- Deliberate intimidation
- Stalking or following
- Harassing photography or recording, including logging online activity for harassment purposes
- Sustained disruption of discussion
- Unwelcome sexual attention, including gratuitous or off-topic sexual images or behaviour
- Pattern of inappropriate social contact, such as requesting/assuming inappropriate levels of intimacy with others
- Continued one-on-one communication after requests to cease
- Deliberate “outing” of any aspect of a person’s identity without their consent except as necessary to protect others from intentional abuse
- Publication of non-harassing private communication

Our open source community prioritizes marginalized people’s safety over privileged people’s comfort. We will not act on complaints regarding:

- ‘Reverse’ -isms, including ‘reverse racism,’ ‘reverse sexism,’ and ‘cisphobia’
- Reasonable communication of boundaries, such as “leave me alone,” “go away,” or “I’m not discussing this with you”
- Refusal to explain or debate social justice concepts
- Communicating in a ‘tone’ you don’t find congenial
- Criticizing racist, sexist, cissexist, or otherwise oppressive behavior or assumptions

### Diversity Statement

We encourage everyone to participate and are committed to building a community for all. Although we will fail at times, we seek to treat everyone both as fairly and equally as possible. Whenever a participant has made a mistake, we expect them to take responsibility for it. If someone has been harmed or offended, it is our responsibility to listen carefully and respectfully, and do our best to right the wrong.

Although this list cannot be exhaustive, we explicitly honor diversity in age, gender, gender identity or expression, culture, ethnicity, language, national origin, political beliefs, profession, race, religion, sexual orientation, socioeconomic status, and technical ability. We will not tolerate discrimination based on any of the protected characteristics above, including participants with disabilities.

### Reporting Issues

If you experience or witness unacceptable behavior—or have any other concerns—please report it by contacting us via **code@nytimes.com**. All reports will be handled with discretion. In your report please include:

- Your contact information.
- Names (real, nicknames, or pseudonyms) of any individuals involved. If there are additional witnesses, please include them as well. Your account of what occurred, and if you believe the incident is ongoing. If there is a publicly available record (e.g. a mailing list archive or a public IRC logger), please include a link.
- Any additional information that may be helpful.

After filing a report, a representative will contact you personally, review the incident, follow up with any additional questions, and make a decision as to how to respond. If the person who is harassing you is part of the response team, they will recuse themselves from handling your incident. If the complaint originates from a member of the response team, it will be handled by a different member of the response team. We will respect confidentiality requests for the purpose of protecting victims of abuse.

### Attribution & Acknowledgements

We all stand on the shoulders of giants across many open source communities. We'd like to thank the communities and projects that established codes of conduct and diversity statements as our inspiration:

* [Django](https://www.djangoproject.com/conduct/reporting/)
* [Python](https://www.python.org/community/diversity/)
* [Ubuntu](http://www.ubuntu.com/about/about-ubuntu/conduct)
* [Contributor Covenant](http://contributor-covenant.org/)
* [Geek Feminism](http://geekfeminism.org/about/code-of-conduct/)
* [Citizen Code of Conduct](http://citizencodeofconduct.org/)

This Code of Conduct was based on https://github.com/todogroup/opencodeofconduct
vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md (generated, vendored, new file, 30 lines)

@@ -0,0 +1,30 @@
# Contributing to NYTimes/gziphandler

This is an open source project started by a handful of developers at The New York Times and open to the entire Go community.

We really appreciate your help!

## Filing issues

When filing an issue, make sure to answer these five questions:

1. What version of Go are you using (`go version`)?
2. What operating system and processor architecture are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?

## Contributing code

Before submitting changes, please follow these guidelines:

1. Check the open issues and pull requests for existing discussions.
2. Open an issue to discuss a new feature.
3. Write tests.
4. Make sure code follows the ['Go Code Review Comments'](https://github.com/golang/go/wiki/CodeReviewComments).
5. Make sure your changes pass `go test`.
6. Make sure the entire test suite passes locally and on Travis CI.
7. Open a Pull Request.
8. [Squash your commits](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) after receiving feedback and add a [great commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) (see the sketch below).
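For step 8, a typical squash-and-update flow looks like this sketch (the commit count and branch name are illustrative, not part of the guidelines):

```bash
# Interactively squash the last three commits into one (count is illustrative).
git rebase -i HEAD~3
# Update the pull request branch after rewriting history.
git push --force-with-lease origin my-feature
```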
Unless otherwise noted, the gziphandler source files are distributed under the Apache 2.0-style license found in the LICENSE.md file.
vendor/github.com/NYTimes/gziphandler/LICENSE (generated, vendored, new file, 201 lines)

@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2016-2017 The New York Times Company

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
56 vendor/github.com/NYTimes/gziphandler/README.md generated vendored Normal file
@@ -0,0 +1,56 @@
Gzip Handler
============

This is a tiny Go package which wraps HTTP handlers to transparently gzip the
response body, for clients which support it. Although it's usually simpler to
leave that to a reverse proxy (like nginx or Varnish), this package is useful
when that's undesirable.

## Install
```bash
go get -u github.com/NYTimes/gziphandler
```

## Usage

Call `GzipHandler` with any handler (an object which implements the
`http.Handler` interface), and it'll return a new handler which gzips the
response. For example:

```go
package main

import (
	"io"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	withoutGz := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain")
		io.WriteString(w, "Hello, World")
	})

	withGz := gziphandler.GzipHandler(withoutGz)

	http.Handle("/", withGz)
	http.ListenAndServe("0.0.0.0:8000", nil)
}
```

## Documentation

The docs can be found at [godoc.org][docs], as usual.

## License

[Apache 2.0][license].


[docs]: https://godoc.org/github.com/NYTimes/gziphandler
[license]: https://github.com/NYTimes/gziphandler/blob/master/LICENSE
532 vendor/github.com/NYTimes/gziphandler/gzip.go generated vendored Normal file
@@ -0,0 +1,532 @@
package gziphandler // import "github.com/NYTimes/gziphandler"

import (
	"bufio"
	"compress/gzip"
	"fmt"
	"io"
	"mime"
	"net"
	"net/http"
	"strconv"
	"strings"
	"sync"
)

const (
	vary            = "Vary"
	acceptEncoding  = "Accept-Encoding"
	contentEncoding = "Content-Encoding"
	contentType     = "Content-Type"
	contentLength   = "Content-Length"
)

type codings map[string]float64

const (
	// DefaultQValue is the default qvalue to assign to an encoding if no explicit qvalue is set.
	// This is actually kind of ambiguous in RFC 2616, so hopefully it's correct.
	// The examples seem to indicate that it is.
	DefaultQValue = 1.0

	// DefaultMinSize is the default minimum size until we enable gzip compression.
	// 1500 bytes is the MTU size for the internet since that is the largest size allowed at the network layer.
	// If you take a file that is 1300 bytes and compress it to 800 bytes, it's still transmitted in that same 1500 byte packet regardless, so you've gained nothing.
	// That being the case, you should restrict the gzip compression to files with a size greater than a single packet, 1400 bytes (1.4KB) is a safe value.
	DefaultMinSize = 1400
)
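A consequence of the 1400-byte threshold, shown as a standalone sketch (not part of the vendored file): a response smaller than `DefaultMinSize` passes through uncompressed. Only this package's exported `GzipHandler` and the standard library are used here.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/NYTimes/gziphandler"
)

func main() {
	h := gziphandler.GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("tiny")) // 4 bytes, well under DefaultMinSize
	}))
	req := httptest.NewRequest("GET", "/", nil)
	req.Header.Set("Accept-Encoding", "gzip")
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)
	// Below the 1400-byte threshold the body is passed through unchanged,
	// so no Content-Encoding header is set.
	fmt.Printf("Content-Encoding: %q\n", rec.Header().Get("Content-Encoding"))
}
```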
// gzipWriterPools stores a sync.Pool for each compression level for reuse of
// gzip.Writers. Use poolIndex to convert a compression level to an index into
// gzipWriterPools.
var gzipWriterPools [gzip.BestCompression - gzip.BestSpeed + 2]*sync.Pool

func init() {
	for i := gzip.BestSpeed; i <= gzip.BestCompression; i++ {
		addLevelPool(i)
	}
	addLevelPool(gzip.DefaultCompression)
}

// poolIndex maps a compression level to its index into gzipWriterPools. It
// assumes that level is a valid gzip compression level.
func poolIndex(level int) int {
	// gzip.DefaultCompression == -1, so we need to treat it special.
	if level == gzip.DefaultCompression {
		return gzip.BestCompression - gzip.BestSpeed + 1
	}
	return level - gzip.BestSpeed
}

func addLevelPool(level int) {
	gzipWriterPools[poolIndex(level)] = &sync.Pool{
		New: func() interface{} {
			// NewWriterLevel only returns error on a bad level, we are guaranteeing
			// that this will be a valid level so it is okay to ignore the returned
			// error.
			w, _ := gzip.NewWriterLevel(nil, level)
			return w
		},
	}
}
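The pool-per-level scheme above avoids reallocating a gzip writer (and its sizable internal buffers) on every response. The same get/reset/put idiom in isolation, as a generic sketch using only the standard library (not taken from the vendored code):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"sync"
)

var writerPool = sync.Pool{
	New: func() interface{} {
		w, _ := gzip.NewWriterLevel(nil, gzip.BestSpeed) // level is known-valid
		return w
	},
}

func compress(data []byte) []byte {
	var buf bytes.Buffer
	zw := writerPool.Get().(*gzip.Writer)
	zw.Reset(&buf) // point the pooled writer at a fresh destination
	zw.Write(data)
	zw.Close()         // flushes and writes the gzip footer
	writerPool.Put(zw) // return the writer for reuse
	return buf.Bytes()
}

func main() {
	fmt.Println(len(compress(bytes.Repeat([]byte("abc"), 1000))))
}
```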
// GzipResponseWriter provides an http.ResponseWriter interface, which gzips
// bytes before writing them to the underlying response. This doesn't close the
// writers, so don't forget to do that.
// It can be configured to skip responses smaller than minSize.
type GzipResponseWriter struct {
	http.ResponseWriter
	index int // Index for gzipWriterPools.
	gw    *gzip.Writer

	code int // Saves the WriteHeader value.

	minSize int    // Specifies the minimum response size to gzip. If the response length is bigger than this value, it is compressed.
	buf     []byte // Holds the first part of the write before reaching the minSize or the end of the write.
	ignore  bool   // If true, then we immediately passthru writes to the underlying ResponseWriter.

	contentTypes []parsedContentType // Only compress if the response is one of these content-types. All are accepted if empty.
}

type GzipResponseWriterWithCloseNotify struct {
	*GzipResponseWriter
}

func (w GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool {
	return w.ResponseWriter.(http.CloseNotifier).CloseNotify()
}

// Write appends data to the gzip writer.
func (w *GzipResponseWriter) Write(b []byte) (int, error) {
	// GZIP responseWriter is initialized. Use the GZIP responseWriter.
	if w.gw != nil {
		return w.gw.Write(b)
	}

	// If we have already decided not to use GZIP, immediately passthrough.
	if w.ignore {
		return w.ResponseWriter.Write(b)
	}

	// Save the write into a buffer for later use in GZIP responseWriter
	// (if content is long enough) or at close with regular responseWriter.
	// On the first write, w.buf changes from nil to a valid slice.
	w.buf = append(w.buf, b...)

	var (
		cl, _ = strconv.Atoi(w.Header().Get(contentLength))
		ct    = w.Header().Get(contentType)
		ce    = w.Header().Get(contentEncoding)
	)
	// Only continue if they didn't already choose an encoding or a known unhandled content length or type.
	if ce == "" && (cl == 0 || cl >= w.minSize) && (ct == "" || handleContentType(w.contentTypes, ct)) {
		// If the current buffer is less than minSize and a Content-Length isn't set, then wait until we have more data.
		if len(w.buf) < w.minSize && cl == 0 {
			return len(b), nil
		}
		// If the Content-Length is larger than minSize or the current buffer is larger than minSize, then continue.
		if cl >= w.minSize || len(w.buf) >= w.minSize {
			// If a Content-Type wasn't specified, infer it from the current buffer.
			if ct == "" {
				ct = http.DetectContentType(w.buf)
				w.Header().Set(contentType, ct)
			}
			// If the Content-Type is acceptable to GZIP, initialize the GZIP writer.
			if handleContentType(w.contentTypes, ct) {
				if err := w.startGzip(); err != nil {
					return 0, err
				}
				return len(b), nil
			}
		}
	}
	// If we got here, we should not GZIP this response.
	if err := w.startPlain(); err != nil {
		return 0, err
	}
	return len(b), nil
}
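When no Content-Type header has been set, Write above infers one from the buffered bytes via the standard library's sniffer. A standalone illustration of that step:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// http.DetectContentType applies the WHATWG sniffing algorithm to at
	// most the first 512 bytes and always returns a valid MIME type.
	fmt.Println(http.DetectContentType([]byte("<html><body>hi</body></html>"))) // text/html; charset=utf-8
	fmt.Println(http.DetectContentType([]byte{0x1f, 0x8b, 0x08}))               // application/x-gzip
}
```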
// startGzip initializes a GZIP writer and writes the buffer.
func (w *GzipResponseWriter) startGzip() error {
	// Set the GZIP header.
	w.Header().Set(contentEncoding, "gzip")

	// If the Content-Length is already set, then calls to Write on gzip
	// will fail to set the Content-Length header since it's already set.
	// See: https://github.com/golang/go/issues/14975.
	w.Header().Del(contentLength)

	// Write the header to gzip response.
	if w.code != 0 {
		w.ResponseWriter.WriteHeader(w.code)
		// Ensure that no other WriteHeader's happen.
		w.code = 0
	}

	// Initialize and flush the buffer into the gzip response if there are any bytes.
	// If there aren't any, we shouldn't initialize it yet because on Close it will
	// write the gzip header even if nothing was ever written.
	if len(w.buf) > 0 {
		// Initialize the GZIP response.
		w.init()
		n, err := w.gw.Write(w.buf)

		// This should never happen (per io.Writer docs), but if the write didn't
		// accept the entire buffer but returned no specific error, we have no clue
		// what's going on, so abort just to be safe.
		if err == nil && n < len(w.buf) {
			err = io.ErrShortWrite
		}
		return err
	}
	return nil
}

// startPlain writes the buffered bytes to the underlying ResponseWriter without gzip.
func (w *GzipResponseWriter) startPlain() error {
	if w.code != 0 {
		w.ResponseWriter.WriteHeader(w.code)
		// Ensure that no other WriteHeader's happen.
		w.code = 0
	}
	w.ignore = true
	// If Write was never called then don't call Write on the underlying ResponseWriter.
	if w.buf == nil {
		return nil
	}
	n, err := w.ResponseWriter.Write(w.buf)
	// This should never happen (per io.Writer docs), but if the write didn't
	// accept the entire buffer but returned no specific error, we have no clue
	// what's going on, so abort just to be safe. (The length check must run
	// before the buffer is released below.)
	if err == nil && n < len(w.buf) {
		err = io.ErrShortWrite
	}
	w.buf = nil
	return err
}

// WriteHeader just saves the response code until close or GZIP effective writes.
func (w *GzipResponseWriter) WriteHeader(code int) {
	if w.code == 0 {
		w.code = code
	}
}

// init grabs a new gzip writer from the gzipWriterPool and writes the correct
// content encoding header.
func (w *GzipResponseWriter) init() {
	// Bytes written during ServeHTTP are redirected to this gzip writer
	// before being written to the underlying response.
	gzw := gzipWriterPools[w.index].Get().(*gzip.Writer)
	gzw.Reset(w.ResponseWriter)
	w.gw = gzw
}

// Close will close the gzip.Writer and will put it back in the gzipWriterPool.
func (w *GzipResponseWriter) Close() error {
	if w.ignore {
		return nil
	}

	if w.gw == nil {
		// GZIP not triggered yet, write out regular response.
		err := w.startPlain()
		// Returns the error if any at write.
		if err != nil {
			err = fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", err.Error())
		}
		return err
	}

	err := w.gw.Close()
	gzipWriterPools[w.index].Put(w.gw)
	w.gw = nil
	return err
}

// Flush flushes the underlying *gzip.Writer and then the underlying
// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter
// an http.Flusher.
func (w *GzipResponseWriter) Flush() {
	if w.gw == nil && !w.ignore {
		// Only flush once startGzip or startPlain has been called.
		//
		// Flush is thus a no-op until we're certain whether a plain
		// or gzipped response will be served.
		return
	}

	if w.gw != nil {
		w.gw.Flush()
	}

	if fw, ok := w.ResponseWriter.(http.Flusher); ok {
		fw.Flush()
	}
}

// Hijack implements http.Hijacker. If the underlying ResponseWriter is a
// Hijacker, its Hijack method is called. Otherwise an error is returned.
func (w *GzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	if hj, ok := w.ResponseWriter.(http.Hijacker); ok {
		return hj.Hijack()
	}
	return nil, nil, fmt.Errorf("http.Hijacker interface is not supported")
}

// verify Hijacker interface implementation
var _ http.Hijacker = &GzipResponseWriter{}

// MustNewGzipLevelHandler behaves just like NewGzipLevelHandler except that in
// an error case it panics rather than returning an error.
func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler {
	wrap, err := NewGzipLevelHandler(level)
	if err != nil {
		panic(err)
	}
	return wrap
}

// NewGzipLevelHandler returns a wrapper function (often known as middleware)
// which can be used to wrap an HTTP handler to transparently gzip the response
// body if the client supports it (via the Accept-Encoding header). Responses will
// be encoded at the given gzip compression level. An error will be returned only
// if an invalid gzip compression level is given, so if one can ensure the level
// is valid, the returned error can be safely ignored.
func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) {
	return NewGzipLevelAndMinSize(level, DefaultMinSize)
}

// NewGzipLevelAndMinSize behaves as NewGzipLevelHandler except it lets the caller
// specify the minimum size before compression.
func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) {
	return GzipHandlerWithOpts(CompressionLevel(level), MinSize(minSize))
}

func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error) {
	c := &config{
		level:   gzip.DefaultCompression,
		minSize: DefaultMinSize,
	}

	for _, o := range opts {
		o(c)
	}

	if err := c.validate(); err != nil {
		return nil, err
	}

	return func(h http.Handler) http.Handler {
		index := poolIndex(c.level)

		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Add(vary, acceptEncoding)
			if acceptsGzip(r) {
				gw := &GzipResponseWriter{
					ResponseWriter: w,
					index:          index,
					minSize:        c.minSize,
					contentTypes:   c.contentTypes,
				}
				defer gw.Close()

				if _, ok := w.(http.CloseNotifier); ok {
					gwcn := GzipResponseWriterWithCloseNotify{gw}
					h.ServeHTTP(gwcn, r)
				} else {
					h.ServeHTTP(gw, r)
				}
			} else {
				h.ServeHTTP(w, r)
			}
		})
	}, nil
}
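A usage sketch (not part of the vendored file) combining the functional options this constructor accepts; `CompressionLevel`, `MinSize`, and `ContentTypes` are all defined further down in this same file:

```go
package main

import (
	"compress/gzip"
	"io"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	wrapper, err := gziphandler.GzipHandlerWithOpts(
		gziphandler.CompressionLevel(gzip.BestCompression),
		gziphandler.MinSize(860),
		gziphandler.ContentTypes([]string{"application/json"}),
	)
	if err != nil {
		panic(err) // only an invalid level or negative size can fail validation
	}
	http.Handle("/api", wrapper(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		io.WriteString(w, `{"ok": true}`)
	})))
	http.ListenAndServe(":8000", nil)
}
```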
// Parsed representation of one of the inputs to ContentTypes.
// See https://golang.org/pkg/mime/#ParseMediaType
type parsedContentType struct {
	mediaType string
	params    map[string]string
}

// equals returns whether this content type matches another content type.
func (pct parsedContentType) equals(mediaType string, params map[string]string) bool {
	if pct.mediaType != mediaType {
		return false
	}
	// if pct has no params, don't care about other's params
	if len(pct.params) == 0 {
		return true
	}

	// if pct has any params, they must be identical to other's.
	if len(pct.params) != len(params) {
		return false
	}
	for k, v := range pct.params {
		if w, ok := params[k]; !ok || v != w {
			return false
		}
	}
	return true
}

// Used for functional configuration.
type config struct {
	minSize      int
	level        int
	contentTypes []parsedContentType
}

func (c *config) validate() error {
	if c.level != gzip.DefaultCompression && (c.level < gzip.BestSpeed || c.level > gzip.BestCompression) {
		return fmt.Errorf("invalid compression level requested: %d", c.level)
	}

	if c.minSize < 0 {
		return fmt.Errorf("minimum size must not be negative")
	}

	return nil
}

type option func(c *config)

func MinSize(size int) option {
	return func(c *config) {
		c.minSize = size
	}
}

func CompressionLevel(level int) option {
	return func(c *config) {
		c.level = level
	}
}

// ContentTypes specifies a list of content types to compare
// the Content-Type header to before compressing. If none
// match, the response will be returned as-is.
//
// Content types are compared in a case-insensitive, whitespace-ignored
// manner.
//
// A MIME type without any other directive will match a content type
// that has the same MIME type, regardless of that content type's other
// directives. I.e., "text/html" will match both "text/html" and
// "text/html; charset=utf-8".
//
// A MIME type with any other directive will only match a content type
// that has the same MIME type and other directives. I.e.,
// "text/html; charset=utf-8" will only match "text/html; charset=utf-8".
//
// By default, responses are gzipped regardless of
// Content-Type.
func ContentTypes(types []string) option {
	return func(c *config) {
		c.contentTypes = []parsedContentType{}
		for _, v := range types {
			mediaType, params, err := mime.ParseMediaType(v)
			if err == nil {
				c.contentTypes = append(c.contentTypes, parsedContentType{mediaType, params})
			}
		}
	}
}
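The case-insensitive matching documented above hinges on mime.ParseMediaType normalizing the media type and parameter names; a standalone illustration:

```go
package main

import (
	"fmt"
	"mime"
)

func main() {
	mediaType, params, err := mime.ParseMediaType("Text/HTML; Charset=UTF-8")
	if err != nil {
		panic(err)
	}
	// The media type and parameter names come back lower-cased (values keep
	// their case), which is what makes the comparison in equals() above
	// effectively case-insensitive.
	fmt.Println(mediaType)         // text/html
	fmt.Println(params["charset"]) // UTF-8
}
```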
// GzipHandler wraps an HTTP handler, to transparently gzip the response body if
// the client supports it (via the Accept-Encoding header). This will compress at
// the default compression level.
func GzipHandler(h http.Handler) http.Handler {
	wrapper, _ := NewGzipLevelHandler(gzip.DefaultCompression)
	return wrapper(h)
}

// acceptsGzip returns true if the given HTTP request indicates that it will
// accept a gzipped response.
func acceptsGzip(r *http.Request) bool {
	acceptedEncodings, _ := parseEncodings(r.Header.Get(acceptEncoding))
	return acceptedEncodings["gzip"] > 0.0
}

// returns true if we've been configured to compress the specific content type.
func handleContentType(contentTypes []parsedContentType, ct string) bool {
	// If contentTypes is empty we handle all content types.
	if len(contentTypes) == 0 {
		return true
	}

	mediaType, params, err := mime.ParseMediaType(ct)
	if err != nil {
		return false
	}

	for _, c := range contentTypes {
		if c.equals(mediaType, params) {
			return true
		}
	}

	return false
}

// parseEncodings attempts to parse a list of codings, per RFC 2616, as might
// appear in an Accept-Encoding header. It returns a map of content-codings to
// quality values, and an error containing the errors encountered. It's probably
// safe to ignore those, because silently ignoring errors is how the internet
// works.
//
// See: http://tools.ietf.org/html/rfc2616#section-14.3.
func parseEncodings(s string) (codings, error) {
	c := make(codings)
	var e []string

	for _, ss := range strings.Split(s, ",") {
		coding, qvalue, err := parseCoding(ss)

		if err != nil {
			e = append(e, err.Error())
		} else {
			c[coding] = qvalue
		}
	}

	// TODO (adammck): Use a proper multi-error struct, so the individual errors
	// can be extracted if anyone cares.
	if len(e) > 0 {
		return c, fmt.Errorf("errors while parsing encodings: %s", strings.Join(e, ", "))
	}

	return c, nil
}

// parseCoding parses a single coding (content-coding with an optional qvalue),
// as might appear in an Accept-Encoding header. It attempts to forgive minor
// formatting errors.
func parseCoding(s string) (coding string, qvalue float64, err error) {
	for n, part := range strings.Split(s, ";") {
		part = strings.TrimSpace(part)
		qvalue = DefaultQValue

		if n == 0 {
			coding = strings.ToLower(part)
		} else if strings.HasPrefix(part, "q=") {
			qvalue, err = strconv.ParseFloat(strings.TrimPrefix(part, "q="), 64)

			if qvalue < 0.0 {
				qvalue = 0.0
			} else if qvalue > 1.0 {
				qvalue = 1.0
			}
		}
	}

	if coding == "" {
		err = fmt.Errorf("empty content-coding")
	}

	return
}
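A condensed, runnable sketch of the qvalue parsing the two unexported functions above perform (illustration only; the real functions also clamp q to [0, 1] and collect per-coding errors):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	header := "gzip;q=0.8, identity;q=0.5, br"
	for _, part := range strings.Split(header, ",") {
		fields := strings.Split(strings.TrimSpace(part), ";")
		coding, q := strings.ToLower(fields[0]), 1.0 // 1.0 mirrors DefaultQValue
		for _, f := range fields[1:] {
			f = strings.TrimSpace(f)
			if strings.HasPrefix(f, "q=") {
				q, _ = strconv.ParseFloat(strings.TrimPrefix(f, "q="), 64)
			}
		}
		fmt.Printf("%s -> %.1f\n", coding, q) // gzip -> 0.8, identity -> 0.5, br -> 1.0
	}
}
```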
43 vendor/github.com/NYTimes/gziphandler/gzip_go18.go generated vendored Normal file
@@ -0,0 +1,43 @@
// +build go1.8

package gziphandler

import "net/http"

// Push initiates an HTTP/2 server push.
// Push returns ErrNotSupported if the client has disabled push or if push
// is not supported on the underlying connection.
func (w *GzipResponseWriter) Push(target string, opts *http.PushOptions) error {
	pusher, ok := w.ResponseWriter.(http.Pusher)
	if ok && pusher != nil {
		return pusher.Push(target, setAcceptEncodingForPushOptions(opts))
	}
	return http.ErrNotSupported
}

// setAcceptEncodingForPushOptions sets "Accept-Encoding" : "gzip" for PushOptions without overriding existing headers.
func setAcceptEncodingForPushOptions(opts *http.PushOptions) *http.PushOptions {
	if opts == nil {
		opts = &http.PushOptions{
			Header: http.Header{
				acceptEncoding: []string{"gzip"},
			},
		}
		return opts
	}

	if opts.Header == nil {
		opts.Header = http.Header{
			acceptEncoding: []string{"gzip"},
		}
		return opts
	}

	if encoding := opts.Header.Get(acceptEncoding); encoding == "" {
		opts.Header.Add(acceptEncoding, "gzip")
		return opts
	}

	return opts
}
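Push above simply forwards to the standard http.Pusher interface after ensuring pushed requests also advertise gzip. Typical caller-side usage, as a sketch (not from the vendored code; the cert paths are placeholders):

```go
package main

import "net/http"

func handler(w http.ResponseWriter, r *http.Request) {
	// On an HTTP/2 connection the ResponseWriter implements http.Pusher;
	// wrapped by GzipResponseWriter, the pushed request carries
	// "Accept-Encoding: gzip" thanks to setAcceptEncodingForPushOptions.
	if pusher, ok := w.(http.Pusher); ok {
		// Push errors are advisory; ErrNotSupported just means HTTP/1.x.
		_ = pusher.Push("/static/app.css", nil)
	}
	w.Write([]byte(`<link rel="stylesheet" href="/static/app.css">`))
}

func main() {
	http.HandleFunc("/", handler)
	// HTTP/2 push requires TLS; cert.pem and key.pem are placeholder paths.
	http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil)
}
```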
26 vendor/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE generated vendored Normal file
@@ -0,0 +1,26 @@
Copyright 2021 The ANTLR Project

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
159 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go generated vendored Normal file
@@ -0,0 +1,159 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "sync"

var ATNInvalidAltNumber int

type ATN struct {
	// DecisionToState is the decision points for all rules, subrules, optional
	// blocks, ()+, ()*, etc. Used to build DFA predictors for them.
	DecisionToState []DecisionState

	// grammarType is the ATN type and is used for deserializing ATNs from strings.
	grammarType int

	// lexerActions is referenced by action transitions in the ATN for lexer ATNs.
	lexerActions []LexerAction

	// maxTokenType is the maximum value for any symbol recognized by a transition in the ATN.
	maxTokenType int

	modeNameToStartState map[string]*TokensStartState

	modeToStartState []*TokensStartState

	// ruleToStartState maps from rule index to starting state number.
	ruleToStartState []*RuleStartState

	// ruleToStopState maps from rule index to stop state number.
	ruleToStopState []*RuleStopState

	// ruleToTokenType maps the rule index to the resulting token type for lexer
	// ATNs. For parser ATNs, it maps the rule index to the generated bypass token
	// type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was
	// specified, and otherwise is nil.
	ruleToTokenType []int

	states []ATNState

	mu      sync.Mutex
	stateMu sync.RWMutex
	edgeMu  sync.RWMutex
}

func NewATN(grammarType int, maxTokenType int) *ATN {
	return &ATN{
		grammarType:          grammarType,
		maxTokenType:         maxTokenType,
		modeNameToStartState: make(map[string]*TokensStartState),
	}
}

// NextTokensInContext computes the set of valid tokens that can occur starting
// in state s. If ctx is nil, the set of tokens will not include what can follow
// the rule surrounding s. In other words, the set will be restricted to tokens
// reachable staying within the rule of s.
func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
	return NewLL1Analyzer(a).Look(s, nil, ctx)
}

// NextTokensNoContext computes the set of valid tokens that can occur starting
// in s and staying in same rule. Token.EPSILON is in set if we reach end of
// rule.
func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
	a.mu.Lock()
	defer a.mu.Unlock()
	iset := s.GetNextTokenWithinRule()
	if iset == nil {
		iset = a.NextTokensInContext(s, nil)
		iset.readOnly = true
		s.SetNextTokenWithinRule(iset)
	}
	return iset
}
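NextTokensNoContext is a compute-once cache guarded by the ATN mutex: the expensive LL(1) analysis runs on first use, and the result is stored on the state and treated as read-only afterwards. The same idiom in isolation, as a generic sketch (not ANTLR code):

```go
package main

import (
	"fmt"
	"sync"
)

type lazy struct {
	mu  sync.Mutex
	val []int // nil until first computed
}

func (l *lazy) get() []int {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.val == nil {
		// The expensive analysis runs at most once; later callers reuse
		// the cached result, which must then be treated as read-only.
		l.val = []int{1, 2, 3}
	}
	return l.val
}

func main() {
	var l lazy
	fmt.Println(l.get())
	fmt.Println(l.get()) // second call hits the cache
}
```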
func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
	if ctx == nil {
		return a.NextTokensNoContext(s)
	}

	return a.NextTokensInContext(s, ctx)
}

func (a *ATN) addState(state ATNState) {
	if state != nil {
		state.SetATN(a)
		state.SetStateNumber(len(a.states))
	}

	a.states = append(a.states, state)
}

func (a *ATN) removeState(state ATNState) {
	a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice
}

func (a *ATN) defineDecisionState(s DecisionState) int {
	a.DecisionToState = append(a.DecisionToState, s)
	s.setDecision(len(a.DecisionToState) - 1)

	return s.getDecision()
}

func (a *ATN) getDecisionState(decision int) DecisionState {
	if len(a.DecisionToState) == 0 {
		return nil
	}

	return a.DecisionToState[decision]
}

// getExpectedTokens computes the set of input symbols which could follow ATN
// state number stateNumber in the specified full parse context ctx and returns
// the set of potentially valid input symbols which could follow the specified
// state in the specified context. This method considers the complete parser
// context, but does not evaluate semantic predicates (i.e. all predicates
// encountered during the calculation are assumed true). If a path in the ATN
// exists from the starting state to the RuleStopState of the outermost context
// without Matching any symbols, Token.EOF is added to the returned set.
//
// A nil ctx defaults to ParserRuleContext.EMPTY.
//
// It panics if the ATN does not contain state stateNumber.
func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet {
	if stateNumber < 0 || stateNumber >= len(a.states) {
		panic("Invalid state number.")
	}

	s := a.states[stateNumber]
	following := a.NextTokens(s, nil)

	if !following.contains(TokenEpsilon) {
		return following
	}

	expected := NewIntervalSet()

	expected.addSet(following)
	expected.removeOne(TokenEpsilon)

	for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
		invokingState := a.states[ctx.GetInvokingState()]
		rt := invokingState.GetTransitions()[0]

		following = a.NextTokens(rt.(*RuleTransition).followState, nil)
		expected.addSet(following)
		expected.removeOne(TokenEpsilon)
		ctx = ctx.GetParent().(RuleContext)
	}

	if following.contains(TokenEpsilon) {
		expected.addOne(TokenEOF)
	}

	return expected
}
295 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go generated vendored Normal file
@@ -0,0 +1,295 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
)

type comparable interface {
	equals(other interface{}) bool
}

// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
// context). The syntactic context is a graph-structured stack node whose
// path(s) to the root is the rule invocation(s) chain used to arrive at the
// state. The semantic context is the tree of semantic predicates encountered
// before reaching an ATN state.
type ATNConfig interface {
	comparable

	hash() int

	GetState() ATNState
	GetAlt() int
	GetSemanticContext() SemanticContext

	GetContext() PredictionContext
	SetContext(PredictionContext)

	GetReachesIntoOuterContext() int
	SetReachesIntoOuterContext(int)

	String() string

	getPrecedenceFilterSuppressed() bool
	setPrecedenceFilterSuppressed(bool)
}

type BaseATNConfig struct {
	precedenceFilterSuppressed bool
	state                      ATNState
	alt                        int
	context                    PredictionContext
	semanticContext            SemanticContext
	reachesIntoOuterContext    int
}

func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup
	return &BaseATNConfig{
		state:                   old.state,
		alt:                     old.alt,
		context:                 old.context,
		semanticContext:         old.semanticContext,
		reachesIntoOuterContext: old.reachesIntoOuterContext,
	}
}

func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
	return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
}

func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
	if semanticContext == nil {
		panic("semanticContext cannot be nil") // TODO: Necessary?
	}

	return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
}

func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
	return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
}

func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
	return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
}

func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
	return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
}

func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
	return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
}

func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
	if semanticContext == nil {
		panic("semanticContext cannot be nil")
	}

	return &BaseATNConfig{
		state:                      state,
		alt:                        c.GetAlt(),
		context:                    context,
		semanticContext:            semanticContext,
		reachesIntoOuterContext:    c.GetReachesIntoOuterContext(),
		precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
	}
}

func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
	return b.precedenceFilterSuppressed
}

func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
	b.precedenceFilterSuppressed = v
}

func (b *BaseATNConfig) GetState() ATNState {
	return b.state
}

func (b *BaseATNConfig) GetAlt() int {
	return b.alt
}

func (b *BaseATNConfig) SetContext(v PredictionContext) {
	b.context = v
}

func (b *BaseATNConfig) GetContext() PredictionContext {
	return b.context
}

func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
	return b.semanticContext
}

func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
	return b.reachesIntoOuterContext
}

func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
	b.reachesIntoOuterContext = v
}

// An ATN configuration is equal to another if both have the same state, they
// predict the same alternative, and syntactic/semantic contexts are the same.
func (b *BaseATNConfig) equals(o interface{}) bool {
	if b == o {
		return true
	}

	var other, ok = o.(*BaseATNConfig)

	if !ok {
		return false
	}

	var equal bool

	if b.context == nil {
		equal = other.context == nil
	} else {
		equal = b.context.equals(other.context)
	}

	var (
		nums = b.state.GetStateNumber() == other.state.GetStateNumber()
		alts = b.alt == other.alt
		cons = b.semanticContext.equals(other.semanticContext)
		sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
	)

	return nums && alts && cons && sups && equal
}

func (b *BaseATNConfig) hash() int {
	var c int
	if b.context != nil {
		c = b.context.hash()
	}

	h := murmurInit(7)
	h = murmurUpdate(h, b.state.GetStateNumber())
	h = murmurUpdate(h, b.alt)
	h = murmurUpdate(h, c)
	h = murmurUpdate(h, b.semanticContext.hash())
	return murmurFinish(h, 4)
}
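hash() folds the config's identifying fields into a single value with the runtime's internal murmur helpers, mirroring equals() above (equal configs must hash equal). The same field-by-field mixing pattern, shown with the standard library's FNV-1a instead of murmur (a sketch, not the actual implementation):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
)

// combine mixes several integer fields into one hash, analogous to the
// murmurInit/murmurUpdate/murmurFinish sequence used above.
func combine(fields ...uint64) uint64 {
	h := fnv.New64a()
	var buf [8]byte
	for _, f := range fields {
		binary.LittleEndian.PutUint64(buf[:], f)
		h.Write(buf[:])
	}
	return h.Sum64()
}

func main() {
	// state number, alt, context hash, semantic-context hash
	fmt.Println(combine(42, 1, 0xdeadbeef, 7))
}
```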
func (b *BaseATNConfig) String() string {
	var s1, s2, s3 string

	if b.context != nil {
		s1 = ",[" + fmt.Sprint(b.context) + "]"
	}

	if b.semanticContext != SemanticContextNone {
		s2 = "," + fmt.Sprint(b.semanticContext)
	}

	if b.reachesIntoOuterContext > 0 {
		s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
	}

	return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
}

type LexerATNConfig struct {
	*BaseATNConfig
	lexerActionExecutor            *LexerActionExecutor
	passedThroughNonGreedyDecision bool
}

func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
	return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
}

func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
	return &LexerATNConfig{
		BaseATNConfig:       NewBaseATNConfig5(state, alt, context, SemanticContextNone),
		lexerActionExecutor: lexerActionExecutor,
	}
}

func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
	return &LexerATNConfig{
		BaseATNConfig:                  NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
		lexerActionExecutor:            c.lexerActionExecutor,
		passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
	}
}

func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
	return &LexerATNConfig{
		BaseATNConfig:                  NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
		lexerActionExecutor:            lexerActionExecutor,
		passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
	}
}

func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
	return &LexerATNConfig{
		BaseATNConfig:                  NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
		lexerActionExecutor:            c.lexerActionExecutor,
		passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
	}
}

func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
	return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
}

func (l *LexerATNConfig) hash() int {
	var f int
	if l.passedThroughNonGreedyDecision {
		f = 1
	} else {
		f = 0
	}
	h := murmurInit(7)
	h = murmurUpdate(h, l.state.GetStateNumber())
	h = murmurUpdate(h, l.alt)
	h = murmurUpdate(h, l.context.hash())
	h = murmurUpdate(h, l.semanticContext.hash())
	h = murmurUpdate(h, f)
	h = murmurUpdate(h, l.lexerActionExecutor.hash())
	h = murmurFinish(h, 6)
	return h
}

func (l *LexerATNConfig) equals(other interface{}) bool {
	var othert, ok = other.(*LexerATNConfig)

	if l == other {
		return true
	} else if !ok {
		return false
	} else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
		return false
	}

	var b bool

	if l.lexerActionExecutor != nil {
		b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor)
	} else {
		b = othert.lexerActionExecutor != nil
	}

	if b {
		return false
	}

	return l.BaseATNConfig.equals(othert.BaseATNConfig)
}

func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
	var ds, ok = target.(DecisionState)

	return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
}
407 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go generated vendored Normal file
@@ -0,0 +1,407 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "fmt"

type ATNConfigSet interface {
	hash() int
	Add(ATNConfig, *DoubleDict) bool
	AddAll([]ATNConfig) bool

	GetStates() Set
	GetPredicates() []SemanticContext
	GetItems() []ATNConfig

	OptimizeConfigs(interpreter *BaseATNSimulator)

	Equals(other interface{}) bool

	Length() int
	IsEmpty() bool
	Contains(ATNConfig) bool
	ContainsFast(ATNConfig) bool
	Clear()
	String() string

	HasSemanticContext() bool
	SetHasSemanticContext(v bool)

	ReadOnly() bool
	SetReadOnly(bool)

	GetConflictingAlts() *BitSet
	SetConflictingAlts(*BitSet)

	Alts() *BitSet

	FullContext() bool

	GetUniqueAlt() int
	SetUniqueAlt(int)

	GetDipsIntoOuterContext() bool
	SetDipsIntoOuterContext(bool)
}

// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
// about its elements and can combine similar configurations using a
// graph-structured stack.
type BaseATNConfigSet struct {
	cachedHash int

	// configLookup is used to determine whether two BaseATNConfigSets are equal. We
	// need all configurations with the same (s, i, _, semctx) to be equal. A key
	// effectively doubles the number of objects associated with ATNConfigs. All
	// keys are hashed by (s, i, _, pi), not including the context. Wiped out when
	// read-only because a set becomes a DFA state.
	configLookup Set

	// configs is the added elements.
	configs []ATNConfig

	// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
	// info together because it saves recomputation. Can we track conflicts as they
	// are added to save scanning configs later?
	conflictingAlts *BitSet

	// dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
	// we hit a pred while computing a closure operation. Do not make a DFA state
	// from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
	dipsIntoOuterContext bool

	// fullCtx is whether it is part of a full context LL prediction. Used to
	// determine how to merge $. It is a wildcard with SLL, but not for an LL
	// context merge.
	fullCtx bool

	// Used in parser and lexer. In lexer, it indicates we hit a pred
	// while computing a closure operation. Don't make a DFA state from a.
	hasSemanticContext bool

	// readOnly is whether it is read-only. Do not
	// allow any code to manipulate the set if true because DFA states will point at
	// sets and those must not change. It does not protect other fields;
	// conflictingAlts in particular is assigned after readOnly.
	readOnly bool

	// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
	// info together because it saves recomputation. Can we track conflicts as they
	// are added to save scanning configs later?
	uniqueAlt int
}

func (b *BaseATNConfigSet) Alts() *BitSet {
	alts := NewBitSet()
	for _, it := range b.configs {
		alts.add(it.GetAlt())
	}
	return alts
}

func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
	return &BaseATNConfigSet{
		cachedHash:   -1,
		configLookup: newArray2DHashSetWithCap(hashATNConfig, equalATNConfigs, 16, 2),
		fullCtx:      fullCtx,
	}
}

// Add merges contexts with existing configs for (s, i, pi, _), where s is the
// ATNConfig.state, i is the ATNConfig.alt, and pi is the
// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
// dipsIntoOuterContext and hasSemanticContext when necessary.
func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
	if b.readOnly {
		panic("set is read-only")
	}

	if config.GetSemanticContext() != SemanticContextNone {
		b.hasSemanticContext = true
	}

	if config.GetReachesIntoOuterContext() > 0 {
		b.dipsIntoOuterContext = true
	}

	existing := b.configLookup.Add(config).(ATNConfig)

	if existing == config {
		b.cachedHash = -1
		b.configs = append(b.configs, config) // Track order here
		return true
	}

	// Merge a previous (s, i, pi, _) with it and save the result
	rootIsWildcard := !b.fullCtx
	merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)

	// No need to check for existing.context because config.context is in the cache,
	// since the only way to create new graphs is the "call rule" and here. We cache
	// at both places.
	existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))

	// Preserve the precedence filter suppression during the merge
	if config.getPrecedenceFilterSuppressed() {
		existing.setPrecedenceFilterSuppressed(true)
	}

	// Replace the context because there is no need to do alt mapping
	existing.SetContext(merged)

	return true
}
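Add above is an "insert or merge" on a hash set keyed by (state, alt, semantic context): if an equal element already exists, the prediction contexts are merged into it in place instead of appending a duplicate. The skeleton of that idiom, as a generic sketch (not the ANTLR implementation):

```go
package main

import "fmt"

type config struct {
	key  string   // stands in for (state, alt, semantic context)
	ctxs []string // stands in for the merged prediction context
}

type configSet struct {
	lookup map[string]*config // the de-duplicating index (like configLookup)
	order  []*config          // insertion order (like b.configs)
}

func (s *configSet) add(c *config) {
	if existing, ok := s.lookup[c.key]; ok {
		// Same key already present: merge into the existing entry rather
		// than growing the set.
		existing.ctxs = append(existing.ctxs, c.ctxs...)
		return
	}
	s.lookup[c.key] = c
	s.order = append(s.order, c)
}

func main() {
	s := &configSet{lookup: map[string]*config{}}
	s.add(&config{key: "s1/alt1", ctxs: []string{"a"}})
	s.add(&config{key: "s1/alt1", ctxs: []string{"b"}})
	fmt.Println(len(s.order), s.order[0].ctxs) // 1 [a b]
}
```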
func (b *BaseATNConfigSet) GetStates() Set {
	states := newArray2DHashSet(nil, nil)

	for i := 0; i < len(b.configs); i++ {
		states.Add(b.configs[i].GetState())
	}

	return states
}

func (b *BaseATNConfigSet) HasSemanticContext() bool {
	return b.hasSemanticContext
}

func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
	b.hasSemanticContext = v
}

func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
	preds := make([]SemanticContext, 0)

	for i := 0; i < len(b.configs); i++ {
		c := b.configs[i].GetSemanticContext()

		if c != SemanticContextNone {
			preds = append(preds, c)
		}
	}

	return preds
}

func (b *BaseATNConfigSet) GetItems() []ATNConfig {
	return b.configs
}

func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
	if b.readOnly {
		panic("set is read-only")
	}

	if b.configLookup.Len() == 0 {
		return
	}

	for i := 0; i < len(b.configs); i++ {
		config := b.configs[i]

		config.SetContext(interpreter.getCachedContext(config.GetContext()))
	}
}

func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
	for i := 0; i < len(coll); i++ {
		b.Add(coll[i], nil)
	}

	return false
}

func (b *BaseATNConfigSet) Equals(other interface{}) bool {
	if b == other {
		return true
	} else if _, ok := other.(*BaseATNConfigSet); !ok {
		return false
	}

	other2 := other.(*BaseATNConfigSet)

	return b.configs != nil &&
		// TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary?
		b.fullCtx == other2.fullCtx &&
		b.uniqueAlt == other2.uniqueAlt &&
		b.conflictingAlts == other2.conflictingAlts &&
		b.hasSemanticContext == other2.hasSemanticContext &&
		b.dipsIntoOuterContext == other2.dipsIntoOuterContext
}

func (b *BaseATNConfigSet) hash() int {
	if b.readOnly {
		if b.cachedHash == -1 {
			b.cachedHash = b.hashCodeConfigs()
		}

		return b.cachedHash
	}

	return b.hashCodeConfigs()
}

func (b *BaseATNConfigSet) hashCodeConfigs() int {
	h := 1
	for _, config := range b.configs {
		h = 31*h + config.hash()
	}
	return h
}

func (b *BaseATNConfigSet) Length() int {
	return len(b.configs)
}

func (b *BaseATNConfigSet) IsEmpty() bool {
	return len(b.configs) == 0
}

func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
	if b.configLookup == nil {
		panic("not implemented for read-only sets")
	}

	return b.configLookup.Contains(item)
}

func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
	if b.configLookup == nil {
		panic("not implemented for read-only sets")
	}

	return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set
}

func (b *BaseATNConfigSet) Clear() {
	if b.readOnly {
		panic("set is read-only")
	}

	b.configs = make([]ATNConfig, 0)
	b.cachedHash = -1
	b.configLookup = newArray2DHashSet(nil, equalATNConfigs)
}

func (b *BaseATNConfigSet) FullContext() bool {
	return b.fullCtx
}

func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
	return b.dipsIntoOuterContext
}

func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
	b.dipsIntoOuterContext = v
}

func (b *BaseATNConfigSet) GetUniqueAlt() int {
	return b.uniqueAlt
}

func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
	b.uniqueAlt = v
}

func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
	return b.conflictingAlts
}

func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
	b.conflictingAlts = v
}

func (b *BaseATNConfigSet) ReadOnly() bool {
	return b.readOnly
}

func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
	b.readOnly = readOnly

	if readOnly {
		b.configLookup = nil // Read only, so no need for the lookup cache
	}
}

func (b *BaseATNConfigSet) String() string {
	s := "["

	for i, c := range b.configs {
		s += c.String()

		if i != len(b.configs)-1 {
			s += ", "
		}
	}

	s += "]"

	if b.hasSemanticContext {
		s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
	}

	if b.uniqueAlt != ATNInvalidAltNumber {
		s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
	}

	if b.conflictingAlts != nil {
		s += ",conflictingAlts=" + b.conflictingAlts.String()
	}

	if b.dipsIntoOuterContext {
		s += ",dipsIntoOuterContext"
	}

	return s
}

type OrderedATNConfigSet struct {
	*BaseATNConfigSet
}

func NewOrderedATNConfigSet() *OrderedATNConfigSet {
	b := NewBaseATNConfigSet(false)

	b.configLookup = newArray2DHashSet(nil, nil)

	return &OrderedATNConfigSet{BaseATNConfigSet: b}
}

func hashATNConfig(i interface{}) int {
	o := i.(ATNConfig)
	hash := 7
	hash = 31*hash + o.GetState().GetStateNumber()
	hash = 31*hash + o.GetAlt()
	hash = 31*hash + o.GetSemanticContext().hash()
	return hash
}

func equalATNConfigs(a, b interface{}) bool {
	if a == nil || b == nil {
		return false
	}

	if a == b {
		return true
	}

	var ai, ok = a.(ATNConfig)
	var bi, ok1 = b.(ATNConfig)

	if !ok || !ok1 {
		return false
	}

	if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() {
		return false
	}

	if ai.GetAlt() != bi.GetAlt() {
		return false
	}

	return ai.GetSemanticContext().equals(bi.GetSemanticContext())
}
61 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go generated vendored Normal file
@@ -0,0 +1,61 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "errors"

var defaultATNDeserializationOptions = ATNDeserializationOptions{true, true, false}

type ATNDeserializationOptions struct {
	readOnly                      bool
	verifyATN                     bool
	generateRuleBypassTransitions bool
}

func (opts *ATNDeserializationOptions) ReadOnly() bool {
	return opts.readOnly
}

func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) {
	if opts.readOnly {
		panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
	}
	opts.readOnly = readOnly
}

func (opts *ATNDeserializationOptions) VerifyATN() bool {
	return opts.verifyATN
}

func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) {
	if opts.readOnly {
		panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
	}
	opts.verifyATN = verifyATN
}

func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool {
	return opts.generateRuleBypassTransitions
}

func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) {
	if opts.readOnly {
		panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
	}
	opts.generateRuleBypassTransitions = generateRuleBypassTransitions
}

func DefaultATNDeserializationOptions() *ATNDeserializationOptions {
	return NewATNDeserializationOptions(&defaultATNDeserializationOptions)
}

func NewATNDeserializationOptions(other *ATNDeserializationOptions) *ATNDeserializationOptions {
	o := new(ATNDeserializationOptions)
	if other != nil {
		*o = *other
		o.readOnly = false
	}
	return o
}
683 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go generated vendored Normal file
@@ -0,0 +1,683 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strconv"
)

const serializedVersion = 4

type loopEndStateIntPair struct {
	item0 *LoopEndState
	item1 int
}

type blockStartStateIntPair struct {
	item0 BlockStartState
	item1 int
}

type ATNDeserializer struct {
	options *ATNDeserializationOptions
	data    []int32
	pos     int
}

func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
	if options == nil {
		options = &defaultATNDeserializationOptions
	}

	return &ATNDeserializer{options: options}
}

func stringInSlice(a string, list []string) int {
	for i, b := range list {
		if b == a {
			return i
		}
	}

	return -1
}

func (a *ATNDeserializer) Deserialize(data []int32) *ATN {
	a.data = data
	a.pos = 0
	a.checkVersion()

	atn := a.readATN()

	a.readStates(atn)
	a.readRules(atn)
	a.readModes(atn)

	sets := a.readSets(atn, nil)

	a.readEdges(atn, sets)
	a.readDecisions(atn)
	a.readLexerActions(atn)
	a.markPrecedenceDecisions(atn)
	a.verifyATN(atn)

	if a.options.GenerateRuleBypassTransitions() && atn.grammarType == ATNTypeParser {
		a.generateRuleBypassTransitions(atn)
		// Re-verify after modification
		a.verifyATN(atn)
	}

	return atn
}

func (a *ATNDeserializer) checkVersion() {
	version := a.readInt()

	if version != serializedVersion {
		panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(serializedVersion) + ").")
	}
}

func (a *ATNDeserializer) readATN() *ATN {
	grammarType := a.readInt()
	maxTokenType := a.readInt()

	return NewATN(grammarType, maxTokenType)
}

func (a *ATNDeserializer) readStates(atn *ATN) {
	nstates := a.readInt()

	// Allocate worst case size.
	loopBackStateNumbers := make([]loopEndStateIntPair, 0, nstates)
	endStateNumbers := make([]blockStartStateIntPair, 0, nstates)

	// Preallocate states slice.
	atn.states = make([]ATNState, 0, nstates)

	for i := 0; i < nstates; i++ {
		stype := a.readInt()

		// Ignore bad types of states
		if stype == ATNStateInvalidType {
			atn.addState(nil)
			continue
		}

		ruleIndex := a.readInt()

		s := a.stateFactory(stype, ruleIndex)

		if stype == ATNStateLoopEnd {
			loopBackStateNumber := a.readInt()

			loopBackStateNumbers = append(loopBackStateNumbers, loopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber})
		} else if s2, ok := s.(BlockStartState); ok {
			endStateNumber := a.readInt()

			endStateNumbers = append(endStateNumbers, blockStartStateIntPair{s2, endStateNumber})
		}

		atn.addState(s)
	}

	// Delay the assignment of loop back and end states until we know all the state
	// instances have been initialized
	for _, pair := range loopBackStateNumbers {
		pair.item0.loopBackState = atn.states[pair.item1]
	}

	for _, pair := range endStateNumbers {
		pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState))
	}

	numNonGreedyStates := a.readInt()
	for j := 0; j < numNonGreedyStates; j++ {
		stateNumber := a.readInt()

		atn.states[stateNumber].(DecisionState).setNonGreedy(true)
	}

	numPrecedenceStates := a.readInt()
	for j := 0; j < numPrecedenceStates; j++ {
		stateNumber := a.readInt()

		atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true
	}
}

func (a *ATNDeserializer) readRules(atn *ATN) {
	nrules := a.readInt()

	if atn.grammarType == ATNTypeLexer {
		atn.ruleToTokenType = make([]int, nrules)
	}

	atn.ruleToStartState = make([]*RuleStartState, nrules)

	for i := range atn.ruleToStartState {
		s := a.readInt()
		startState := atn.states[s].(*RuleStartState)

		atn.ruleToStartState[i] = startState

		if atn.grammarType == ATNTypeLexer {
			tokenType := a.readInt()

			atn.ruleToTokenType[i] = tokenType
		}
	}

	atn.ruleToStopState = make([]*RuleStopState, nrules)

	for _, state := range atn.states {
		if s2, ok := state.(*RuleStopState); ok {
			atn.ruleToStopState[s2.ruleIndex] = s2
			atn.ruleToStartState[s2.ruleIndex].stopState = s2
		}
	}
}

func (a *ATNDeserializer) readModes(atn *ATN) {
	nmodes := a.readInt()
	atn.modeToStartState = make([]*TokensStartState, nmodes)

	for i := range atn.modeToStartState {
		s := a.readInt()

		atn.modeToStartState[i] = atn.states[s].(*TokensStartState)
	}
}

func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet {
	m := a.readInt()

	// Preallocate the needed capacity.
	if cap(sets)-len(sets) < m {
		isets := make([]*IntervalSet, len(sets), len(sets)+m)
		copy(isets, sets)
		sets = isets
	}

	for i := 0; i < m; i++ {
		iset := NewIntervalSet()

		sets = append(sets, iset)

		n := a.readInt()
		containsEOF := a.readInt()

		if containsEOF != 0 {
			iset.addOne(-1)
		}

		for j := 0; j < n; j++ {
			i1 := a.readInt()
			i2 := a.readInt()

			iset.addRange(i1, i2)
		}
	}

	return sets
}

func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) {
	nedges := a.readInt()

	for i := 0; i < nedges; i++ {
		var (
			src      = a.readInt()
			trg      = a.readInt()
			ttype    = a.readInt()
			arg1     = a.readInt()
			arg2     = a.readInt()
			arg3     = a.readInt()
			trans    = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
			srcState = atn.states[src]
		)

		srcState.AddTransition(trans, -1)
	}

	// Edges for rule stop states can be derived, so they are not serialized
	for _, state := range atn.states {
		for _, t := range state.GetTransitions() {
			var rt, ok = t.(*RuleTransition)

			if !ok {
				continue
			}

			outermostPrecedenceReturn := -1

			if atn.ruleToStartState[rt.getTarget().GetRuleIndex()].isPrecedenceRule {
				if rt.precedence == 0 {
					outermostPrecedenceReturn = rt.getTarget().GetRuleIndex()
				}
			}

			trans := NewEpsilonTransition(rt.followState, outermostPrecedenceReturn)

			atn.ruleToStopState[rt.getTarget().GetRuleIndex()].AddTransition(trans, -1)
		}
	}

	for _, state := range atn.states {
		if s2, ok := state.(BlockStartState); ok {
			// We need to know the end state to set its start state
			if s2.getEndState() == nil {
				panic("IllegalState")
			}

			// Block end states can only be associated to a single block start state
			if s2.getEndState().startState != nil {
				panic("IllegalState")
			}

			s2.getEndState().startState = state
		}

		if s2, ok := state.(*PlusLoopbackState); ok {
			for _, t := range s2.GetTransitions() {
				if t2, ok := t.getTarget().(*PlusBlockStartState); ok {
					t2.loopBackState = state
				}
			}
		} else if s2, ok := state.(*StarLoopbackState); ok {
			for _, t := range s2.GetTransitions() {
				if t2, ok := t.getTarget().(*StarLoopEntryState); ok {
					t2.loopBackState = state
				}
			}
		}
	}
}

func (a *ATNDeserializer) readDecisions(atn *ATN) {
	ndecisions := a.readInt()

	for i := 0; i < ndecisions; i++ {
		s := a.readInt()
		decState := atn.states[s].(DecisionState)

		atn.DecisionToState = append(atn.DecisionToState, decState)
		decState.setDecision(i)
	}
}

func (a *ATNDeserializer) readLexerActions(atn *ATN) {
	if atn.grammarType == ATNTypeLexer {
		count := a.readInt()

		atn.lexerActions = make([]LexerAction, count)

		for i := range atn.lexerActions {
			actionType := a.readInt()
			data1 := a.readInt()
			data2 := a.readInt()
			atn.lexerActions[i] = a.lexerActionFactory(actionType, data1, data2)
		}
	}
}

func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) {
	count := len(atn.ruleToStartState)

	for i := 0; i < count; i++ {
		atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
	}

	for i := 0; i < count; i++ {
		a.generateRuleBypassTransition(atn, i)
	}
}

func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
	bypassStart := NewBasicBlockStartState()

	bypassStart.ruleIndex = idx
	atn.addState(bypassStart)

	bypassStop := NewBlockEndState()

	bypassStop.ruleIndex = idx
	atn.addState(bypassStop)

	bypassStart.endState = bypassStop

	atn.defineDecisionState(bypassStart.BaseDecisionState)

	bypassStop.startState = bypassStart

	var excludeTransition Transition
	var endState ATNState

	if atn.ruleToStartState[idx].isPrecedenceRule {
		// Wrap from the beginning of the rule to the StarLoopEntryState
		endState = nil

		for i := 0; i < len(atn.states); i++ {
			state := atn.states[i]

			if a.stateIsEndStateFor(state, idx) != nil {
				endState = state
				excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0]

				break
			}
		}

		if excludeTransition == nil {
			panic("Couldn't identify final state of the precedence rule prefix section.")
		}
	} else {
		endState = atn.ruleToStopState[idx]
	}

	// All non-excluded transitions that currently target end state need to target
	// blockEnd instead
	for i := 0; i < len(atn.states); i++ {
		state := atn.states[i]

		for j := 0; j < len(state.GetTransitions()); j++ {
			transition := state.GetTransitions()[j]

			if transition == excludeTransition {
				continue
			}

			if transition.getTarget() == endState {
				transition.setTarget(bypassStop)
			}
		}
	}

	// All transitions leaving the rule start state need to leave blockStart instead
	ruleToStartState := atn.ruleToStartState[idx]
	count := len(ruleToStartState.GetTransitions())

	for count > 0 {
		bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1)
		ruleToStartState.SetTransitions([]Transition{ruleToStartState.GetTransitions()[len(ruleToStartState.GetTransitions())-1]})
	}

	// Link the new states
	atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1)
	bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1)

	MatchState := NewBasicState()

	atn.addState(MatchState)
	MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
	bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1)
}

func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState {
	if state.GetRuleIndex() != idx {
		return nil
	}

	if _, ok := state.(*StarLoopEntryState); !ok {
		return nil
	}

	maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()

	if _, ok := maybeLoopEndState.(*LoopEndState); !ok {
		return nil
	}

	var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)

	if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok {
		return state
	}

	return nil
}

// markPrecedenceDecisions analyzes the StarLoopEntryState states in the
// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to
// the correct value.
func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
	for _, state := range atn.states {
		if _, ok := state.(*StarLoopEntryState); !ok {
			continue
		}

		// We analyze the ATN to determine if an ATN decision state is the
		// decision for the closure block that determines whether a
		// precedence rule should continue or complete.
		if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
			maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()

			if s3, ok := maybeLoopEndState.(*LoopEndState); ok {
				var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)

				if s3.epsilonOnlyTransitions && ok2 {
					state.(*StarLoopEntryState).precedenceRuleDecision = true
				}
			}
		}
	}
}

func (a *ATNDeserializer) verifyATN(atn *ATN) {
	if !a.options.VerifyATN() {
		return
	}

	// Verify assumptions
	for _, state := range atn.states {
		if state == nil {
			continue
		}

		a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "")

		switch s2 := state.(type) {
		case *PlusBlockStartState:
			a.checkCondition(s2.loopBackState != nil, "")

		case *StarLoopEntryState:
			a.checkCondition(s2.loopBackState != nil, "")
			a.checkCondition(len(s2.GetTransitions()) == 2, "")

			switch s2.transitions[0].getTarget().(type) {
			case *StarBlockStartState:
				_, ok := s2.transitions[1].getTarget().(*LoopEndState)

				a.checkCondition(ok, "")
				a.checkCondition(!s2.nonGreedy, "")

			case *LoopEndState:
				var _, ok = s2.transitions[1].getTarget().(*StarBlockStartState)

				a.checkCondition(ok, "")
				a.checkCondition(s2.nonGreedy, "")

			default:
				panic("IllegalState")
			}

		case *StarLoopbackState:
			a.checkCondition(len(state.GetTransitions()) == 1, "")

			var _, ok = state.GetTransitions()[0].getTarget().(*StarLoopEntryState)

			a.checkCondition(ok, "")

		case *LoopEndState:
			a.checkCondition(s2.loopBackState != nil, "")

		case *RuleStartState:
			a.checkCondition(s2.stopState != nil, "")

		case BlockStartState:
			a.checkCondition(s2.getEndState() != nil, "")

		case *BlockEndState:
			a.checkCondition(s2.startState != nil, "")

		case DecisionState:
			a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "")

		default:
			var _, ok = s2.(*RuleStopState)

			a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "")
		}
	}
}

func (a *ATNDeserializer) checkCondition(condition bool, message string) {
	if !condition {
		if message == "" {
			message = "IllegalState"
		}

		panic(message)
	}
}

func (a *ATNDeserializer) readInt() int {
	v := a.data[a.pos]

	a.pos++

	return int(v) // data is 32 bits but int is at least that big
}

func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
	target := atn.states[trg]

	switch typeIndex {
	case TransitionEPSILON:
		return NewEpsilonTransition(target, -1)

	case TransitionRANGE:
		if arg3 != 0 {
			return NewRangeTransition(target, TokenEOF, arg2)
		}

		return NewRangeTransition(target, arg1, arg2)

	case TransitionRULE:
		return NewRuleTransition(atn.states[arg1], arg2, arg3, target)

	case TransitionPREDICATE:
		return NewPredicateTransition(target, arg1, arg2, arg3 != 0)

	case TransitionPRECEDENCE:
		return NewPrecedencePredicateTransition(target, arg1)

	case TransitionATOM:
		if arg3 != 0 {
			return NewAtomTransition(target, TokenEOF)
		}

		return NewAtomTransition(target, arg1)

	case TransitionACTION:
		return NewActionTransition(target, arg1, arg2, arg3 != 0)

	case TransitionSET:
		return NewSetTransition(target, sets[arg1])

	case TransitionNOTSET:
		return NewNotSetTransition(target, sets[arg1])

	case TransitionWILDCARD:
		return NewWildcardTransition(target)
	}

	panic("The specified transition type is not valid.")
}

func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState {
	var s ATNState

	switch typeIndex {
	case ATNStateInvalidType:
		return nil

	case ATNStateBasic:
		s = NewBasicState()

	case ATNStateRuleStart:
		s = NewRuleStartState()

	case ATNStateBlockStart:
		s = NewBasicBlockStartState()

	case ATNStatePlusBlockStart:
		s = NewPlusBlockStartState()

	case ATNStateStarBlockStart:
		s = NewStarBlockStartState()

	case ATNStateTokenStart:
		s = NewTokensStartState()

	case ATNStateRuleStop:
		s = NewRuleStopState()

	case ATNStateBlockEnd:
		s = NewBlockEndState()

	case ATNStateStarLoopBack:
		s = NewStarLoopbackState()

	case ATNStateStarLoopEntry:
		s = NewStarLoopEntryState()

	case ATNStatePlusLoopBack:
		s = NewPlusLoopbackState()

	case ATNStateLoopEnd:
		s = NewLoopEndState()

	default:
		panic(fmt.Sprintf("state type %d is invalid", typeIndex))
	}

	s.SetRuleIndex(ruleIndex)

	return s
}

func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction {
	switch typeIndex {
	case LexerActionTypeChannel:
		return NewLexerChannelAction(data1)

	case LexerActionTypeCustom:
		return NewLexerCustomAction(data1, data2)

	case LexerActionTypeMode:
		return NewLexerModeAction(data1)

	case LexerActionTypeMore:
		return LexerMoreActionINSTANCE

	case LexerActionTypePopMode:
		return LexerPopModeActionINSTANCE

	case LexerActionTypePushMode:
		return NewLexerPushModeAction(data1)

	case LexerActionTypeSkip:
		return LexerSkipActionINSTANCE

	case LexerActionTypeType:
		return NewLexerTypeAction(data1)

	default:
		panic(fmt.Sprintf("lexer action %d is invalid", typeIndex))
	}
}
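
A hedged sketch of how a generated recognizer typically drives the deserializer above; serializedATN stands in for the []int32 table that the ANTLR tool emits into generated code:

	import "github.com/antlr/antlr4/runtime/Go/antlr"

	func loadATN(serializedATN []int32) *antlr.ATN {
		// nil options select defaultATNDeserializationOptions
		// (verification on, rule-bypass transitions off).
		return antlr.NewATNDeserializer(nil).Deserialize(serializedATN)
	}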
50 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go generated vendored Normal file
@ -0,0 +1,50 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))

type IATNSimulator interface {
	SharedContextCache() *PredictionContextCache
	ATN() *ATN
	DecisionToDFA() []*DFA
}

type BaseATNSimulator struct {
	atn                *ATN
	sharedContextCache *PredictionContextCache
	decisionToDFA      []*DFA
}

func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
	b := new(BaseATNSimulator)

	b.atn = atn
	b.sharedContextCache = sharedContextCache

	return b
}

func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
	if b.sharedContextCache == nil {
		return context
	}

	visited := make(map[PredictionContext]PredictionContext)

	return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
}

func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
	return b.sharedContextCache
}

func (b *BaseATNSimulator) ATN() *ATN {
	return b.atn
}

func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
	return b.decisionToDFA
}
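
A sketch of how this base is meant to be consumed: the concrete lexer and parser simulators embed BaseATNSimulator, and the three accessors above are already enough to satisfy IATNSimulator:

	func asSimulator(atn *antlr.ATN, cache *antlr.PredictionContextCache) antlr.IATNSimulator {
		// The base alone implements SharedContextCache, ATN, and DecisionToDFA,
		// so it can be returned behind the interface.
		return antlr.NewBaseATNSimulator(atn, cache)
	}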
392 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go generated vendored Normal file
@ -0,0 +1,392 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "strconv"

// Constants for serialization.
const (
	ATNStateInvalidType    = 0
	ATNStateBasic          = 1
	ATNStateRuleStart      = 2
	ATNStateBlockStart     = 3
	ATNStatePlusBlockStart = 4
	ATNStateStarBlockStart = 5
	ATNStateTokenStart     = 6
	ATNStateRuleStop       = 7
	ATNStateBlockEnd       = 8
	ATNStateStarLoopBack   = 9
	ATNStateStarLoopEntry  = 10
	ATNStatePlusLoopBack   = 11
	ATNStateLoopEnd        = 12

	ATNStateInvalidStateNumber = -1
)

var ATNStateInitialNumTransitions = 4

type ATNState interface {
	GetEpsilonOnlyTransitions() bool

	GetRuleIndex() int
	SetRuleIndex(int)

	GetNextTokenWithinRule() *IntervalSet
	SetNextTokenWithinRule(*IntervalSet)

	GetATN() *ATN
	SetATN(*ATN)

	GetStateType() int

	GetStateNumber() int
	SetStateNumber(int)

	GetTransitions() []Transition
	SetTransitions([]Transition)
	AddTransition(Transition, int)

	String() string
	hash() int
}

type BaseATNState struct {
	// NextTokenWithinRule caches lookahead during parsing. Not used during construction.
	NextTokenWithinRule *IntervalSet

	// atn is the current ATN.
	atn *ATN

	epsilonOnlyTransitions bool

	// ruleIndex tracks the Rule index because there are no Rule objects at runtime.
	ruleIndex int

	stateNumber int

	stateType int

	// Track the transitions emanating from this ATN state.
	transitions []Transition
}

func NewBaseATNState() *BaseATNState {
	return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
}

func (as *BaseATNState) GetRuleIndex() int {
	return as.ruleIndex
}

func (as *BaseATNState) SetRuleIndex(v int) {
	as.ruleIndex = v
}

func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
	return as.epsilonOnlyTransitions
}

func (as *BaseATNState) GetATN() *ATN {
	return as.atn
}

func (as *BaseATNState) SetATN(atn *ATN) {
	as.atn = atn
}

func (as *BaseATNState) GetTransitions() []Transition {
	return as.transitions
}

func (as *BaseATNState) SetTransitions(t []Transition) {
	as.transitions = t
}

func (as *BaseATNState) GetStateType() int {
	return as.stateType
}

func (as *BaseATNState) GetStateNumber() int {
	return as.stateNumber
}

func (as *BaseATNState) SetStateNumber(stateNumber int) {
	as.stateNumber = stateNumber
}

func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
	return as.NextTokenWithinRule
}

func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
	as.NextTokenWithinRule = v
}

func (as *BaseATNState) hash() int {
	return as.stateNumber
}

func (as *BaseATNState) String() string {
	return strconv.Itoa(as.stateNumber)
}

func (as *BaseATNState) equals(other interface{}) bool {
	if ot, ok := other.(ATNState); ok {
		return as.stateNumber == ot.GetStateNumber()
	}

	return false
}

func (as *BaseATNState) isNonGreedyExitState() bool {
	return false
}

func (as *BaseATNState) AddTransition(trans Transition, index int) {
	if len(as.transitions) == 0 {
		as.epsilonOnlyTransitions = trans.getIsEpsilon()
	} else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
		as.epsilonOnlyTransitions = false
	}

	if index == -1 {
		as.transitions = append(as.transitions, trans)
	} else {
		as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
		// TODO: as.transitions.splice(index, 1, trans)
	}
}

type BasicState struct {
	*BaseATNState
}

func NewBasicState() *BasicState {
	b := NewBaseATNState()

	b.stateType = ATNStateBasic

	return &BasicState{BaseATNState: b}
}

type DecisionState interface {
	ATNState

	getDecision() int
	setDecision(int)

	getNonGreedy() bool
	setNonGreedy(bool)
}

type BaseDecisionState struct {
	*BaseATNState
	decision  int
	nonGreedy bool
}

func NewBaseDecisionState() *BaseDecisionState {
	return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
}

func (s *BaseDecisionState) getDecision() int {
	return s.decision
}

func (s *BaseDecisionState) setDecision(b int) {
	s.decision = b
}

func (s *BaseDecisionState) getNonGreedy() bool {
	return s.nonGreedy
}

func (s *BaseDecisionState) setNonGreedy(b bool) {
	s.nonGreedy = b
}

type BlockStartState interface {
	DecisionState

	getEndState() *BlockEndState
	setEndState(*BlockEndState)
}

// BaseBlockStartState is the start of a regular (...) block.
type BaseBlockStartState struct {
	*BaseDecisionState
	endState *BlockEndState
}

func NewBlockStartState() *BaseBlockStartState {
	return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
}

func (s *BaseBlockStartState) getEndState() *BlockEndState {
	return s.endState
}

func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
	s.endState = b
}

type BasicBlockStartState struct {
	*BaseBlockStartState
}

func NewBasicBlockStartState() *BasicBlockStartState {
	b := NewBlockStartState()

	b.stateType = ATNStateBlockStart

	return &BasicBlockStartState{BaseBlockStartState: b}
}

var _ BlockStartState = &BasicBlockStartState{}

// BlockEndState is a terminal node of a simple (a|b|c) block.
type BlockEndState struct {
	*BaseATNState
	startState ATNState
}

func NewBlockEndState() *BlockEndState {
	b := NewBaseATNState()

	b.stateType = ATNStateBlockEnd

	return &BlockEndState{BaseATNState: b}
}

// RuleStopState is the last node in the ATN for a rule, unless that rule is the
// start symbol. In that case, there is one transition to EOF. Later, we might
// encode references to all calls to this rule to compute FOLLOW sets for error
// handling.
type RuleStopState struct {
	*BaseATNState
}

func NewRuleStopState() *RuleStopState {
	b := NewBaseATNState()

	b.stateType = ATNStateRuleStop

	return &RuleStopState{BaseATNState: b}
}

type RuleStartState struct {
	*BaseATNState
	stopState        ATNState
	isPrecedenceRule bool
}

func NewRuleStartState() *RuleStartState {
	b := NewBaseATNState()

	b.stateType = ATNStateRuleStart

	return &RuleStartState{BaseATNState: b}
}

// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
// transitions: one to the loop back to start of the block, and one to exit.
type PlusLoopbackState struct {
	*BaseDecisionState
}

func NewPlusLoopbackState() *PlusLoopbackState {
	b := NewBaseDecisionState()

	b.stateType = ATNStatePlusLoopBack

	return &PlusLoopbackState{BaseDecisionState: b}
}

// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
// decision state, but we don't use it for code generation. Somebody might need
// it, so it is included for completeness. In reality, PlusLoopbackState is the
// real decision-making node for A+.
type PlusBlockStartState struct {
	*BaseBlockStartState
	loopBackState ATNState
}

func NewPlusBlockStartState() *PlusBlockStartState {
	b := NewBlockStartState()

	b.stateType = ATNStatePlusBlockStart

	return &PlusBlockStartState{BaseBlockStartState: b}
}

var _ BlockStartState = &PlusBlockStartState{}

// StarBlockStartState is the block that begins a closure loop.
type StarBlockStartState struct {
	*BaseBlockStartState
}

func NewStarBlockStartState() *StarBlockStartState {
	b := NewBlockStartState()

	b.stateType = ATNStateStarBlockStart

	return &StarBlockStartState{BaseBlockStartState: b}
}

var _ BlockStartState = &StarBlockStartState{}

type StarLoopbackState struct {
	*BaseATNState
}

func NewStarLoopbackState() *StarLoopbackState {
	b := NewBaseATNState()

	b.stateType = ATNStateStarLoopBack

	return &StarLoopbackState{BaseATNState: b}
}

type StarLoopEntryState struct {
	*BaseDecisionState
	loopBackState          ATNState
	precedenceRuleDecision bool
}

func NewStarLoopEntryState() *StarLoopEntryState {
	b := NewBaseDecisionState()

	b.stateType = ATNStateStarLoopEntry

	// precedenceRuleDecision (false by default) indicates whether this state can
	// benefit from a precedence DFA during SLL decision making.
	return &StarLoopEntryState{BaseDecisionState: b}
}

// LoopEndState marks the end of a * or + loop.
type LoopEndState struct {
	*BaseATNState
	loopBackState ATNState
}

func NewLoopEndState() *LoopEndState {
	b := NewBaseATNState()

	b.stateType = ATNStateLoopEnd

	return &LoopEndState{BaseATNState: b}
}

// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
type TokensStartState struct {
	*BaseDecisionState
}

func NewTokensStartState() *TokensStartState {
	b := NewBaseDecisionState()

	b.stateType = ATNStateTokenStart

	return &TokensStartState{BaseDecisionState: b}
}
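
A sketch of the epsilon bookkeeping in AddTransition above, assuming target is any already-built state: the first edge decides epsilonOnlyTransitions, and a later mismatch clears it.

	func epsilonTrackingSketch(target antlr.ATNState) bool {
		s := antlr.NewBasicState()
		s.AddTransition(antlr.NewEpsilonTransition(target, -1), -1) // epsilon-only: true
		s.AddTransition(antlr.NewAtomTransition(target, 1), -1)     // mixed edges: false
		return s.GetEpsilonOnlyTransitions()                        // false
	}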
11 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go generated vendored Normal file
@ -0,0 +1,11 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// Represents the type of recognizer an ATN applies to.
const (
	ATNTypeLexer  = 0
	ATNTypeParser = 1
)
12 vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go generated vendored Normal file
@ -0,0 +1,12 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

type CharStream interface {
	IntStream
	GetText(int, int) string
	GetTextFromTokens(start, end Token) string
	GetTextFromInterval(*Interval) string
}
56 vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go generated vendored Normal file
@ -0,0 +1,56 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// TokenFactory creates CommonToken objects.
type TokenFactory interface {
	Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
}

// CommonTokenFactory is the default TokenFactory implementation.
type CommonTokenFactory struct {
	// copyText indicates whether CommonToken.setText should be called after
	// constructing tokens to explicitly set the text. This is useful for cases
	// where the input stream might not be able to provide arbitrary substrings of
	// text from the input after the lexer creates a token (e.g. the
	// implementation of CharStream.GetText in UnbufferedCharStream panics with an
	// UnsupportedOperationException). Explicitly setting the token text allows
	// Token.GetText to be called at any time regardless of the input stream
	// implementation.
	//
	// The default value is false to avoid the performance and memory overhead of
	// copying text for every token unless explicitly requested.
	copyText bool
}

func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
	return &CommonTokenFactory{copyText: copyText}
}

// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not
// explicitly copy token text when constructing tokens.
var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)

func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
	t := NewCommonToken(source, ttype, channel, start, stop)

	t.line = line
	t.column = column

	if text != "" {
		t.SetText(text)
	} else if c.copyText && source.charStream != nil {
		t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
	}

	return t
}

func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
	t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
	t.SetText(text)

	return t
}
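
The trade-off documented above reduces to a single flag; a brief sketch:

	var lazy = antlr.CommonTokenFactoryDEFAULT    // token text read back from the stream on demand
	var eager = antlr.NewCommonTokenFactory(true) // text copied into each token at creation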
447 vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go generated vendored Normal file
@ -0,0 +1,447 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"strconv"
)

// CommonTokenStream is an implementation of TokenStream that loads tokens from
// a TokenSource on-demand and places the tokens in a buffer to provide access
// to any previous token by index. This token stream ignores the value of
// Token.getChannel. If your parser requires the token stream to filter tokens
// to only those on a particular channel, such as Token.DEFAULT_CHANNEL or
// Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream.
type CommonTokenStream struct {
	channel int

	// fetchedEOF indicates whether the Token.EOF token has been fetched from
	// tokenSource and added to tokens. This field improves performance for the
	// following cases:
	//
	// consume: The lookahead check in consume to prevent consuming the EOF symbol is
	// optimized by checking the values of fetchedEOF and p instead of calling LA.
	//
	// fetch: The check to prevent adding multiple EOF symbols into tokens is
	// trivial with this field.
	fetchedEOF bool

	// index indexes into tokens of the current token (the next token to consume).
	// tokens[p] should be LT(1). It is set to -1 when the stream is first
	// constructed or when SetTokenSource is called, indicating that the first token
	// has not yet been fetched from the token source. For additional information,
	// see the documentation of IntStream for a description of initializing methods.
	index int

	// tokenSource is the TokenSource from which tokens for this stream are
	// fetched.
	tokenSource TokenSource

	// tokens is all tokens fetched from the token source. The list is considered a
	// complete view of the input once fetchedEOF is set to true.
	tokens []Token
}

func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
	return &CommonTokenStream{
		channel:     channel,
		index:       -1,
		tokenSource: lexer,
		tokens:      make([]Token, 0),
	}
}

func (c *CommonTokenStream) GetAllTokens() []Token {
	return c.tokens
}

func (c *CommonTokenStream) Mark() int {
	return 0
}

func (c *CommonTokenStream) Release(marker int) {}

func (c *CommonTokenStream) reset() {
	c.Seek(0)
}

func (c *CommonTokenStream) Seek(index int) {
	c.lazyInit()
	c.index = c.adjustSeekIndex(index)
}

func (c *CommonTokenStream) Get(index int) Token {
	c.lazyInit()

	return c.tokens[index]
}

func (c *CommonTokenStream) Consume() {
	SkipEOFCheck := false

	if c.index >= 0 {
		if c.fetchedEOF {
			// The last token in tokens is EOF. Skip the check if p indexes any fetched
			// token except the last.
			SkipEOFCheck = c.index < len(c.tokens)-1
		} else {
			// No EOF token in tokens. Skip the check if p indexes a fetched token.
			SkipEOFCheck = c.index < len(c.tokens)
		}
	} else {
		// Not yet initialized
		SkipEOFCheck = false
	}

	if !SkipEOFCheck && c.LA(1) == TokenEOF {
		panic("cannot consume EOF")
	}

	if c.Sync(c.index + 1) {
		c.index = c.adjustSeekIndex(c.index + 1)
	}
}

// Sync makes sure index i in tokens has a token and returns true if a token is
// located at index i and otherwise false.
func (c *CommonTokenStream) Sync(i int) bool {
	n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?

	if n > 0 {
		fetched := c.fetch(n)
		return fetched >= n
	}

	return true
}

// fetch adds n elements to the buffer and returns the actual number of elements
// added to the buffer.
func (c *CommonTokenStream) fetch(n int) int {
	if c.fetchedEOF {
		return 0
	}

	for i := 0; i < n; i++ {
		t := c.tokenSource.NextToken()

		t.SetTokenIndex(len(c.tokens))
		c.tokens = append(c.tokens, t)

		if t.GetTokenType() == TokenEOF {
			c.fetchedEOF = true

			return i + 1
		}
	}

	return n
}

// GetTokens gets all tokens from start to stop inclusive.
func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token {
	if start < 0 || stop < 0 {
		return nil
	}

	c.lazyInit()

	subset := make([]Token, 0)

	if stop >= len(c.tokens) {
		stop = len(c.tokens) - 1
	}

	for i := start; i < stop; i++ {
		t := c.tokens[i]

		if t.GetTokenType() == TokenEOF {
			break
		}

		if types == nil || types.contains(t.GetTokenType()) {
			subset = append(subset, t)
		}
	}

	return subset
}

func (c *CommonTokenStream) LA(i int) int {
	return c.LT(i).GetTokenType()
}

func (c *CommonTokenStream) lazyInit() {
	if c.index == -1 {
		c.setup()
	}
}

func (c *CommonTokenStream) setup() {
	c.Sync(0)
	c.index = c.adjustSeekIndex(0)
}

func (c *CommonTokenStream) GetTokenSource() TokenSource {
	return c.tokenSource
}

// SetTokenSource resets the token stream by setting its token source.
func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
	c.tokenSource = tokenSource
	c.tokens = make([]Token, 0)
	c.index = -1
}

// NextTokenOnChannel returns the index of the next token on channel given a
// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
// no tokens on channel between i and EOF.
func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
	c.Sync(i)

	if i >= len(c.tokens) {
		return -1
	}

	token := c.tokens[i]

	for token.GetChannel() != c.channel {
		if token.GetTokenType() == TokenEOF {
			return -1
		}

		i++
		c.Sync(i)
		token = c.tokens[i]
	}

	return i
}

// previousTokenOnChannel returns the index of the previous token on channel
// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if
// there are no tokens on channel between i and 0.
func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int {
	for i >= 0 && c.tokens[i].GetChannel() != channel {
		i--
	}

	return i
}

// GetHiddenTokensToRight collects all tokens on a specified channel to the
// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL
// or EOF. If channel is -1, it finds any non-default channel token.
func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token {
	c.lazyInit()

	if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
		panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
	}

	nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
	from := tokenIndex + 1

	// If there are no on-channel tokens to the right, nextOnChannel == -1, so set 'to' to the last token
	var to int

	if nextOnChannel == -1 {
		to = len(c.tokens) - 1
	} else {
		to = nextOnChannel
	}

	return c.filterForChannel(from, to, channel)
}

// GetHiddenTokensToLeft collects all tokens on channel to the left of the
// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is
// -1, it finds any non-default channel token.
func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token {
	c.lazyInit()

	if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
		panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
	}

	prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel)

	if prevOnChannel == tokenIndex-1 {
		return nil
	}

	// If there are none on channel to the left and prevOnChannel == -1 then from = 0
	from := prevOnChannel + 1
	to := tokenIndex - 1

	return c.filterForChannel(from, to, channel)
}

func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token {
	hidden := make([]Token, 0)

	for i := left; i < right+1; i++ {
		t := c.tokens[i]

		if channel == -1 {
			if t.GetChannel() != LexerDefaultTokenChannel {
				hidden = append(hidden, t)
			}
		} else if t.GetChannel() == channel {
			hidden = append(hidden, t)
		}
	}

	if len(hidden) == 0 {
		return nil
	}

	return hidden
}

func (c *CommonTokenStream) GetSourceName() string {
	return c.tokenSource.GetSourceName()
}

func (c *CommonTokenStream) Size() int {
	return len(c.tokens)
}

func (c *CommonTokenStream) Index() int {
	return c.index
}

func (c *CommonTokenStream) GetAllText() string {
	return c.GetTextFromInterval(nil)
}

func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
	if start == nil || end == nil {
		return ""
	}

	return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex()))
}

func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string {
	return c.GetTextFromInterval(interval.GetSourceInterval())
}

func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
	c.lazyInit()
	c.Fill()

	if interval == nil {
		interval = NewInterval(0, len(c.tokens)-1)
	}

	start := interval.Start
	stop := interval.Stop

	if start < 0 || stop < 0 {
		return ""
	}

	if stop >= len(c.tokens) {
		stop = len(c.tokens) - 1
	}

	s := ""

	for i := start; i < stop+1; i++ {
		t := c.tokens[i]

		if t.GetTokenType() == TokenEOF {
			break
		}

		s += t.GetText()
	}

	return s
}

// Fill gets all tokens from the lexer until EOF.
func (c *CommonTokenStream) Fill() {
	c.lazyInit()

	for c.fetch(1000) == 1000 {
		continue
	}
}

func (c *CommonTokenStream) adjustSeekIndex(i int) int {
	return c.NextTokenOnChannel(i, c.channel)
}

func (c *CommonTokenStream) LB(k int) Token {
	if k == 0 || c.index-k < 0 {
		return nil
	}

	i := c.index
	n := 1

	// Find k good tokens looking backward
	for n <= k {
		// Skip off-channel tokens
		i = c.previousTokenOnChannel(i-1, c.channel)
		n++
	}

	if i < 0 {
		return nil
	}

	return c.tokens[i]
}

func (c *CommonTokenStream) LT(k int) Token {
	c.lazyInit()

	if k == 0 {
		return nil
	}

	if k < 0 {
		return c.LB(-k)
	}

	i := c.index
	n := 1 // We know tokens[n] is valid

	// Find k good tokens
	for n < k {
		// Skip off-channel tokens, but make sure to not look past EOF
		if c.Sync(i + 1) {
			i = c.NextTokenOnChannel(i+1, c.channel)
		}

		n++
	}

	return c.tokens[i]
}

// getNumberOfOnChannelTokens counts EOF once.
func (c *CommonTokenStream) getNumberOfOnChannelTokens() int {
	var n int

	c.Fill()

	for i := 0; i < len(c.tokens); i++ {
		t := c.tokens[i]

		if t.GetChannel() == c.channel {
			n++
		}

		if t.GetTokenType() == TokenEOF {
			break
		}
	}

	return n
}
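
A usage sketch for the buffered stream above, assuming lexer is any antlr.Lexer (for example, one produced by the ANTLR code generator):

	import (
		"fmt"

		"github.com/antlr/antlr4/runtime/Go/antlr"
	)

	func dumpTokens(lexer antlr.Lexer) {
		tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
		tokens.Fill() // buffer everything up to and including EOF
		for _, t := range tokens.GetAllTokens() {
			fmt.Println(t.GetText())
		}
	}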
170 vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go generated vendored Normal file
@ -0,0 +1,170 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"sort"
)

type DFA struct {
	// atnStartState is the ATN state in which this was created
	atnStartState DecisionState

	decision int

	// states is all the DFA states. Use Map to get the old state back; Set can only
	// indicate whether it is there.
	states map[int]*DFAState

	s0 *DFAState

	// precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa.
	// True if the DFA is for a precedence decision and false otherwise.
	precedenceDfa bool
}

func NewDFA(atnStartState DecisionState, decision int) *DFA {
	dfa := &DFA{
		atnStartState: atnStartState,
		decision:      decision,
		states:        make(map[int]*DFAState),
	}
	if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
		dfa.precedenceDfa = true
		dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false))
		dfa.s0.isAcceptState = false
		dfa.s0.requiresFullContext = false
	}
	return dfa
}

// getPrecedenceStartState gets the start state for the current precedence and
// returns the start state corresponding to the specified precedence if a start
// state exists for the specified precedence and nil otherwise. d must be a
// precedence DFA. See also isPrecedenceDfa.
func (d *DFA) getPrecedenceStartState(precedence int) *DFAState {
	if !d.getPrecedenceDfa() {
		panic("only precedence DFAs may contain a precedence start state")
	}

	// s0.edges is never nil for a precedence DFA
	if precedence < 0 || precedence >= len(d.getS0().getEdges()) {
		return nil
	}

	return d.getS0().getIthEdge(precedence)
}

// setPrecedenceStartState sets the start state for the current precedence. d
// must be a precedence DFA. See also isPrecedenceDfa.
func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
	if !d.getPrecedenceDfa() {
		panic("only precedence DFAs may contain a precedence start state")
	}

	if precedence < 0 {
		return
	}

	// Synchronization on s0 here is ok. When the DFA is turned into a
	// precedence DFA, s0 will be initialized once and not updated again. s0.edges
	// is never nil for a precedence DFA.
	s0 := d.getS0()
	if precedence >= s0.numEdges() {
		edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...)
		s0.setEdges(edges)
		d.setS0(s0)
	}

	s0.setIthEdge(precedence, startState)
}

func (d *DFA) getPrecedenceDfa() bool {
	return d.precedenceDfa
}

// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs
// from the current DFA configuration, then d.states is cleared, the initial
// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to
// store the start states for individual precedence values if precedenceDfa is
// true or nil otherwise, and d.precedenceDfa is updated.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
	if d.getPrecedenceDfa() != precedenceDfa {
		d.setStates(make(map[int]*DFAState))

		if precedenceDfa {
			precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))

			precedenceState.setEdges(make([]*DFAState, 0))
			precedenceState.isAcceptState = false
			precedenceState.requiresFullContext = false
			d.setS0(precedenceState)
		} else {
			d.setS0(nil)
		}

		d.precedenceDfa = precedenceDfa
	}
}

func (d *DFA) getS0() *DFAState {
	return d.s0
}

func (d *DFA) setS0(s *DFAState) {
	d.s0 = s
}

func (d *DFA) getState(hash int) (*DFAState, bool) {
	s, ok := d.states[hash]
	return s, ok
}

func (d *DFA) setStates(states map[int]*DFAState) {
	d.states = states
}

func (d *DFA) setState(hash int, state *DFAState) {
	d.states[hash] = state
}

func (d *DFA) numStates() int {
	return len(d.states)
}

type dfaStateList []*DFAState

func (d dfaStateList) Len() int           { return len(d) }
func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber }
func (d dfaStateList) Swap(i, j int)      { d[i], d[j] = d[j], d[i] }

// sortedStates returns the states in d sorted by their state number.
func (d *DFA) sortedStates() []*DFAState {
	vs := make([]*DFAState, 0, len(d.states))

	for _, v := range d.states {
		vs = append(vs, v)
	}

	sort.Sort(dfaStateList(vs))

	return vs
}

func (d *DFA) String(literalNames []string, symbolicNames []string) string {
	if d.getS0() == nil {
		return ""
	}

	return NewDFASerializer(d, literalNames, symbolicNames).String()
}

func (d *DFA) ToLexerString() string {
	if d.getS0() == nil {
		return ""
	}

	return NewLexerDFASerializer(d).String()
}
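
A package-internal sketch of the precedence bookkeeping above (these helpers are unexported); entry and start are assumed to exist, with entry carrying precedenceRuleDecision as set by markPrecedenceDecisions:

	func precedenceSketch(entry *StarLoopEntryState, start *DFAState) {
		d := NewDFA(entry, 0) // becomes a precedence DFA when entry.precedenceRuleDecision is set
		if d.getPrecedenceDfa() {
			d.setPrecedenceStartState(3, start) // grows s0.edges on demand
			_ = d.getPrecedenceStartState(3)    // nil for precedences never set
		}
	}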
158 vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go generated vendored Normal file
@ -0,0 +1,158 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "fmt"
    "strconv"
    "strings"
)

// DFASerializer is a DFA walker that knows how to dump them to serialized
// strings.
type DFASerializer struct {
    dfa           *DFA
    literalNames  []string
    symbolicNames []string
}

func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer {
    if literalNames == nil {
        literalNames = make([]string, 0)
    }

    if symbolicNames == nil {
        symbolicNames = make([]string, 0)
    }

    return &DFASerializer{
        dfa:           dfa,
        literalNames:  literalNames,
        symbolicNames: symbolicNames,
    }
}

func (d *DFASerializer) String() string {
    if d.dfa.getS0() == nil {
        return ""
    }

    buf := ""
    states := d.dfa.sortedStates()

    for _, s := range states {
        if s.edges != nil {
            n := len(s.edges)

            for j := 0; j < n; j++ {
                t := s.edges[j]

                if t != nil && t.stateNumber != 0x7FFFFFFF {
                    buf += d.GetStateString(s)
                    buf += "-"
                    buf += d.getEdgeLabel(j)
                    buf += "->"
                    buf += d.GetStateString(t)
                    buf += "\n"
                }
            }
        }
    }

    if len(buf) == 0 {
        return ""
    }

    return buf
}

func (d *DFASerializer) getEdgeLabel(i int) string {
    if i == 0 {
        return "EOF"
    } else if d.literalNames != nil && i-1 < len(d.literalNames) {
        return d.literalNames[i-1]
    } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) {
        return d.symbolicNames[i-1]
    }

    return strconv.Itoa(i - 1)
}

func (d *DFASerializer) GetStateString(s *DFAState) string {
    var a, b string

    if s.isAcceptState {
        a = ":"
    }

    if s.requiresFullContext {
        b = "^"
    }

    baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b

    if s.isAcceptState {
        if s.predicates != nil {
            return baseStateStr + "=>" + fmt.Sprint(s.predicates)
        }

        return baseStateStr + "=>" + fmt.Sprint(s.prediction)
    }

    return baseStateStr
}

type LexerDFASerializer struct {
    *DFASerializer
}

func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
    return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)}
}

func (l *LexerDFASerializer) getEdgeLabel(i int) string {
    var sb strings.Builder
    sb.Grow(6)
    sb.WriteByte('\'')
    sb.WriteRune(rune(i))
    sb.WriteByte('\'')
    return sb.String()
}

func (l *LexerDFASerializer) String() string {
    if l.dfa.getS0() == nil {
        return ""
    }

    buf := ""
    states := l.dfa.sortedStates()

    for i := 0; i < len(states); i++ {
        s := states[i]

        if s.edges != nil {
            n := len(s.edges)

            for j := 0; j < n; j++ {
                t := s.edges[j]

                if t != nil && t.stateNumber != 0x7FFFFFFF {
                    buf += l.GetStateString(s)
                    buf += "-"
                    buf += l.getEdgeLabel(j)
                    buf += "->"
                    buf += l.GetStateString(t)
                    buf += "\n"
                }
            }
        }
    }

    if len(buf) == 0 {
        return ""
    }

    return buf
}
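For grammar debugging it can be handy to dump every decision DFA a parser has built; each serialized line reads <state>-<edge label>-><state>, with accept states prefixed by ':'. A hedged sketch in application code, assuming a generated parser p and that the simulator exposes its DFA table via a DecisionToDFA() accessor as in this runtime:

// Hypothetical debugging snippet (application code, not part of this diff).
for _, dfa := range p.GetInterpreter().DecisionToDFA() {
    if s := dfa.String(p.GetLiteralNames(), p.GetSymbolicNames()); s != "" {
        fmt.Println(s)
    }
}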
171  vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go  generated  vendored  Normal file
@@ -0,0 +1,171 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "fmt"
)

// PredPrediction maps a predicate to a predicted alternative.
type PredPrediction struct {
    alt  int
    pred SemanticContext
}

func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction {
    return &PredPrediction{alt: alt, pred: pred}
}

func (p *PredPrediction) String() string {
    return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
}

// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
// states the ATN can be in after reading each input symbol. That is to say,
// after reading input a1a2..an, the DFA is in a state that represents the
// subset T of the states of the ATN that are reachable from the ATN's start
// state along some path labeled a1a2..an." In conventional NFA-to-DFA
// conversion, therefore, the subset T would be a bitset representing the set of
// states the ATN could be in. We need to track the alt predicted by each state
// as well, however. More importantly, we need to maintain a stack of states,
// tracking the closure operations as they jump from rule to rule, emulating
// rule invocations (method calls). I have to add a stack to simulate the proper
// lookahead sequences for the underlying LL grammar from which the ATN was
// derived.
//
// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
// state (ala normal conversion) and a RuleContext describing the chain of rules
// (if any) followed to arrive at that state.
//
// A DFAState may have multiple references to a particular state, but with
// different ATN contexts (with same or different alts), meaning that state was
// reached via a different set of rule invocations.
type DFAState struct {
    stateNumber int
    configs     ATNConfigSet

    // edges elements point to the target of the symbol. Shift up by 1 so (-1)
    // Token.EOF maps to the first element.
    edges []*DFAState

    isAcceptState bool

    // prediction is the ttype we match or alt we predict if the state is accept.
    // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
    // requiresFullContext.
    prediction int

    lexerActionExecutor *LexerActionExecutor

    // requiresFullContext indicates it was created during an SLL prediction that
    // discovered a conflict between the configurations in the state. Future
    // ParserATNSimulator.execATN invocations immediately jump to full-context
    // prediction if true.
    requiresFullContext bool

    // predicates is the predicates associated with the ATN configurations of the
    // DFA state during SLL parsing. When we have predicates, requiresFullContext
    // is false, since full context prediction evaluates predicates on-the-fly. If
    // this is not nil, then prediction is ATN.INVALID_ALT_NUMBER.
    //
    // We only use these for non-requiresFullContext but conflicting states. That
    // means we know from the context (it's $ or we don't dip into outer context)
    // that it's an ambiguity not a conflict.
    //
    // This list is computed by ParserATNSimulator.predicateDFAState.
    predicates []*PredPrediction
}

func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
    if configs == nil {
        configs = NewBaseATNConfigSet(false)
    }

    return &DFAState{configs: configs, stateNumber: stateNumber}
}

// GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
func (d *DFAState) GetAltSet() Set {
    alts := newArray2DHashSet(nil, nil)

    if d.configs != nil {
        for _, c := range d.configs.GetItems() {
            alts.Add(c.GetAlt())
        }
    }

    if alts.Len() == 0 {
        return nil
    }

    return alts
}

func (d *DFAState) getEdges() []*DFAState {
    return d.edges
}

func (d *DFAState) numEdges() int {
    return len(d.edges)
}

func (d *DFAState) getIthEdge(i int) *DFAState {
    return d.edges[i]
}

func (d *DFAState) setEdges(newEdges []*DFAState) {
    d.edges = newEdges
}

func (d *DFAState) setIthEdge(i int, edge *DFAState) {
    d.edges[i] = edge
}

func (d *DFAState) setPrediction(v int) {
    d.prediction = v
}

// equals returns whether d equals other. Two DFAStates are equal if their ATN
// configuration sets are the same. This method is used to see if a state
// already exists.
//
// Because the number of alternatives and number of ATN configurations are
// finite, there is a finite number of DFA states that can be processed. This is
// necessary to show that the algorithm terminates.
//
// Cannot test the DFA state numbers here because in
// ParserATNSimulator.addDFAState we need to know if any other state exists that
// has this exact set of ATN configurations. The stateNumber is irrelevant.
func (d *DFAState) equals(other interface{}) bool {
    if d == other {
        return true
    } else if _, ok := other.(*DFAState); !ok {
        return false
    }

    return d.configs.Equals(other.(*DFAState).configs)
}

func (d *DFAState) String() string {
    var s string
    if d.isAcceptState {
        if d.predicates != nil {
            s = "=>" + fmt.Sprint(d.predicates)
        } else {
            s = "=>" + fmt.Sprint(d.prediction)
        }
    }

    return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s)
}

func (d *DFAState) hash() int {
    h := murmurInit(7)
    h = murmurUpdate(h, d.configs.hash())
    return murmurFinish(h, 1)
}
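The off-by-one edge layout documented in the struct above (Token.EOF is -1 and is stored at index 0) is easy to get wrong. A small hypothetical accessor, assuming package-internal access, makes the convention explicit:

// edgeFor is a hypothetical helper (not in this diff): edges are indexed by
// token type shifted up by one, so Token.EOF (-1) lands at index 0.
func edgeFor(s *DFAState, tokenType int) *DFAState {
    i := tokenType + 1
    if s.edges == nil || i < 0 || i >= len(s.edges) {
        return nil
    }
    return s.edges[i]
}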
111  vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go  generated  vendored  Normal file
@@ -0,0 +1,111 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "strconv"
)

// This implementation of {@link ANTLRErrorListener} can be used to identify
// certain potential correctness and performance problems in grammars. "Reports"
// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate
// message.
//
// <ul>
// <li><b>Ambiguities</b>: These are cases where more than one path through the
// grammar can Match the input.</li>
// <li><b>Weak context sensitivity</b>: These are cases where full-context
// prediction resolved an SLL conflict to a unique alternative which equaled the
// minimum alternative of the SLL conflict.</li>
// <li><b>Strong (forced) context sensitivity</b>: These are cases where the
// full-context prediction resolved an SLL conflict to a unique alternative,
// <em>and</em> the minimum alternative of the SLL conflict was found to not be
// a truly viable alternative. Two-stage parsing cannot be used for inputs where
// this situation occurs.</li>
// </ul>
type DiagnosticErrorListener struct {
    *DefaultErrorListener

    exactOnly bool
}

func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
    n := new(DiagnosticErrorListener)

    // whether all ambiguities or only exact ambiguities are Reported.
    n.exactOnly = exactOnly
    return n
}

func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
    if d.exactOnly && !exact {
        return
    }
    msg := "reportAmbiguity d=" +
        d.getDecisionDescription(recognizer, dfa) +
        ": ambigAlts=" +
        d.getConflictingAlts(ambigAlts, configs).String() +
        ", input='" +
        recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
    recognizer.NotifyErrorListeners(msg, nil, nil)
}

func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
    msg := "reportAttemptingFullContext d=" +
        d.getDecisionDescription(recognizer, dfa) +
        ", input='" +
        recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
    recognizer.NotifyErrorListeners(msg, nil, nil)
}

func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
    msg := "reportContextSensitivity d=" +
        d.getDecisionDescription(recognizer, dfa) +
        ", input='" +
        recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
    recognizer.NotifyErrorListeners(msg, nil, nil)
}

func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string {
    decision := dfa.decision
    ruleIndex := dfa.atnStartState.GetRuleIndex()

    ruleNames := recognizer.GetRuleNames()
    if ruleIndex < 0 || ruleIndex >= len(ruleNames) {
        return strconv.Itoa(decision)
    }
    ruleName := ruleNames[ruleIndex]
    if ruleName == "" {
        return strconv.Itoa(decision)
    }
    return strconv.Itoa(decision) + " (" + ruleName + ")"
}

// getConflictingAlts computes the set of conflicting or ambiguous alternatives
// from a configuration set, if that information was not already provided by the
// parser.
//
// @param ReportedAlts The set of conflicting or ambiguous alternatives, as
// Reported by the parser.
// @param configs The conflicting or ambiguous configuration set.
// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
// returns the set of alternatives represented in {@code configs}.
func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
    if ReportedAlts != nil {
        return ReportedAlts
    }
    result := NewBitSet()
    for _, c := range set.GetItems() {
        result.add(c.GetAlt())
    }

    return result
}
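To surface these ambiguity and context-sensitivity reports while developing a grammar, the listener is attached to a generated parser. A minimal sketch in application code, with p standing in for a generated parser:

// Replace the default console listener with the diagnostic one; passing
// true reports only exact ambiguities, false reports all of them.
p.RemoveErrorListeners()
p.AddErrorListener(antlr.NewDiagnosticErrorListener(true))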
108  vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go  generated  vendored  Normal file
@@ -0,0 +1,108 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "fmt"
    "os"
    "strconv"
)

// Provides an empty default implementation of {@link ANTLRErrorListener}. The
// default implementation of each method does nothing, but can be overridden as
// necessary.

type ErrorListener interface {
    SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
    ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
    ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
    ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
}

type DefaultErrorListener struct{}

func NewDefaultErrorListener() *DefaultErrorListener {
    return new(DefaultErrorListener)
}

func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
}

func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
}

func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
}

func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
}

type ConsoleErrorListener struct {
    *DefaultErrorListener
}

func NewConsoleErrorListener() *ConsoleErrorListener {
    return new(ConsoleErrorListener)
}

// ConsoleErrorListenerINSTANCE provides a default instance of {@link ConsoleErrorListener}.
var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()

// {@inheritDoc}
//
// <p>
// This implementation prints messages to {@link System//err} containing the
// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
// the following format:</p>
//
// <pre>
// line <em>line</em>:<em>charPositionInLine</em> <em>msg</em>
// </pre>
func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
    fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
}

type ProxyErrorListener struct {
    *DefaultErrorListener
    delegates []ErrorListener
}

func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
    if delegates == nil {
        panic("delegates is not provided")
    }
    l := new(ProxyErrorListener)
    l.delegates = delegates
    return l
}

func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
    for _, d := range p.delegates {
        d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
    }
}

func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
    for _, d := range p.delegates {
        d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
    }
}

func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
    for _, d := range p.delegates {
        d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
    }
}

func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
    for _, d := range p.delegates {
        d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
    }
}
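Because DefaultErrorListener supplies empty implementations of all four methods, a custom listener only has to override what it cares about. A minimal sketch, in application code, of a listener that collects syntax errors instead of printing them; the type name is hypothetical:

package main

import (
    "fmt"

    "github.com/antlr/antlr4/runtime/Go/antlr"
)

// collectingErrorListener overrides only SyntaxError and inherits the empty
// Report* defaults by embedding DefaultErrorListener.
type collectingErrorListener struct {
    *antlr.DefaultErrorListener
    errors []string
}

func (c *collectingErrorListener) SyntaxError(_ antlr.Recognizer, _ interface{}, line, column int, msg string, _ antlr.RecognitionException) {
    c.errors = append(c.errors, fmt.Sprintf("%d:%d %s", line, column, msg))
}

It would then be registered with AddErrorListener, e.g. p.AddErrorListener(&collectingErrorListener{DefaultErrorListener: antlr.NewDefaultErrorListener()}).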
762  vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go  generated  vendored  Normal file
@@ -0,0 +1,762 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "fmt"
    "reflect"
    "strconv"
    "strings"
)

type ErrorStrategy interface {
    reset(Parser)
    RecoverInline(Parser) Token
    Recover(Parser, RecognitionException)
    Sync(Parser)
    InErrorRecoveryMode(Parser) bool
    ReportError(Parser, RecognitionException)
    ReportMatch(Parser)
}

// This is the default implementation of {@link ANTLRErrorStrategy} used for
// error Reporting and recovery in ANTLR parsers.
type DefaultErrorStrategy struct {
    errorRecoveryMode bool
    lastErrorIndex    int
    lastErrorStates   *IntervalSet
}

var _ ErrorStrategy = &DefaultErrorStrategy{}

func NewDefaultErrorStrategy() *DefaultErrorStrategy {
    d := new(DefaultErrorStrategy)

    // Indicates whether the error strategy is currently "recovering from an
    // error". This is used to suppress Reporting multiple error messages while
    // attempting to recover from a detected syntax error.
    //
    // @see //InErrorRecoveryMode
    d.errorRecoveryMode = false

    // The index into the input stream where the last error occurred.
    // This is used to prevent infinite loops where an error is found
    // but no token is consumed during recovery...another error is found,
    // ad nauseam. This is a failsafe mechanism to guarantee that at least
    // one token/tree node is consumed for two errors.
    d.lastErrorIndex = -1
    d.lastErrorStates = nil
    return d
}

// <p>The default implementation simply calls {@link //endErrorCondition} to
// ensure that the handler is not in error recovery mode.</p>
func (d *DefaultErrorStrategy) reset(recognizer Parser) {
    d.endErrorCondition(recognizer)
}

// This method is called to enter error recovery mode when a recognition
// exception is Reported.
//
// @param recognizer the parser instance
func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
    d.errorRecoveryMode = true
}

func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool {
    return d.errorRecoveryMode
}

// This method is called to leave error recovery mode after recovering from
// a recognition exception.
//
// @param recognizer
func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
    d.errorRecoveryMode = false
    d.lastErrorStates = nil
    d.lastErrorIndex = -1
}

// {@inheritDoc}
//
// <p>The default implementation simply calls {@link //endErrorCondition}.</p>
func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
    d.endErrorCondition(recognizer)
}

// {@inheritDoc}
//
// <p>The default implementation returns immediately if the handler is already
// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
// and dispatches the Reporting task based on the runtime type of {@code e}
// according to the following table.</p>
//
// <ul>
// <li>{@link NoViableAltException}: Dispatches the call to
// {@link //ReportNoViableAlternative}</li>
// <li>{@link InputMisMatchException}: Dispatches the call to
// {@link //ReportInputMisMatch}</li>
// <li>{@link FailedPredicateException}: Dispatches the call to
// {@link //ReportFailedPredicate}</li>
// <li>All other types: calls {@link Parser//NotifyErrorListeners} to Report
// the exception</li>
// </ul>
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
    // if we've already Reported an error and have not Matched a token
    // yet successfully, don't Report any errors.
    if d.InErrorRecoveryMode(recognizer) {
        return // don't Report spurious errors
    }
    d.beginErrorCondition(recognizer)

    switch t := e.(type) {
    default:
        fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
        // fmt.Println(e.stack)
        recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
    case *NoViableAltException:
        d.ReportNoViableAlternative(recognizer, t)
    case *InputMisMatchException:
        d.ReportInputMisMatch(recognizer, t)
    case *FailedPredicateException:
        d.ReportFailedPredicate(recognizer, t)
    }
}

// {@inheritDoc}
//
// <p>The default implementation reSynchronizes the parser by consuming tokens
// until we find one in the reSynchronization set--loosely the set of tokens
// that can follow the current rule.</p>
func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
    if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
        d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
        // uh oh, another error at same token index and previously-Visited
        // state in ATN; must be a case where LT(1) is in the recovery
        // token set so nothing got consumed. Consume a single token
        // at least to prevent an infinite loop; this is a failsafe.
        recognizer.Consume()
    }
    d.lastErrorIndex = recognizer.GetInputStream().Index()
    if d.lastErrorStates == nil {
        d.lastErrorStates = NewIntervalSet()
    }
    d.lastErrorStates.addOne(recognizer.GetState())
    followSet := d.getErrorRecoverySet(recognizer)
    d.consumeUntil(recognizer, followSet)
}

// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
// that the current lookahead symbol is consistent with what we were expecting
// at this point in the ATN. You can call this anytime, but ANTLR only
// generates code to check before subrules/loops and each iteration.
//
// <p>Implements Jim Idle's magic Sync mechanism in closures and optional
// subrules. E.g.,</p>
//
// <pre>
// a : Sync ( stuff Sync )*
// Sync : {consume to what can follow Sync}
// </pre>
//
// At the start of a sub rule upon error, {@link //Sync} performs single
// token deletion, if possible. If it can't do that, it bails on the current
// rule and uses the default error recovery, which consumes until the
// reSynchronization set of the current rule.
//
// <p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or a block
// with an empty alternative), then the expected set includes what follows
// the subrule.</p>
//
// <p>During loop iteration, it consumes until it sees a token that can start a
// sub rule or what follows the loop. Yes, that is pretty aggressive. We opt to
// stay in the loop as long as possible.</p>
//
// <p><strong>ORIGINS</strong></p>
//
// <p>Previous versions of ANTLR did a poor job of their recovery within loops.
// A single mismatched token or missing token would force the parser to bail
// out of the entire rules surrounding the loop. So, for rule</p>
//
// <pre>
// classfunc : 'class' ID '{' member* '}'
// </pre>
//
// input with an extra token between members would force the parser to
// consume until it found the next class definition rather than the next
// member definition of the current class.
//
// <p>This functionality costs a little bit of effort because the parser has to
// compare the token set at the start of the loop and at each iteration. If for
// some reason speed is suffering for you, you can turn off this
// functionality by simply overriding this method as a blank { }.</p>
func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
    // If already recovering, don't try to Sync
    if d.InErrorRecoveryMode(recognizer) {
        return
    }

    s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
    la := recognizer.GetTokenStream().LA(1)

    // try cheaper subset first; might get lucky. seems to shave a wee bit off
    nextTokens := recognizer.GetATN().NextTokens(s, nil)
    if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
        return
    }

    switch s.GetStateType() {
    case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
        // Report error and recover if possible
        if d.SingleTokenDeletion(recognizer) != nil {
            return
        }
        panic(NewInputMisMatchException(recognizer))
    case ATNStatePlusLoopBack, ATNStateStarLoopBack:
        d.ReportUnwantedToken(recognizer)
        expecting := NewIntervalSet()
        expecting.addSet(recognizer.GetExpectedTokens())
        whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
        d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
    default:
        // do nothing if we can't identify the exact kind of ATN state
    }
}

// This is called by {@link //ReportError} when the exception is a
// {@link NoViableAltException}.
//
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
    tokens := recognizer.GetTokenStream()
    var input string
    if tokens != nil {
        if e.startToken.GetTokenType() == TokenEOF {
            input = "<EOF>"
        } else {
            input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
        }
    } else {
        input = "<unknown input>"
    }
    msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
    recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}

// This is called by {@link //ReportError} when the exception is an
// {@link InputMisMatchException}.
//
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
    msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
        " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
    recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}

// This is called by {@link //ReportError} when the exception is a
// {@link FailedPredicateException}.
//
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
    ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
    msg := "rule " + ruleName + " " + e.message
    recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}

// This method is called to Report a syntax error which requires the removal
// of a token from the input stream. At the time this method is called, the
// erroneous symbol is the current {@code LT(1)} symbol and has not yet been
// removed from the input stream. When this method returns,
// {@code recognizer} is in error recovery mode.
//
// <p>This method is called when {@link //singleTokenDeletion} identifies
// single-token deletion as a viable recovery strategy for a mismatched
// input error.</p>
//
// <p>The default implementation simply returns if the handler is already in
// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
// enter error recovery mode, followed by calling
// {@link Parser//NotifyErrorListeners}.</p>
//
// @param recognizer the parser instance
func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
    if d.InErrorRecoveryMode(recognizer) {
        return
    }
    d.beginErrorCondition(recognizer)
    t := recognizer.GetCurrentToken()
    tokenName := d.GetTokenErrorDisplay(t)
    expecting := d.GetExpectedTokens(recognizer)
    msg := "extraneous input " + tokenName + " expecting " +
        expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
    recognizer.NotifyErrorListeners(msg, t, nil)
}

// This method is called to Report a syntax error which requires the
// insertion of a missing token into the input stream. At the time this
// method is called, the missing token has not yet been inserted. When this
// method returns, {@code recognizer} is in error recovery mode.
//
// <p>This method is called when {@link //singleTokenInsertion} identifies
// single-token insertion as a viable recovery strategy for a mismatched
// input error.</p>
//
// <p>The default implementation simply returns if the handler is already in
// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
// enter error recovery mode, followed by calling
// {@link Parser//NotifyErrorListeners}.</p>
//
// @param recognizer the parser instance
func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
    if d.InErrorRecoveryMode(recognizer) {
        return
    }
    d.beginErrorCondition(recognizer)
    t := recognizer.GetCurrentToken()
    expecting := d.GetExpectedTokens(recognizer)
    msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
        " at " + d.GetTokenErrorDisplay(t)
    recognizer.NotifyErrorListeners(msg, t, nil)
}

// <p>The default implementation attempts to recover from the mismatched input
// by using single token insertion and deletion as described below. If the
// recovery attempt fails, this method panics with an
// {@link InputMisMatchException}.</p>
//
// <p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
//
// <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
// right token, however, then assume {@code LA(1)} is some extra spurious
// token and delete it. Then consume and return the next token (which was
// the {@code LA(2)} token) as the successful result of the Match operation.</p>
//
// <p>This recovery strategy is implemented by {@link //singleTokenDeletion}.</p>
//
// <p><strong>MISSING TOKEN</strong> (single token insertion)</p>
//
// <p>If the current token (at {@code LA(1)}) is consistent with what could come
// after the expected {@code LA(1)} token, then assume the token is missing
// and use the parser's {@link TokenFactory} to create it on the fly. The
// "insertion" is performed by returning the created token as the successful
// result of the Match operation.</p>
//
// <p>This recovery strategy is implemented by {@link //singleTokenInsertion}.</p>
//
// <p><strong>EXAMPLE</strong></p>
//
// <p>For example, input {@code i=(3} is clearly missing the {@code ')'}. When
// the parser returns from the nested call to {@code expr}, it will have
// the call chain:</p>
//
// <pre>
// stat → expr → atom
// </pre>
//
// and it will be trying to Match the {@code ')'} at this point in the
// derivation:
//
// <pre>
// => ID '=' '(' INT ')' ('+' atom)* ';'
//                   ^
// </pre>
//
// The attempt to Match {@code ')'} will fail when it sees {@code ';'} and
// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==';'}
// is in the set of tokens that can follow the {@code ')'} token reference
// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
    // SINGLE TOKEN DELETION
    MatchedSymbol := d.SingleTokenDeletion(recognizer)
    if MatchedSymbol != nil {
        // we have deleted the extra token.
        // now, move past ttype token as if all were ok
        recognizer.Consume()
        return MatchedSymbol
    }
    // SINGLE TOKEN INSERTION
    if d.SingleTokenInsertion(recognizer) {
        return d.GetMissingSymbol(recognizer)
    }
    // even that didn't work; must panic with the exception
    panic(NewInputMisMatchException(recognizer))
}

// SingleTokenInsertion implements the single-token insertion inline error recovery
// strategy. It is called by {@link //recoverInline} if the single-token
// deletion strategy fails to recover from the mismatched input. If this
// method returns {@code true}, {@code recognizer} will be in error recovery
// mode.
//
// <p>This method determines whether or not single-token insertion is viable by
// checking if the {@code LA(1)} input symbol could be successfully Matched
// if it were instead the {@code LA(2)} symbol. If this method returns
// {@code true}, the caller is responsible for creating and inserting a
// token with the correct type to produce this behavior.</p>
//
// @param recognizer the parser instance
// @return {@code true} if single-token insertion is a viable recovery
// strategy for the current mismatched input, otherwise {@code false}
func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
    currentSymbolType := recognizer.GetTokenStream().LA(1)
    // if current token is consistent with what could come after current
    // ATN state, then we know we're missing a token; error recovery
    // is free to conjure up and insert the missing token
    atn := recognizer.GetInterpreter().atn
    currentState := atn.states[recognizer.GetState()]
    next := currentState.GetTransitions()[0].getTarget()
    expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
    if expectingAtLL2.contains(currentSymbolType) {
        d.ReportMissingToken(recognizer)
        return true
    }

    return false
}

// SingleTokenDeletion implements the single-token deletion inline error recovery
// strategy. It is called by {@link //recoverInline} to attempt to recover
// from mismatched input. If this method returns nil, the parser and error
// handler state will not have changed. If this method returns non-nil,
// {@code recognizer} will <em>not</em> be in error recovery mode since the
// returned token was a successful Match.
//
// <p>If the single-token deletion is successful, this method calls
// {@link //ReportUnwantedToken} to Report the error, followed by
// {@link Parser//consume} to actually "delete" the extraneous token. Then,
// before returning, {@link //ReportMatch} is called to signal a successful
// Match.</p>
//
// @param recognizer the parser instance
// @return the successfully Matched {@link Token} instance if single-token
// deletion successfully recovers from the mismatched input, otherwise
// {@code nil}
func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
    NextTokenType := recognizer.GetTokenStream().LA(2)
    expecting := d.GetExpectedTokens(recognizer)
    if expecting.contains(NextTokenType) {
        d.ReportUnwantedToken(recognizer)
        // print("recoverFromMisMatchedToken deleting " \
        //	+ str(recognizer.GetTokenStream().LT(1)) \
        //	+ " since " + str(recognizer.GetTokenStream().LT(2)) \
        //	+ " is what we want", file=sys.stderr)
        recognizer.Consume() // simply delete extra token
        // we want to return the token we're actually Matching
        MatchedSymbol := recognizer.GetCurrentToken()
        d.ReportMatch(recognizer) // we know current token is correct
        return MatchedSymbol
    }

    return nil
}

// GetMissingSymbol conjures up a missing token during error recovery.
//
// The recognizer attempts to recover from single missing
// symbols. But, actions might refer to that missing symbol.
// For example, x=ID {f($x)}. The action clearly assumes
// that there has been an identifier Matched previously and that
// $x points at that token. If that token is missing, but
// the next token in the stream is what we want, we assume that
// this token is missing and we keep going. Because we
// have to return some token to replace the missing token,
// we have to conjure one up. This method gives the user control
// over the tokens returned for missing tokens. Mostly,
// you will want to create something special for identifier
// tokens. For literals such as '{' and ',', the default
// action in the parser or tree parser works. It simply creates
// a CommonToken of the appropriate type. The text will be the token.
// If you change what tokens must be created by the lexer,
// override this method to create the appropriate tokens.
func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
    currentSymbol := recognizer.GetCurrentToken()
    expecting := d.GetExpectedTokens(recognizer)
    expectedTokenType := expecting.first()
    var tokenText string

    if expectedTokenType == TokenEOF {
        tokenText = "<missing EOF>"
    } else {
        ln := recognizer.GetLiteralNames()
        if expectedTokenType > 0 && expectedTokenType < len(ln) {
            tokenText = "<missing " + recognizer.GetLiteralNames()[expectedTokenType] + ">"
        } else {
            tokenText = "<missing undefined>" // TODO matches the JS impl
        }
    }
    current := currentSymbol
    lookback := recognizer.GetTokenStream().LT(-1)
    if current.GetTokenType() == TokenEOF && lookback != nil {
        current = lookback
    }

    tf := recognizer.GetTokenFactory()

    return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
}

func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
    return recognizer.GetExpectedTokens()
}

// GetTokenErrorDisplay determines how a token should be displayed in an error
// message. The default is to display just the text, but during development you
// might want to have a lot of information spit out. Override in that case
// to use t.String() (which, for CommonToken, dumps everything about
// the token). This is better than forcing you to override a method in
// your token objects because you don't have to go modify your lexer
// so that it creates a new Java type.
func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
    if t == nil {
        return "<no token>"
    }
    s := t.GetText()
    if s == "" {
        if t.GetTokenType() == TokenEOF {
            s = "<EOF>"
        } else {
            s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
        }
    }
    return d.escapeWSAndQuote(s)
}

func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
    s = strings.Replace(s, "\t", "\\t", -1)
    s = strings.Replace(s, "\n", "\\n", -1)
    s = strings.Replace(s, "\r", "\\r", -1)
    return "'" + s + "'"
}

// getErrorRecoverySet computes the error recovery set for the current rule.
// During rule invocation, the parser pushes the set of tokens that can
// follow that rule reference on the stack; this amounts to
// computing FIRST of what follows the rule reference in the
// enclosing rule. See LinearApproximator.FIRST().
// This local follow set only includes tokens
// from within the rule; i.e., the FIRST computation done by
// ANTLR stops at the end of a rule.
//
// EXAMPLE
//
// When you find a "no viable alt exception", the input is not
// consistent with any of the alternatives for rule r. The best
// thing to do is to consume tokens until you see something that
// can legally follow a call to r *or* any rule that called r.
// You don't want the exact set of viable next tokens because the
// input might just be missing a token--you might consume the
// rest of the input looking for one of the missing tokens.
//
// Consider grammar:
//
//	a : '[' b ']'
//	  | '(' b ')'
//
//	b : c '^' INT
//	c : ID
//	  | INT
//
// At each rule invocation, the set of tokens that could follow
// that rule is pushed on a stack. Here are the various
// context-sensitive follow sets:
//
//	FOLLOW(b1_in_a) = FIRST(']') = ']'
//	FOLLOW(b2_in_a) = FIRST(')') = ')'
//	FOLLOW(c_in_b)  = FIRST('^') = '^'
//
// Upon erroneous input "[]", the call chain is
//
//	a -> b -> c
//
// and, hence, the follow context stack is:
//
//	depth  follow set  start of rule execution
//	0      <EOF>       a (from main())
//	1      ']'         b
//	2      '^'         c
//
// Notice that ')' is not included, because b would have to have
// been called from a different context in rule a for ')' to be
// included.
//
// For error recovery, we cannot consider FOLLOW(c)
// (context-sensitive or otherwise). We need the combined set of
// all context-sensitive FOLLOW sets--the set of all tokens that
// could follow any reference in the call chain. We need to
// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
// we reSync'd to that token, we'd consume until EOF. We need to
// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
// In this case, for input "[]", LA(1) is ']' and in the set, so we would
// not consume anything. After printing an error, rule c would
// return normally. Rule b would not find the required '^' though.
// At this point, it gets a mismatched token error and panics an
// exception (since LA(1) is not in the viable following token
// set). The rule exception handler tries to recover, but finds
// the same recovery set and doesn't consume anything. Rule b
// exits normally returning to rule a. Now it finds the ']' (and
// with the successful Match exits errorRecovery mode).
//
// So, you can see that the parser walks up the call chain looking
// for the token that was a member of the recovery set.
//
// Errors are not generated in errorRecovery mode.
//
// ANTLR's error recovery mechanism is based upon original ideas:
//
// "Algorithms + Data Structures = Programs" by Niklaus Wirth
//
// and
//
// "A note on error recovery in recursive descent parsers":
// http://portal.acm.org/citation.cfm?id=947902.947905
//
// Later, Josef Grosch had some good ideas:
//
// "Efficient and Comfortable Error Recovery in Recursive Descent
// Parsers":
// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
//
// Like Grosch I implement context-sensitive FOLLOW sets that are combined
// at run-time upon error to avoid overhead during parsing.
func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
    atn := recognizer.GetInterpreter().atn
    ctx := recognizer.GetParserRuleContext()
    recoverSet := NewIntervalSet()
    for ctx != nil && ctx.GetInvokingState() >= 0 {
        // compute what follows who invoked us
        invokingState := atn.states[ctx.GetInvokingState()]
        rt := invokingState.GetTransitions()[0]
        follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
        recoverSet.addSet(follow)
        ctx = ctx.GetParent().(ParserRuleContext)
    }
    recoverSet.removeOne(TokenEpsilon)
    return recoverSet
}

// consumeUntil consumes tokens until one Matches the given token set.
func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
    ttype := recognizer.GetTokenStream().LA(1)
    for ttype != TokenEOF && !set.contains(ttype) {
        recognizer.Consume()
        ttype = recognizer.GetTokenStream().LA(1)
    }
}

// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
// by immediately canceling the parse operation with a
// {@link ParseCancellationException}. The implementation ensures that the
// {@link ParserRuleContext//exception} field is set for all parse tree nodes
// that were not completed prior to encountering the error.
//
// <p>
// This error strategy is useful in the following scenarios.</p>
//
// <ul>
// <li><strong>Two-stage parsing:</strong> This error strategy allows the first
// stage of two-stage parsing to immediately terminate if an error is
// encountered, and immediately fall back to the second stage. In addition to
// avoiding wasted work by attempting to recover from errors here, the empty
// implementation of {@link BailErrorStrategy//Sync} improves the performance of
// the first stage.</li>
// <li><strong>Silent validation:</strong> When syntax errors are not being
// Reported or logged, and the parse result is simply ignored if errors occur,
// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
// when the result will be ignored either way.</li>
// </ul>
//
// <p>
// {@code myparser.setErrorHandler(NewBailErrorStrategy())}</p>
//
// @see Parser//setErrorHandler(ANTLRErrorStrategy)
type BailErrorStrategy struct {
    *DefaultErrorStrategy
}

var _ ErrorStrategy = &BailErrorStrategy{}

func NewBailErrorStrategy() *BailErrorStrategy {
    b := new(BailErrorStrategy)

    b.DefaultErrorStrategy = NewDefaultErrorStrategy()

    return b
}

// Recover, instead of recovering from exception {@code e}, re-panics it wrapped
// in a {@link ParseCancellationException} so it is not caught by the
// rule func catches. Use {@link Exception//getCause()} to get the
// original {@link RecognitionException}.
func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
    context := recognizer.GetParserRuleContext()
    for context != nil {
        context.SetException(e)
        if parent, ok := context.GetParent().(ParserRuleContext); ok {
            context = parent
        } else {
            context = nil
        }
    }
    panic(NewParseCancellationException()) // TODO we don't emit e properly
}

// RecoverInline makes sure we don't attempt to recover inline; if the parser
// successfully recovers, it won't panic an exception.
func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
    b.Recover(recognizer, NewInputMisMatchException(recognizer))

    return nil
}

// Sync makes sure we don't attempt to recover from problems in subrules.
func (b *BailErrorStrategy) Sync(recognizer Parser) {
    // pass
}
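The two-stage pattern that the BailErrorStrategy comment recommends looks roughly like the sketch below in application code. This is a hedged illustration, not part of this diff: newParser and Start are placeholders for a generated parser constructor and its entry rule, and stage two simply re-parses with full LL prediction when stage one panics with a ParseCancellationException.

// parseTwoStage is a hypothetical sketch, not part of this diff.
func parseTwoStage(input string) (tree antlr.ParseTree, err error) {
    // Stage 1: fast SLL prediction that bails out on the first syntax error.
    p := newParser(input) // placeholder for a generated parser constructor
    p.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)
    p.SetErrorHandler(antlr.NewBailErrorStrategy())

    defer func() {
        if recover() != nil {
            // Stage 2: only genuinely erroneous inputs pay for full LL
            // prediction and the default, recovering error strategy.
            p = newParser(input)
            p.GetInterpreter().SetPredictionMode(antlr.PredictionModeLL)
            p.SetErrorHandler(antlr.NewDefaultErrorStrategy())
            tree = p.Start() // placeholder entry rule
        }
    }()
    return p.Start(), nil
}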
241  vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go  generated  vendored  Normal file
@@ -0,0 +1,241 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
|
||||
// 3 kinds of errors: prediction errors, failed predicate errors, and
|
||||
// mismatched input errors. In each case, the parser knows where it is
|
||||
// in the input, where it is in the ATN, the rule invocation stack,
|
||||
// and what kind of problem occurred.
|
||||
|
||||
type RecognitionException interface {
|
||||
GetOffendingToken() Token
|
||||
GetMessage() string
|
||||
GetInputStream() IntStream
|
||||
}
|
||||
|
||||
type BaseRecognitionException struct {
|
||||
message string
|
||||
recognizer Recognizer
|
||||
offendingToken Token
|
||||
offendingState int
|
||||
ctx RuleContext
|
||||
input IntStream
|
||||
}
|
||||
|
||||
func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {
|
||||
|
||||
// todo
|
||||
// Error.call(this)
|
||||
//
|
||||
// if (!!Error.captureStackTrace) {
|
||||
// Error.captureStackTrace(this, RecognitionException)
|
||||
// } else {
|
||||
// stack := NewError().stack
|
||||
// }
|
||||
// TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int
|
||||
|
||||
t := new(BaseRecognitionException)
|
||||
|
||||
t.message = message
|
||||
t.recognizer = recognizer
|
||||
t.input = input
|
||||
t.ctx = ctx
|
||||
// The current {@link Token} when an error occurred. Since not all streams
|
||||
// support accessing symbols by index, we have to track the {@link Token}
|
||||
// instance itself.
|
||||
t.offendingToken = nil
|
||||
// Get the ATN state number the parser was in at the time the error
|
||||
// occurred. For {@link NoViableAltException} and
|
||||
// {@link LexerNoViableAltException} exceptions, this is the
|
||||
// {@link DecisionState} number. For others, it is the state whose outgoing
|
||||
// edge we couldn't Match.
|
||||
t.offendingState = -1
|
||||
if t.recognizer != nil {
|
||||
t.offendingState = t.recognizer.GetState()
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
func (b *BaseRecognitionException) GetMessage() string {
|
||||
return b.message
|
||||
}
|
||||
|
||||
func (b *BaseRecognitionException) GetOffendingToken() Token {
|
||||
return b.offendingToken
|
||||
}
|
||||
|
||||
func (b *BaseRecognitionException) GetInputStream() IntStream {
|
||||
return b.input
|
||||
}
|
||||
|
||||
// <p>If the state number is not known, b method returns -1.</p>
|
||||
|
||||
//
|
||||
// Gets the set of input symbols which could potentially follow the
|
||||
// previously Matched symbol at the time b exception was panicn.
|
||||
//
|
||||
// <p>If the set of expected tokens is not known and could not be computed,
|
||||
// b method returns {@code nil}.</p>
|
||||
//
|
||||
// @return The set of token types that could potentially follow the current
|
||||
// state in the ATN, or {@code nil} if the information is not available.
|
||||
// /
|
||||
func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
|
||||
if b.recognizer != nil {
|
||||
return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BaseRecognitionException) String() string {
|
||||
return b.message
|
||||
}
|
||||
|
||||
type LexerNoViableAltException struct {
|
||||
*BaseRecognitionException
|
||||
|
||||
startIndex int
|
||||
deadEndConfigs ATNConfigSet
|
||||
}
|
||||
|
||||
func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException {
|
||||
|
||||
l := new(LexerNoViableAltException)
|
||||
|
||||
l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
|
||||
|
||||
l.startIndex = startIndex
|
||||
l.deadEndConfigs = deadEndConfigs
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
func (l *LexerNoViableAltException) String() string {
|
||||
symbol := ""
|
||||
if l.startIndex >= 0 && l.startIndex < l.input.Size() {
|
||||
symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
|
||||
}
|
||||
return "LexerNoViableAltException" + symbol
|
||||
}
|
||||
|
||||
type NoViableAltException struct {
|
||||
*BaseRecognitionException
|
||||
|
||||
startToken Token
|
||||
offendingToken Token
|
||||
ctx ParserRuleContext
|
||||
deadEndConfigs ATNConfigSet
|
||||
}
|
||||
|
||||
// Indicates that the parser could not decide which of two or more paths
|
||||
// to take based upon the remaining input. It tracks the starting token
|
||||
// of the offending input and also knows where the parser was
|
||||
// in the various paths when the error. Reported by ReportNoViableAlternative()
|
||||
//
|
||||
func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
|
||||
|
||||
if ctx == nil {
|
||||
ctx = recognizer.GetParserRuleContext()
|
||||
}
|
||||
|
||||
if offendingToken == nil {
|
||||
offendingToken = recognizer.GetCurrentToken()
|
||||
}
|
||||
|
||||
if startToken == nil {
|
||||
startToken = recognizer.GetCurrentToken()
|
||||
}
|
||||
|
||||
if input == nil {
|
||||
input = recognizer.GetInputStream().(TokenStream)
|
||||
}
|
||||
|
||||
n := new(NoViableAltException)
|
||||
n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
|
||||
|
||||
// Which configurations did we try at input.Index() that couldn't Match
|
||||
// input.LT(1)?
|
||||
n.deadEndConfigs = deadEndConfigs
|
||||
// The token object at the start index; the input stream might
|
||||
// not be buffering tokens, so get a reference to it. (At the
|
||||
// time the error occurred, of course, the stream needs to keep a
|
||||
// buffer of all of the tokens, but later we might not have access to those.)
|
||||
n.startToken = startToken
|
||||
n.offendingToken = offendingToken
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
type InputMisMatchException struct {
|
||||
*BaseRecognitionException
|
||||
}
|
||||
|
||||
// This signifies any kind of mismatched input exception, such as
|
||||
// when the current input does not Match the expected token.
|
||||
//
|
||||
func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
|
||||
|
||||
i := new(InputMisMatchException)
|
||||
i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
|
||||
|
||||
i.offendingToken = recognizer.GetCurrentToken()
|
||||
|
||||
return i
|
||||
|
||||
}
|
||||
|
||||
// A semantic predicate failed during validation. Validation of predicates
|
||||
// occurs when normally parsing the alternative, just like Matching a token.
|
||||
// Disambiguating predicate evaluation occurs when we test a predicate during
|
||||
// prediction.
|
||||
|
||||
type FailedPredicateException struct {
|
||||
*BaseRecognitionException
|
||||
|
||||
ruleIndex int
|
||||
predicateIndex int
|
||||
predicate string
|
||||
}
|
||||
|
||||
func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
|
||||
|
||||
f := new(FailedPredicateException)
|
||||
|
||||
f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
|
||||
|
||||
s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
|
||||
trans := s.GetTransitions()[0]
|
||||
if trans2, ok := trans.(*PredicateTransition); ok {
|
||||
f.ruleIndex = trans2.ruleIndex
|
||||
f.predicateIndex = trans2.predIndex
|
||||
} else {
|
||||
f.ruleIndex = 0
|
||||
f.predicateIndex = 0
|
||||
}
|
||||
f.predicate = predicate
|
||||
f.offendingToken = recognizer.GetCurrentToken()
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *FailedPredicateException) formatMessage(predicate, message string) string {
|
||||
if message != "" {
|
||||
return message
|
||||
}
|
||||
|
||||
return "failed predicate: {" + predicate + "}?"
|
||||
}
|
||||
|
||||
type ParseCancellationException struct {
|
||||
}
|
||||
|
||||
func NewParseCancellationException() *ParseCancellationException {
|
||||
return new(ParseCancellationException)
|
||||
}
|
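The runtime raises these failures as Go panics. A minimal sketch of recovering them at an application boundary, assuming only the exported RecognitionException interface and that the surrounding code panics with such a value (parseFn is a hypothetical stand-in for a call into a generated parser):

package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

// safeParse turns a RecognitionException panic into an ordinary error.
func safeParse(parseFn func()) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if re, ok := r.(antlr.RecognitionException); ok {
				err = fmt.Errorf("parse failed: %s", re.GetMessage())
				return
			}
			panic(r) // not a recognition problem; re-panic
		}
	}()
	parseFn()
	return nil
}

func main() {
	fmt.Println(safeParse(func() { /* invoke a generated parser rule here */ }))
}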
49
vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// This is an InputStream that is loaded from a file all at once
|
||||
// when you construct the object.
|
||||
|
||||
type FileStream struct {
|
||||
*InputStream
|
||||
|
||||
filename string
|
||||
}
|
||||
|
||||
func NewFileStream(fileName string) (*FileStream, error) {
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
|
||||
f, err := os.Open(fileName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = io.Copy(buf, f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fs := new(FileStream)
|
||||
|
||||
fs.filename = fileName
|
||||
s := buf.String()
|
||||
|
||||
fs.InputStream = NewInputStream(s)
|
||||
|
||||
return fs, nil
|
||||
|
||||
}
|
||||
|
||||
func (f *FileStream) GetSourceName() string {
|
||||
return f.filename
|
||||
}
|
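A minimal usage sketch; "input.txt" is a hypothetical file name, and Size is promoted from the embedded *InputStream:

package main

import (
	"fmt"
	"log"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

func main() {
	// the whole file is read into memory up front
	fs, err := antlr.NewFileStream("input.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: %d runes\n", fs.GetSourceName(), fs.Size())
}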
113
vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
generated
vendored
Normal file
@ -0,0 +1,113 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
type InputStream struct {
|
||||
name string
|
||||
index int
|
||||
data []rune
|
||||
size int
|
||||
}
|
||||
|
||||
func NewInputStream(data string) *InputStream {
|
||||
|
||||
is := new(InputStream)
|
||||
|
||||
is.name = "<empty>"
|
||||
is.index = 0
|
||||
is.data = []rune(data)
|
||||
is.size = len(is.data) // number of runes
|
||||
|
||||
return is
|
||||
}
|
||||
|
||||
func (is *InputStream) reset() {
|
||||
is.index = 0
|
||||
}
|
||||
|
||||
func (is *InputStream) Consume() {
|
||||
if is.index >= is.size {
|
||||
// assert is.LA(1) == TokenEOF
|
||||
panic("cannot consume EOF")
|
||||
}
|
||||
is.index++
|
||||
}
|
||||
|
||||
func (is *InputStream) LA(offset int) int {
|
||||
|
||||
if offset == 0 {
|
||||
return 0 // nil
|
||||
}
|
||||
if offset < 0 {
|
||||
offset++ // e.g., translate LA(-1) to use offset=0
|
||||
}
|
||||
pos := is.index + offset - 1
|
||||
|
||||
if pos < 0 || pos >= is.size { // invalid
|
||||
return TokenEOF
|
||||
}
|
||||
|
||||
return int(is.data[pos])
|
||||
}
|
||||
|
||||
func (is *InputStream) LT(offset int) int {
|
||||
return is.LA(offset)
|
||||
}
|
||||
|
||||
func (is *InputStream) Index() int {
|
||||
return is.index
|
||||
}
|
||||
|
||||
func (is *InputStream) Size() int {
|
||||
return is.size
|
||||
}
|
||||
|
||||
// mark/release do nothing; we have the entire buffer
|
||||
func (is *InputStream) Mark() int {
|
||||
return -1
|
||||
}
|
||||
|
||||
func (is *InputStream) Release(marker int) {
|
||||
}
|
||||
|
||||
func (is *InputStream) Seek(index int) {
|
||||
if index <= is.index {
|
||||
is.index = index // just jump; don't update stream state (line, ...)
|
||||
return
|
||||
}
|
||||
// seek forward
|
||||
is.index = intMin(index, is.size)
|
||||
}
|
||||
|
||||
func (is *InputStream) GetText(start int, stop int) string {
|
||||
if stop >= is.size {
|
||||
stop = is.size - 1
|
||||
}
|
||||
if start >= is.size {
|
||||
return ""
|
||||
}
|
||||
|
||||
return string(is.data[start : stop+1])
|
||||
}
|
||||
|
||||
func (is *InputStream) GetTextFromTokens(start, stop Token) string {
|
||||
if start != nil && stop != nil {
|
||||
return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (is *InputStream) GetTextFromInterval(i *Interval) string {
|
||||
return is.GetText(i.Start, i.Stop)
|
||||
}
|
||||
|
||||
func (*InputStream) GetSourceName() string {
|
||||
return "Obtained from string"
|
||||
}
|
||||
|
||||
func (is *InputStream) String() string {
|
||||
return string(is.data)
|
||||
}
|
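A small demonstration of the lookahead conventions above: offsets are 1-based, LA(-1) reads one rune back, LA never consumes, and all indexes count runes rather than bytes. A sketch:

package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

func main() {
	is := antlr.NewInputStream("héllo")
	fmt.Println(string(rune(is.LA(1)))) // "h": lookahead does not consume
	is.Consume()
	is.Consume()
	fmt.Println(string(rune(is.LA(-1)))) // "é": one rune behind the cursor
	fmt.Println(is.GetText(0, 2))        // "hél": inclusive rune indexes
	fmt.Println(is.Size())               // 5 runes, although 6 bytes
}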
16
vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
type IntStream interface {
|
||||
Consume()
|
||||
LA(int) int
|
||||
Mark() int
|
||||
Release(marker int)
|
||||
Index() int
|
||||
Seek(index int)
|
||||
Size() int
|
||||
GetSourceName() string
|
||||
}
|
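Both InputStream and FileStream above already satisfy this interface; a compile-time assertion (not part of the source, just an illustration) makes that checkable:

package main

import "github.com/antlr/antlr4/runtime/Go/antlr"

// compile-time proofs that the concrete streams implement IntStream
var (
	_ antlr.IntStream = (*antlr.InputStream)(nil)
	_ antlr.IntStream = (*antlr.FileStream)(nil)
)

func main() {}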
308
vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
generated
vendored
Normal file
@ -0,0 +1,308 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Interval struct {
|
||||
Start int
|
||||
Stop int
|
||||
}
|
||||
|
||||
/* stop is not included! */
|
||||
func NewInterval(start, stop int) *Interval {
|
||||
i := new(Interval)
|
||||
|
||||
i.Start = start
|
||||
i.Stop = stop
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *Interval) Contains(item int) bool {
|
||||
return item >= i.Start && item < i.Stop
|
||||
}
|
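Despite the field names, the convention is half-open: NewInterval(start, stop) covers start..stop-1, which is exactly what Contains and String (below) implement. For example:

package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

func main() {
	iv := antlr.NewInterval(5, 8) // covers 5, 6 and 7
	fmt.Println(iv.Contains(7))   // true
	fmt.Println(iv.Contains(8))   // false: stop is excluded
	fmt.Println(iv.String())      // "5..7"
}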
||||
|
||||
func (i *Interval) String() string {
|
||||
if i.Start == i.Stop-1 {
|
||||
return strconv.Itoa(i.Start)
|
||||
}
|
||||
|
||||
return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
|
||||
}
|
||||
|
||||
func (i *Interval) length() int {
|
||||
return i.Stop - i.Start
|
||||
}
|
||||
|
||||
type IntervalSet struct {
|
||||
intervals []*Interval
|
||||
readOnly bool
|
||||
}
|
||||
|
||||
func NewIntervalSet() *IntervalSet {
|
||||
|
||||
i := new(IntervalSet)
|
||||
|
||||
i.intervals = nil
|
||||
i.readOnly = false
|
||||
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *IntervalSet) first() int {
|
||||
if len(i.intervals) == 0 {
|
||||
return TokenInvalidType
|
||||
}
|
||||
|
||||
return i.intervals[0].Start
|
||||
}
|
||||
|
||||
func (i *IntervalSet) addOne(v int) {
|
||||
i.addInterval(NewInterval(v, v+1))
|
||||
}
|
||||
|
||||
func (i *IntervalSet) addRange(l, h int) {
|
||||
i.addInterval(NewInterval(l, h+1))
|
||||
}
|
||||
|
||||
func (i *IntervalSet) addInterval(v *Interval) {
|
||||
if i.intervals == nil {
|
||||
i.intervals = make([]*Interval, 0)
|
||||
i.intervals = append(i.intervals, v)
|
||||
} else {
|
||||
// find insert pos
|
||||
for k, interval := range i.intervals {
|
||||
// distinct range -> insert
|
||||
if v.Stop < interval.Start {
|
||||
i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
|
||||
return
|
||||
} else if v.Stop == interval.Start {
|
||||
i.intervals[k].Start = v.Start
|
||||
return
|
||||
} else if v.Start <= interval.Stop {
|
||||
i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop))
|
||||
|
||||
// if not applying to end, merge potential overlaps
|
||||
if k < len(i.intervals)-1 {
|
||||
l := i.intervals[k]
|
||||
r := i.intervals[k+1]
|
||||
// if r contained in l
|
||||
if l.Stop >= r.Stop {
|
||||
i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
|
||||
} else if l.Stop >= r.Start { // partial overlap
|
||||
i.intervals[k] = NewInterval(l.Start, r.Stop)
|
||||
i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
// greater than any existing interval
|
||||
i.intervals = append(i.intervals, v)
|
||||
}
|
||||
}
|
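addInterval keeps the list sorted and coalesces touching or overlapping ranges. Since these helpers are unexported, the behavior is easiest to show as a hypothetical in-package test:

package antlr

import "testing"

func TestAddIntervalMerges(t *testing.T) {
	s := NewIntervalSet()
	s.addRange(1, 3) // {1..3}
	s.addRange(7, 9) // disjoint: {1..3, 7..9}
	s.addRange(3, 7) // touches both; everything collapses
	if got := s.String(); got != "1..9" {
		t.Fatalf("expected 1..9, got %s", got)
	}
}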
||||
|
||||
func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
|
||||
if other.intervals != nil {
|
||||
for k := 0; k < len(other.intervals); k++ {
|
||||
i2 := other.intervals[k]
|
||||
i.addInterval(NewInterval(i2.Start, i2.Stop))
|
||||
}
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *IntervalSet) complement(start int, stop int) *IntervalSet {
|
||||
result := NewIntervalSet()
|
||||
result.addInterval(NewInterval(start, stop+1))
|
||||
for j := 0; j < len(i.intervals); j++ {
|
||||
result.removeRange(i.intervals[j])
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (i *IntervalSet) contains(item int) bool {
|
||||
if i.intervals == nil {
|
||||
return false
|
||||
}
|
||||
for k := 0; k < len(i.intervals); k++ {
|
||||
if i.intervals[k].Contains(item) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (i *IntervalSet) length() int {
|
||||
total := 0 // avoid shadowing the built-in len
|
||||
|
||||
for _, v := range i.intervals {
|
||||
total += v.length()
|
||||
}
|
||||
|
||||
return total
|
||||
}
|
||||
|
||||
func (i *IntervalSet) removeRange(v *Interval) {
|
||||
if v.Start == v.Stop-1 {
|
||||
i.removeOne(v.Start)
|
||||
} else if i.intervals != nil {
|
||||
k := 0
|
||||
for n := 0; n < len(i.intervals); n++ {
|
||||
ni := i.intervals[k]
|
||||
// intervals are ordered
|
||||
if v.Stop <= ni.Start {
|
||||
return
|
||||
} else if v.Start > ni.Start && v.Stop < ni.Stop {
|
||||
i.intervals[k] = NewInterval(ni.Start, v.Start)
|
||||
x := NewInterval(v.Stop, ni.Stop)
|
||||
// i.intervals.splice(k, 0, x)
|
||||
i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
|
||||
return
|
||||
} else if v.Start <= ni.Start && v.Stop >= ni.Stop {
|
||||
// i.intervals.splice(k, 1)
|
||||
i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
|
||||
k = k - 1 // need another pass
|
||||
} else if v.Start < ni.Stop {
|
||||
i.intervals[k] = NewInterval(ni.Start, v.Start)
|
||||
} else if v.Stop < ni.Stop {
|
||||
i.intervals[k] = NewInterval(v.Stop, ni.Stop)
|
||||
}
|
||||
k++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (i *IntervalSet) removeOne(v int) {
|
||||
if i.intervals != nil {
|
||||
for k := 0; k < len(i.intervals); k++ {
|
||||
ki := i.intervals[k]
|
||||
// intervals are ordered
|
||||
if v < ki.Start {
|
||||
return
|
||||
} else if v == ki.Start && v == ki.Stop-1 {
|
||||
// i.intervals.splice(k, 1)
|
||||
i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
|
||||
return
|
||||
} else if v == ki.Start {
|
||||
i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
|
||||
return
|
||||
} else if v == ki.Stop-1 {
|
||||
i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
|
||||
return
|
||||
} else if v < ki.Stop-1 {
|
||||
x := NewInterval(ki.Start, v)
|
||||
ki.Start = v + 1
|
||||
// i.intervals.splice(k, 0, x)
|
||||
i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (i *IntervalSet) String() string {
|
||||
return i.StringVerbose(nil, nil, false)
|
||||
}
|
||||
|
||||
func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
|
||||
|
||||
if i.intervals == nil {
|
||||
return "{}"
|
||||
} else if literalNames != nil || symbolicNames != nil {
|
||||
return i.toTokenString(literalNames, symbolicNames)
|
||||
} else if elemsAreChar {
|
||||
return i.toCharString()
|
||||
}
|
||||
|
||||
return i.toIndexString()
|
||||
}
|
||||
|
||||
func (i *IntervalSet) toCharString() string {
|
||||
names := make([]string, 0, len(i.intervals)) // start empty; entries are appended below
|
||||
|
||||
var sb strings.Builder
|
||||
|
||||
for j := 0; j < len(i.intervals); j++ {
|
||||
v := i.intervals[j]
|
||||
if v.Stop == v.Start+1 {
|
||||
if v.Start == TokenEOF {
|
||||
names = append(names, "<EOF>")
|
||||
} else {
|
||||
sb.WriteByte('\'')
|
||||
sb.WriteRune(rune(v.Start))
|
||||
sb.WriteByte('\'')
|
||||
names = append(names, sb.String())
|
||||
sb.Reset()
|
||||
}
|
||||
} else {
|
||||
sb.WriteByte('\'')
|
||||
sb.WriteRune(rune(v.Start))
|
||||
sb.WriteString("'..'")
|
||||
sb.WriteRune(rune(v.Stop - 1))
|
||||
sb.WriteByte('\'')
|
||||
names = append(names, sb.String())
|
||||
sb.Reset()
|
||||
}
|
||||
}
|
||||
if len(names) > 1 {
|
||||
return "{" + strings.Join(names, ", ") + "}"
|
||||
}
|
||||
|
||||
return names[0]
|
||||
}
|
||||
|
||||
func (i *IntervalSet) toIndexString() string {
|
||||
|
||||
names := make([]string, 0)
|
||||
for j := 0; j < len(i.intervals); j++ {
|
||||
v := i.intervals[j]
|
||||
if v.Stop == v.Start+1 {
|
||||
if v.Start == TokenEOF {
|
||||
names = append(names, "<EOF>")
|
||||
} else {
|
||||
names = append(names, strconv.Itoa(v.Start))
|
||||
}
|
||||
} else {
|
||||
names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
|
||||
}
|
||||
}
|
||||
if len(names) > 1 {
|
||||
return "{" + strings.Join(names, ", ") + "}"
|
||||
}
|
||||
|
||||
return names[0]
|
||||
}
|
||||
|
||||
func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
|
||||
names := make([]string, 0)
|
||||
for _, v := range i.intervals {
|
||||
for j := v.Start; j < v.Stop; j++ {
|
||||
names = append(names, i.elementName(literalNames, symbolicNames, j))
|
||||
}
|
||||
}
|
||||
if len(names) > 1 {
|
||||
return "{" + strings.Join(names, ", ") + "}"
|
||||
}
|
||||
|
||||
return names[0]
|
||||
}
|
||||
|
||||
func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
|
||||
if a == TokenEOF {
|
||||
return "<EOF>"
|
||||
} else if a == TokenEpsilon {
|
||||
return "<EPSILON>"
|
||||
} else {
|
||||
if a < len(literalNames) && literalNames[a] != "" {
|
||||
return literalNames[a]
|
||||
}
|
||||
|
||||
return symbolicNames[a]
|
||||
}
|
||||
}
|
418
vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
generated
vendored
Normal file
@ -0,0 +1,418 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// A lexer is a recognizer that draws input symbols from a character stream.
|
||||
// Lexer grammars result in a subclass of this object. A Lexer object
|
||||
// uses simplified Match() and error recovery mechanisms in the interest
|
||||
// of speed.
|
||||
///
|
||||
|
||||
type Lexer interface {
|
||||
TokenSource
|
||||
Recognizer
|
||||
|
||||
Emit() Token
|
||||
|
||||
SetChannel(int)
|
||||
PushMode(int)
|
||||
PopMode() int
|
||||
SetType(int)
|
||||
SetMode(int)
|
||||
}
|
||||
|
||||
type BaseLexer struct {
|
||||
*BaseRecognizer
|
||||
|
||||
Interpreter ILexerATNSimulator
|
||||
TokenStartCharIndex int
|
||||
TokenStartLine int
|
||||
TokenStartColumn int
|
||||
ActionType int
|
||||
Virt Lexer // The most derived lexer implementation. Allows virtual method calls.
|
||||
|
||||
input CharStream
|
||||
factory TokenFactory
|
||||
tokenFactorySourcePair *TokenSourceCharStreamPair
|
||||
token Token
|
||||
hitEOF bool
|
||||
channel int
|
||||
thetype int
|
||||
modeStack IntStack
|
||||
mode int
|
||||
text string
|
||||
}
|
||||
|
||||
func NewBaseLexer(input CharStream) *BaseLexer {
|
||||
|
||||
lexer := new(BaseLexer)
|
||||
|
||||
lexer.BaseRecognizer = NewBaseRecognizer()
|
||||
|
||||
lexer.input = input
|
||||
lexer.factory = CommonTokenFactoryDEFAULT
|
||||
lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input}
|
||||
|
||||
lexer.Virt = lexer
|
||||
|
||||
lexer.Interpreter = nil // child classes must populate it
|
||||
|
||||
// The goal of all lexer rules/methods is to create a token object.
|
||||
// this is an instance variable, as multiple rules may collaborate to
|
||||
// create a single token. NextToken will return l object after
|
||||
// Matching lexer rule(s). If you subclass to allow multiple token
|
||||
// emissions, then set this to the last token to be Matched or
|
||||
// something nonnil so that the auto token emit mechanism will not
|
||||
// emit another token.
|
||||
lexer.token = nil
|
||||
|
||||
// What character index in the stream did the current token start at?
|
||||
// Needed, for example, to get the text for current token. Set at
|
||||
// the start of NextToken.
|
||||
lexer.TokenStartCharIndex = -1
|
||||
|
||||
// The line on which the first character of the token resides///
|
||||
lexer.TokenStartLine = -1
|
||||
|
||||
// The character position of first character within the line///
|
||||
lexer.TokenStartColumn = -1
|
||||
|
||||
// Once we see EOF on char stream, next token will be EOF.
|
||||
// If you have DONE : EOF then you see DONE EOF.
|
||||
lexer.hitEOF = false
|
||||
|
||||
// The channel number for the current token///
|
||||
lexer.channel = TokenDefaultChannel
|
||||
|
||||
// The token type for the current token///
|
||||
lexer.thetype = TokenInvalidType
|
||||
|
||||
lexer.modeStack = make([]int, 0)
|
||||
lexer.mode = LexerDefaultMode
|
||||
|
||||
// You can set the text for the current token to override what is in
|
||||
// the input char buffer. Use SetText() or set this instance var directly.
|
||||
// /
|
||||
lexer.text = ""
|
||||
|
||||
return lexer
|
||||
}
|
||||
|
||||
const (
|
||||
LexerDefaultMode = 0
|
||||
LexerMore = -2
|
||||
LexerSkip = -3
|
||||
)
|
||||
|
||||
const (
|
||||
LexerDefaultTokenChannel = TokenDefaultChannel
|
||||
LexerHidden = TokenHiddenChannel
|
||||
LexerMinCharValue = 0x0000
|
||||
LexerMaxCharValue = 0x10FFFF
|
||||
)
|
||||
|
||||
func (b *BaseLexer) reset() {
|
||||
// whack the Lexer state variables
|
||||
if b.input != nil {
|
||||
b.input.Seek(0) // rewind the input
|
||||
}
|
||||
b.token = nil
|
||||
b.thetype = TokenInvalidType
|
||||
b.channel = TokenDefaultChannel
|
||||
b.TokenStartCharIndex = -1
|
||||
b.TokenStartColumn = -1
|
||||
b.TokenStartLine = -1
|
||||
b.text = ""
|
||||
|
||||
b.hitEOF = false
|
||||
b.mode = LexerDefaultMode
|
||||
b.modeStack = make([]int, 0)
|
||||
|
||||
b.Interpreter.reset()
|
||||
}
|
||||
|
||||
func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {
|
||||
return b.Interpreter
|
||||
}
|
||||
|
||||
func (b *BaseLexer) GetInputStream() CharStream {
|
||||
return b.input
|
||||
}
|
||||
|
||||
func (b *BaseLexer) GetSourceName() string {
|
||||
return b.GrammarFileName
|
||||
}
|
||||
|
||||
func (b *BaseLexer) SetChannel(v int) {
|
||||
b.channel = v
|
||||
}
|
||||
|
||||
func (b *BaseLexer) GetTokenFactory() TokenFactory {
|
||||
return b.factory
|
||||
}
|
||||
|
||||
func (b *BaseLexer) setTokenFactory(f TokenFactory) {
|
||||
b.factory = f
|
||||
}
|
||||
|
||||
func (b *BaseLexer) safeMatch() (ret int) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
if re, ok := e.(RecognitionException); ok {
|
||||
b.notifyListeners(re) // Report error
|
||||
b.Recover(re)
|
||||
ret = LexerSkip // default
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return b.Interpreter.Match(b.input, b.mode)
|
||||
}
|
||||
|
||||
// Return a token from this source, i.e., Match a token on the char stream.
|
||||
func (b *BaseLexer) NextToken() Token {
|
||||
if b.input == nil {
|
||||
panic("NextToken requires a non-nil input stream.")
|
||||
}
|
||||
|
||||
tokenStartMarker := b.input.Mark()
|
||||
|
||||
// previously in finally block
|
||||
defer func() {
|
||||
// make sure we release marker after Match or
|
||||
// unbuffered char stream will keep buffering
|
||||
b.input.Release(tokenStartMarker)
|
||||
}()
|
||||
|
||||
for {
|
||||
if b.hitEOF {
|
||||
b.EmitEOF()
|
||||
return b.token
|
||||
}
|
||||
b.token = nil
|
||||
b.channel = TokenDefaultChannel
|
||||
b.TokenStartCharIndex = b.input.Index()
|
||||
b.TokenStartColumn = b.Interpreter.GetCharPositionInLine()
|
||||
b.TokenStartLine = b.Interpreter.GetLine()
|
||||
b.text = ""
|
||||
continueOuter := false
|
||||
for {
|
||||
b.thetype = TokenInvalidType
|
||||
ttype := LexerSkip
|
||||
|
||||
ttype = b.safeMatch()
|
||||
|
||||
if b.input.LA(1) == TokenEOF {
|
||||
b.hitEOF = true
|
||||
}
|
||||
if b.thetype == TokenInvalidType {
|
||||
b.thetype = ttype
|
||||
}
|
||||
if b.thetype == LexerSkip {
|
||||
continueOuter = true
|
||||
break
|
||||
}
|
||||
if b.thetype != LexerMore {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if continueOuter {
|
||||
continue
|
||||
}
|
||||
if b.token == nil {
|
||||
b.Virt.Emit()
|
||||
}
|
||||
return b.token
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Instruct the lexer to Skip creating a token for the current lexer rule
|
||||
// and look for another token. NextToken() knows to keep looking when
|
||||
// a lexer rule finishes with token set to SKIPTOKEN. Recall that
|
||||
// if token==nil at end of any token rule, it creates one for you
|
||||
// and emits it.
|
||||
// /
|
||||
func (b *BaseLexer) Skip() {
|
||||
b.thetype = LexerSkip
|
||||
}
|
||||
|
||||
func (b *BaseLexer) More() {
|
||||
b.thetype = LexerMore
|
||||
}
|
||||
|
||||
func (b *BaseLexer) SetMode(m int) {
|
||||
b.mode = m
|
||||
}
|
||||
|
||||
func (b *BaseLexer) PushMode(m int) {
|
||||
if LexerATNSimulatorDebug {
|
||||
fmt.Println("pushMode " + strconv.Itoa(m))
|
||||
}
|
||||
b.modeStack.Push(b.mode)
|
||||
b.mode = m
|
||||
}
|
||||
|
||||
func (b *BaseLexer) PopMode() int {
|
||||
if len(b.modeStack) == 0 {
|
||||
panic("Empty Stack")
|
||||
}
|
||||
if LexerATNSimulatorDebug {
|
||||
fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
|
||||
}
|
||||
i, _ := b.modeStack.Pop()
|
||||
b.mode = i
|
||||
return b.mode
|
||||
}
|
||||
|
||||
func (b *BaseLexer) inputStream() CharStream {
|
||||
return b.input
|
||||
}
|
||||
|
||||
// SetInputStream resets the lexer input stream and associated lexer state.
|
||||
func (b *BaseLexer) SetInputStream(input CharStream) {
|
||||
b.input = nil
|
||||
b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
|
||||
b.reset()
|
||||
b.input = input
|
||||
b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
|
||||
}
|
||||
|
||||
func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
|
||||
return b.tokenFactorySourcePair
|
||||
}
|
||||
|
||||
// By default does not support multiple emits per NextToken invocation
|
||||
// for efficiency reasons. Subclass and override this method, NextToken,
|
||||
// and GetToken (to push tokens into a list and pull from that list
|
||||
// rather than a single variable as this implementation does).
|
||||
// /
|
||||
func (b *BaseLexer) EmitToken(token Token) {
|
||||
b.token = token
|
||||
}
|
||||
|
||||
// The standard method called to automatically emit a token at the
|
||||
// outermost lexical rule. The token object should point into the
|
||||
// char buffer start..stop. If there is a text override in 'text',
|
||||
// use that to set the token's text. Override this method to emit
|
||||
// custom Token objects or provide a new factory.
|
||||
// /
|
||||
func (b *BaseLexer) Emit() Token {
|
||||
t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
|
||||
b.EmitToken(t)
|
||||
return t
|
||||
}
|
||||
|
||||
func (b *BaseLexer) EmitEOF() Token {
|
||||
cpos := b.GetCharPositionInLine()
|
||||
lpos := b.GetLine()
|
||||
eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos)
|
||||
b.EmitToken(eof)
|
||||
return eof
|
||||
}
|
||||
|
||||
func (b *BaseLexer) GetCharPositionInLine() int {
|
||||
return b.Interpreter.GetCharPositionInLine()
|
||||
}
|
||||
|
||||
func (b *BaseLexer) GetLine() int {
|
||||
return b.Interpreter.GetLine()
|
||||
}
|
||||
|
||||
func (b *BaseLexer) GetType() int {
|
||||
return b.thetype
|
||||
}
|
||||
|
||||
func (b *BaseLexer) SetType(t int) {
|
||||
b.thetype = t
|
||||
}
|
||||
|
||||
// What is the index of the current character of lookahead?///
|
||||
func (b *BaseLexer) GetCharIndex() int {
|
||||
return b.input.Index()
|
||||
}
|
||||
|
||||
// Return the text Matched so far for the current token or any text override.
|
||||
// Setting the complete text of this token (via SetText) wipes any previous changes to the text.
|
||||
func (b *BaseLexer) GetText() string {
|
||||
if b.text != "" {
|
||||
return b.text
|
||||
}
|
||||
|
||||
return b.Interpreter.GetText(b.input)
|
||||
}
|
||||
|
||||
func (b *BaseLexer) SetText(text string) {
|
||||
b.text = text
|
||||
}
|
||||
|
||||
func (b *BaseLexer) GetATN() *ATN {
|
||||
return b.Interpreter.ATN()
|
||||
}
|
||||
|
||||
// Return a list of all Token objects in input char stream.
|
||||
// Forces load of all tokens. Does not include EOF token.
|
||||
// /
|
||||
func (b *BaseLexer) GetAllTokens() []Token {
|
||||
vl := b.Virt
|
||||
tokens := make([]Token, 0)
|
||||
t := vl.NextToken()
|
||||
for t.GetTokenType() != TokenEOF {
|
||||
tokens = append(tokens, t)
|
||||
t = vl.NextToken()
|
||||
}
|
||||
return tokens
|
||||
}
|
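GetAllTokens drains the lexer through the virtual NextToken shown above. The same loop, written against the TokenSource interface so it works with any generated lexer (MyLexer and the parser package below are hypothetical; a concrete lexer must come from generated code):

package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

// drain collects tokens the way GetAllTokens does, stopping before EOF.
func drain(src antlr.TokenSource) []antlr.Token {
	var toks []antlr.Token
	for t := src.NextToken(); t.GetTokenType() != antlr.TokenEOF; t = src.NextToken() {
		toks = append(toks, t)
	}
	return toks
}

func main() {
	// lex := parser.NewMyLexer(antlr.NewInputStream("1 + 2")) // hypothetical generated lexer
	// for _, t := range drain(lex) {
	// 	fmt.Printf("type=%d text=%q line=%d\n", t.GetTokenType(), t.GetText(), t.GetLine())
	// }
	fmt.Println("plug a generated lexer into drain above")
}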
||||
|
||||
func (b *BaseLexer) notifyListeners(e RecognitionException) {
|
||||
start := b.TokenStartCharIndex
|
||||
stop := b.input.Index()
|
||||
text := b.input.GetTextFromInterval(NewInterval(start, stop))
|
||||
msg := "token recognition error at: '" + text + "'"
|
||||
listener := b.GetErrorListenerDispatch()
|
||||
listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
|
||||
}
|
||||
|
||||
func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
|
||||
if c == TokenEOF {
|
||||
return "<EOF>"
|
||||
} else if c == '\n' {
|
||||
return "\\n"
|
||||
} else if c == '\t' {
|
||||
return "\\t"
|
||||
} else if c == '\r' {
|
||||
return "\\r"
|
||||
} else {
|
||||
return string(c)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BaseLexer) getCharErrorDisplay(c rune) string {
|
||||
return "'" + b.getErrorDisplayForChar(c) + "'"
|
||||
}
|
||||
|
||||
// Lexers can normally Match any char in its vocabulary after Matching
|
||||
// a token, so do the easy thing and just kill a character and hope
|
||||
// it all works out. You can instead use the rule invocation stack
|
||||
// to do sophisticated error recovery if you are in a fragment rule.
|
||||
// /
|
||||
func (b *BaseLexer) Recover(re RecognitionException) {
|
||||
if b.input.LA(1) != TokenEOF {
|
||||
if _, ok := re.(*LexerNoViableAltException); ok {
|
||||
// Skip a char and try again
|
||||
b.Interpreter.Consume(b.input)
|
||||
} else {
|
||||
// TODO: Do we lose character or line position information?
|
||||
b.input.Consume()
|
||||
}
|
||||
}
|
||||
}
|
430
vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
generated
vendored
Normal file
@ -0,0 +1,430 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import "strconv"
|
||||
|
||||
const (
|
||||
LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action.
|
||||
LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action.
|
||||
LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action.
|
||||
LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action.
|
||||
LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action.
|
||||
LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action.
|
||||
LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action.
|
||||
LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action.
|
||||
)
|
||||
|
||||
type LexerAction interface {
|
||||
getActionType() int
|
||||
getIsPositionDependent() bool
|
||||
execute(lexer Lexer)
|
||||
hash() int
|
||||
equals(other LexerAction) bool
|
||||
}
|
||||
|
||||
type BaseLexerAction struct {
|
||||
actionType int
|
||||
isPositionDependent bool
|
||||
}
|
||||
|
||||
func NewBaseLexerAction(action int) *BaseLexerAction {
|
||||
la := new(BaseLexerAction)
|
||||
|
||||
la.actionType = action
|
||||
la.isPositionDependent = false
|
||||
|
||||
return la
|
||||
}
|
||||
|
||||
func (b *BaseLexerAction) execute(lexer Lexer) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (b *BaseLexerAction) getActionType() int {
|
||||
return b.actionType
|
||||
}
|
||||
|
||||
func (b *BaseLexerAction) getIsPositionDependent() bool {
|
||||
return b.isPositionDependent
|
||||
}
|
||||
|
||||
func (b *BaseLexerAction) hash() int {
|
||||
return b.actionType
|
||||
}
|
||||
|
||||
func (b *BaseLexerAction) equals(other LexerAction) bool {
|
||||
return b == other
|
||||
}
|
||||
|
||||
//
|
||||
// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
|
||||
//
|
||||
// <p>The {@code Skip} command does not have any parameters, so this action is
|
||||
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
|
||||
type LexerSkipAction struct {
|
||||
*BaseLexerAction
|
||||
}
|
||||
|
||||
func NewLexerSkipAction() *LexerSkipAction {
|
||||
la := new(LexerSkipAction)
|
||||
la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip)
|
||||
return la
|
||||
}
|
||||
|
||||
// Provides a singleton instance of this parameterless lexer action.
|
||||
var LexerSkipActionINSTANCE = NewLexerSkipAction()
|
||||
|
||||
func (l *LexerSkipAction) execute(lexer Lexer) {
|
||||
lexer.Skip()
|
||||
}
|
||||
|
||||
func (l *LexerSkipAction) String() string {
|
||||
return "skip"
|
||||
}
|
||||
|
||||
// Implements the {@code type} lexer action by calling {@link Lexer//setType}
|
||||
// with the assigned type.
|
||||
type LexerTypeAction struct {
|
||||
*BaseLexerAction
|
||||
|
||||
thetype int
|
||||
}
|
||||
|
||||
func NewLexerTypeAction(thetype int) *LexerTypeAction {
|
||||
l := new(LexerTypeAction)
|
||||
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType)
|
||||
l.thetype = thetype
|
||||
return l
|
||||
}
|
||||
|
||||
func (l *LexerTypeAction) execute(lexer Lexer) {
|
||||
lexer.SetType(l.thetype)
|
||||
}
|
||||
|
||||
func (l *LexerTypeAction) hash() int {
|
||||
h := murmurInit(0)
|
||||
h = murmurUpdate(h, l.actionType)
|
||||
h = murmurUpdate(h, l.thetype)
|
||||
return murmurFinish(h, 2)
|
||||
}
|
||||
|
||||
func (l *LexerTypeAction) equals(other LexerAction) bool {
|
||||
if l == other {
|
||||
return true
|
||||
} else if _, ok := other.(*LexerTypeAction); !ok {
|
||||
return false
|
||||
} else {
|
||||
return l.thetype == other.(*LexerTypeAction).thetype
|
||||
}
|
||||
}
|
||||
|
||||
func (l *LexerTypeAction) String() string {
|
||||
return "actionType(" + strconv.Itoa(l.thetype) + ")"
|
||||
}
|
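equals on these actions is value-based, and hash is derived from the same fields, so two independently constructed actions with the same payload agree on both. A hypothetical in-package check (the methods are unexported):

package antlr

import "testing"

func TestLexerTypeActionValueEquality(t *testing.T) {
	a := NewLexerTypeAction(5)
	b := NewLexerTypeAction(5)
	if !a.equals(b) || a.hash() != b.hash() {
		t.Fatal("value-equal actions must agree on equals and hash")
	}
}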
||||
|
||||
// Implements the {@code pushMode} lexer action by calling
|
||||
// {@link Lexer//pushMode} with the assigned mode.
|
||||
type LexerPushModeAction struct {
|
||||
*BaseLexerAction
|
||||
|
||||
mode int
|
||||
}
|
||||
|
||||
func NewLexerPushModeAction(mode int) *LexerPushModeAction {
|
||||
|
||||
l := new(LexerPushModeAction)
|
||||
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode)
|
||||
|
||||
l.mode = mode
|
||||
return l
|
||||
}
|
||||
|
||||
// <p>This action is implemented by calling {@link Lexer//pushMode} with the
|
||||
// value provided by {@link //getMode}.</p>
|
||||
func (l *LexerPushModeAction) execute(lexer Lexer) {
|
||||
lexer.PushMode(l.mode)
|
||||
}
|
||||
|
||||
func (l *LexerPushModeAction) hash() int {
|
||||
h := murmurInit(0)
|
||||
h = murmurUpdate(h, l.actionType)
|
||||
h = murmurUpdate(h, l.mode)
|
||||
return murmurFinish(h, 2)
|
||||
}
|
||||
|
||||
func (l *LexerPushModeAction) equals(other LexerAction) bool {
|
||||
if l == other {
|
||||
return true
|
||||
} else if _, ok := other.(*LexerPushModeAction); !ok {
|
||||
return false
|
||||
} else {
|
||||
return l.mode == other.(*LexerPushModeAction).mode
|
||||
}
|
||||
}
|
||||
|
||||
func (l *LexerPushModeAction) String() string {
|
||||
return "pushMode(" + strconv.Itoa(l.mode) + ")"
|
||||
}
|
||||
|
||||
// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
|
||||
//
|
||||
// <p>The {@code popMode} command does not have any parameters, so this action is
|
||||
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
|
||||
type LexerPopModeAction struct {
|
||||
*BaseLexerAction
|
||||
}
|
||||
|
||||
func NewLexerPopModeAction() *LexerPopModeAction {
|
||||
|
||||
l := new(LexerPopModeAction)
|
||||
|
||||
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode)
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
|
||||
|
||||
// <p>This action is implemented by calling {@link Lexer//popMode}.</p>
|
||||
func (l *LexerPopModeAction) execute(lexer Lexer) {
|
||||
lexer.PopMode()
|
||||
}
|
||||
|
||||
func (l *LexerPopModeAction) String() string {
|
||||
return "popMode"
|
||||
}
|
||||
|
||||
// Implements the {@code more} lexer action by calling {@link Lexer//more}.
|
||||
//
|
||||
// <p>The {@code more} command does not have any parameters, so this action is
|
||||
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
|
||||
|
||||
type LexerMoreAction struct {
|
||||
*BaseLexerAction
|
||||
}
|
||||
|
||||
func NewLexerMoreAction() *LexerMoreAction {
|
||||
l := new(LexerMoreAction)
|
||||
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore)
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
var LexerMoreActionINSTANCE = NewLexerMoreAction()
|
||||
|
||||
// <p>This action is implemented by calling {@link Lexer//More}.</p>
|
||||
func (l *LexerMoreAction) execute(lexer Lexer) {
|
||||
lexer.More()
|
||||
}
|
||||
|
||||
func (l *LexerMoreAction) String() string {
|
||||
return "more"
|
||||
}
|
||||
|
||||
// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
|
||||
// the assigned mode.
|
||||
type LexerModeAction struct {
|
||||
*BaseLexerAction
|
||||
|
||||
mode int
|
||||
}
|
||||
|
||||
func NewLexerModeAction(mode int) *LexerModeAction {
|
||||
l := new(LexerModeAction)
|
||||
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode)
|
||||
l.mode = mode
|
||||
return l
|
||||
}
|
||||
|
||||
// <p>This action is implemented by calling {@link Lexer//mode} with the
|
||||
// value provided by {@link //getMode}.</p>
|
||||
func (l *LexerModeAction) execute(lexer Lexer) {
|
||||
lexer.SetMode(l.mode)
|
||||
}
|
||||
|
||||
func (l *LexerModeAction) hash() int {
|
||||
h := murmurInit(0)
|
||||
h = murmurUpdate(h, l.actionType)
|
||||
h = murmurUpdate(h, l.mode)
|
||||
return murmurFinish(h, 2)
|
||||
}
|
||||
|
||||
func (l *LexerModeAction) equals(other LexerAction) bool {
|
||||
if l == other {
|
||||
return true
|
||||
} else if _, ok := other.(*LexerModeAction); !ok {
|
||||
return false
|
||||
} else {
|
||||
return l.mode == other.(*LexerModeAction).mode
|
||||
}
|
||||
}
|
||||
|
||||
func (l *LexerModeAction) String() string {
|
||||
return "mode(" + strconv.Itoa(l.mode) + ")"
|
||||
}
|
||||
|
||||
// Executes a custom lexer action by calling {@link Recognizer//action} with the
|
||||
// rule and action indexes assigned to the custom action. The implementation of
|
||||
// a custom action is added to the generated code for the lexer in an override
|
||||
// of {@link Recognizer//action} when the grammar is compiled.
|
||||
//
|
||||
// <p>This class may represent embedded actions created with the <code>{...}</code>
|
||||
// syntax in ANTLR 4, as well as actions created for lexer commands where the
|
||||
// command argument could not be evaluated when the grammar was compiled.</p>
|
||||
|
||||
// Constructs a custom lexer action with the specified rule and action
|
||||
// indexes.
|
||||
//
|
||||
// @param ruleIndex The rule index to use for calls to
|
||||
// {@link Recognizer//action}.
|
||||
// @param actionIndex The action index to use for calls to
|
||||
// {@link Recognizer//action}.
|
||||
|
||||
type LexerCustomAction struct {
|
||||
*BaseLexerAction
|
||||
ruleIndex, actionIndex int
|
||||
}
|
||||
|
||||
func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
|
||||
l := new(LexerCustomAction)
|
||||
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom)
|
||||
l.ruleIndex = ruleIndex
|
||||
l.actionIndex = actionIndex
|
||||
l.isPositionDependent = true
|
||||
return l
|
||||
}
|
||||
|
||||
// <p>Custom actions are implemented by calling {@link Lexer//action} with the
|
||||
// appropriate rule and action indexes.</p>
|
||||
func (l *LexerCustomAction) execute(lexer Lexer) {
|
||||
lexer.Action(nil, l.ruleIndex, l.actionIndex)
|
||||
}
|
||||
|
||||
func (l *LexerCustomAction) hash() int {
|
||||
h := murmurInit(0)
|
||||
h = murmurUpdate(h, l.actionType)
|
||||
h = murmurUpdate(h, l.ruleIndex)
|
||||
h = murmurUpdate(h, l.actionIndex)
|
||||
return murmurFinish(h, 3)
|
||||
}
|
||||
|
||||
func (l *LexerCustomAction) equals(other LexerAction) bool {
|
||||
if l == other {
|
||||
return true
|
||||
} else if _, ok := other.(*LexerCustomAction); !ok {
|
||||
return false
|
||||
} else {
|
||||
return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex
|
||||
}
|
||||
}
|
||||
|
||||
// Implements the {@code channel} lexer action by calling
|
||||
// {@link Lexer//setChannel} with the assigned channel.
|
||||
// Constructs a new {@code channel} action with the specified channel value.
|
||||
// @param channel The channel value to pass to {@link Lexer//setChannel}.
|
||||
type LexerChannelAction struct {
|
||||
*BaseLexerAction
|
||||
|
||||
channel int
|
||||
}
|
||||
|
||||
func NewLexerChannelAction(channel int) *LexerChannelAction {
|
||||
l := new(LexerChannelAction)
|
||||
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
|
||||
l.channel = channel
|
||||
return l
|
||||
}
|
||||
|
||||
// <p>This action is implemented by calling {@link Lexer//setChannel} with the
|
||||
// value provided by {@link //getChannel}.</p>
|
||||
func (l *LexerChannelAction) execute(lexer Lexer) {
|
||||
lexer.SetChannel(l.channel)
|
||||
}
|
||||
|
||||
func (l *LexerChannelAction) hash() int {
|
||||
h := murmurInit(0)
|
||||
h = murmurUpdate(h, l.actionType)
|
||||
h = murmurUpdate(h, l.channel)
|
||||
return murmurFinish(h, 2)
|
||||
}
|
||||
|
||||
func (l *LexerChannelAction) equals(other LexerAction) bool {
|
||||
if l == other {
|
||||
return true
|
||||
} else if _, ok := other.(*LexerChannelAction); !ok {
|
||||
return false
|
||||
} else {
|
||||
return l.channel == other.(*LexerChannelAction).channel
|
||||
}
|
||||
}
|
||||
|
||||
func (l *LexerChannelAction) String() string {
|
||||
return "channel(" + strconv.Itoa(l.channel) + ")"
|
||||
}
|
||||
|
||||
// This implementation of {@link LexerAction} is used for tracking input offsets
|
||||
// for position-dependent actions within a {@link LexerActionExecutor}.
|
||||
//
|
||||
// <p>This action is not serialized as part of the ATN, and is only required for
|
||||
// position-dependent lexer actions which appear at a location other than the
|
||||
// end of a rule. For more information about DFA optimizations employed for
|
||||
// lexer actions, see {@link LexerActionExecutor//append} and
|
||||
// {@link LexerActionExecutor//fixOffsetBeforeMatch}.</p>
|
||||
|
||||
// Constructs a new indexed custom action by associating a character offset
|
||||
// with a {@link LexerAction}.
|
||||
//
|
||||
// <p>Note: This class is only required for lexer actions for which
|
||||
// {@link LexerAction//isPositionDependent} returns {@code true}.</p>
|
||||
//
|
||||
// @param offset The offset into the input {@link CharStream}, relative to
|
||||
// the token start index, at which the specified lexer action should be
|
||||
// executed.
|
||||
// @param action The lexer action to execute at a particular offset in the
|
||||
// input {@link CharStream}.
|
||||
type LexerIndexedCustomAction struct {
|
||||
*BaseLexerAction
|
||||
|
||||
offset int
|
||||
lexerAction LexerAction
|
||||
isPositionDependent bool
|
||||
}
|
||||
|
||||
func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
|
||||
|
||||
l := new(LexerIndexedCustomAction)
|
||||
l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType())
|
||||
|
||||
l.offset = offset
|
||||
l.lexerAction = lexerAction
|
||||
l.isPositionDependent = true
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
// <p>This method calls {@link //execute} on the result of {@link //getAction}
|
||||
// using the provided {@code lexer}.</p>
|
||||
func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
|
||||
// assume the input stream position was properly set by the calling code
|
||||
l.lexerAction.execute(lexer)
|
||||
}
|
||||
|
||||
func (l *LexerIndexedCustomAction) hash() int {
|
||||
h := murmurInit(0)
|
||||
h = murmurUpdate(h, l.offset)
|
||||
h = murmurUpdate(h, l.lexerAction.hash())
|
||||
return murmurFinish(h, 2)
|
||||
}
|
||||
|
||||
func (l *LexerIndexedCustomAction) equals(other LexerAction) bool {
|
||||
if l == other {
|
||||
return true
|
||||
} else if _, ok := other.(*LexerIndexedCustomAction); !ok {
|
||||
return false
|
||||
} else {
|
||||
return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction
|
||||
}
|
||||
}
|
173
vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
generated
vendored
Normal file
@ -0,0 +1,173 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
// Represents an executor for a sequence of lexer actions which are traversed during
|
||||
// the Matching operation of a lexer rule (token).
|
||||
//
|
||||
// <p>The executor tracks position information for position-dependent lexer actions
|
||||
// efficiently, ensuring that actions appearing only at the end of the rule do
|
||||
// not cause bloating of the {@link DFA} created for the lexer.</p>
|
||||
|
||||
type LexerActionExecutor struct {
|
||||
lexerActions []LexerAction
|
||||
cachedHash int
|
||||
}
|
||||
|
||||
func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
|
||||
|
||||
if lexerActions == nil {
|
||||
lexerActions = make([]LexerAction, 0)
|
||||
}
|
||||
|
||||
l := new(LexerActionExecutor)
|
||||
|
||||
l.lexerActions = lexerActions
|
||||
|
||||
// Caches the result of {@link //hashCode} since the hash code is an element
|
||||
// of the performance-critical {@link LexerATNConfig//hashCode} operation.
|
||||
l.cachedHash = murmurInit(57)
|
||||
for _, a := range lexerActions {
|
||||
l.cachedHash = murmurUpdate(l.cachedHash, a.hash())
|
||||
}
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
// Creates a {@link LexerActionExecutor} which executes the actions for
|
||||
// the input {@code lexerActionExecutor} followed by a specified
|
||||
// {@code lexerAction}.
|
||||
//
|
||||
// @param lexerActionExecutor The executor for actions already traversed by
|
||||
// the lexer while Matching a token within a particular
|
||||
// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
|
||||
// though it were an empty executor.
|
||||
// @param lexerAction The lexer action to execute after the actions
|
||||
// specified in {@code lexerActionExecutor}.
|
||||
//
|
||||
// @return A {@link LexerActionExecutor} for executing the combined actions
|
||||
// of {@code lexerActionExecutor} and {@code lexerAction}.
|
||||
func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
|
||||
if lexerActionExecutor == nil {
|
||||
return NewLexerActionExecutor([]LexerAction{lexerAction})
|
||||
}
|
||||
|
||||
return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
|
||||
}
|
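LexerActionExecutorappend treats a nil executor as empty, so a chain of actions can be built up one transition at a time. A hypothetical in-package sketch (lexerActions is unexported):

package antlr

import "testing"

func TestExecutorAppend(t *testing.T) {
	var ex *LexerActionExecutor // nil behaves as the empty executor
	ex = LexerActionExecutorappend(ex, LexerSkipActionINSTANCE)
	ex = LexerActionExecutorappend(ex, LexerPopModeActionINSTANCE)
	if len(ex.lexerActions) != 2 {
		t.Fatalf("want 2 actions, got %d", len(ex.lexerActions))
	}
}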
||||
|
||||
// Creates a {@link LexerActionExecutor} which encodes the current offset
|
||||
// for position-dependent lexer actions.
|
||||
//
|
||||
// <p>Normally, when the executor encounters lexer actions where
|
||||
// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
|
||||
// {@link IntStream//seek} on the input {@link CharStream} to set the input
|
||||
// position to the <em>end</em> of the current token. This behavior provides
|
||||
// for efficient DFA representation of lexer actions which appear at the end
|
||||
// of a lexer rule, even when the lexer rule Matches a variable number of
|
||||
// characters.</p>
|
||||
//
|
||||
// <p>Prior to traversing a Match transition in the ATN, the current offset
|
||||
// from the token start index is assigned to all position-dependent lexer
|
||||
// actions which have not already been assigned a fixed offset. By storing
|
||||
// the offsets relative to the token start index, the DFA representation of
|
||||
// lexer actions which appear in the middle of tokens remains efficient due
|
||||
// to sharing among tokens of the same length, regardless of their absolute
|
||||
// position in the input stream.</p>
|
||||
//
|
||||
// <p>If the current executor already has offsets assigned to all
|
||||
// position-dependent lexer actions, the method returns {@code this}.</p>
|
||||
//
|
||||
// @param offset The current offset to assign to all position-dependent
|
||||
// lexer actions which do not already have offsets assigned.
|
||||
//
|
||||
// @return A {@link LexerActionExecutor} which stores input stream offsets
|
||||
// for all position-dependent lexer actions.
|
||||
// /
|
||||
func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
|
||||
var updatedLexerActions []LexerAction
|
||||
for i := 0; i < len(l.lexerActions); i++ {
|
||||
_, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
|
||||
if l.lexerActions[i].getIsPositionDependent() && !ok {
|
||||
if updatedLexerActions == nil {
|
||||
updatedLexerActions = make([]LexerAction, 0)
|
||||
|
||||
for _, a := range l.lexerActions {
|
||||
updatedLexerActions = append(updatedLexerActions, a)
|
||||
}
|
||||
}
|
||||
|
||||
updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
|
||||
}
|
||||
}
|
||||
if updatedLexerActions == nil {
|
||||
return l
|
||||
}
|
||||
|
||||
return NewLexerActionExecutor(updatedLexerActions)
|
||||
}
|
||||
|
||||
// Execute the actions encapsulated by this executor within the context of a
|
||||
// particular {@link Lexer}.
|
||||
//
|
||||
// <p>This method calls {@link IntStream//seek} to set the position of the
|
||||
// {@code input} {@link CharStream} prior to calling
|
||||
// {@link LexerAction//execute} on a position-dependent action. Before the
|
||||
// method returns, the input position will be restored to the same position
|
||||
// it was in when the method was invoked.</p>
|
||||
//
|
||||
// @param lexer The lexer instance.
|
||||
// @param input The input stream which is the source for the current token.
|
||||
// When this method is called, the current {@link IntStream//index} for
|
||||
// {@code input} should be the start of the following token, i.e. 1
|
||||
// character past the end of the current token.
|
||||
// @param startIndex The token start index. This value may be passed to
|
||||
// {@link IntStream//seek} to set the {@code input} position to the beginning
|
||||
// of the token.
|
||||
// /
|
||||
func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
|
||||
requiresSeek := false
|
||||
stopIndex := input.Index()
|
||||
|
||||
defer func() {
|
||||
if requiresSeek {
|
||||
input.Seek(stopIndex)
|
||||
}
|
||||
}()
|
||||
|
||||
for i := 0; i < len(l.lexerActions); i++ {
|
||||
lexerAction := l.lexerActions[i]
|
||||
if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
|
||||
offset := la.offset
|
||||
input.Seek(startIndex + offset)
|
||||
lexerAction = la.lexerAction
|
||||
requiresSeek = (startIndex + offset) != stopIndex
|
||||
} else if lexerAction.getIsPositionDependent() {
|
||||
input.Seek(stopIndex)
|
||||
requiresSeek = false
|
||||
}
|
||||
lexerAction.execute(lexer)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *LexerActionExecutor) hash() int {
|
||||
if l == nil {
|
||||
return 61
|
||||
}
|
||||
return l.cachedHash
|
||||
}
|
||||
|
||||
func (l *LexerActionExecutor) equals(other interface{}) bool {
|
||||
if l == other {
|
||||
return true
|
||||
}
|
||||
othert, ok := other.(*LexerActionExecutor)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if othert == nil {
|
||||
return false
|
||||
}
|
||||
// compare element-wise; taking the address of each executor's slice field
// (as the port originally did) could only match an executor against itself
if l.cachedHash != othert.cachedHash || len(l.lexerActions) != len(othert.lexerActions) {
return false
}
for j, a := range l.lexerActions {
if !a.equals(othert.lexerActions[j]) {
return false
}
}
return true
|
||||
}
|
679
vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
generated
vendored
Normal file
@ -0,0 +1,679 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
LexerATNSimulatorDebug = false
|
||||
LexerATNSimulatorDFADebug = false
|
||||
|
||||
LexerATNSimulatorMinDFAEdge = 0
|
||||
LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
|
||||
|
||||
LexerATNSimulatorMatchCalls = 0
|
||||
)
|
||||
|
||||
type ILexerATNSimulator interface {
|
||||
IATNSimulator
|
||||
|
||||
reset()
|
||||
Match(input CharStream, mode int) int
|
||||
GetCharPositionInLine() int
|
||||
GetLine() int
|
||||
GetText(input CharStream) string
|
||||
Consume(input CharStream)
|
||||
}
|
||||
|
||||
type LexerATNSimulator struct {
|
||||
*BaseATNSimulator
|
||||
|
||||
recog Lexer
|
||||
predictionMode int
|
||||
mergeCache DoubleDict
|
||||
startIndex int
|
||||
Line int
|
||||
CharPositionInLine int
|
||||
mode int
|
||||
prevAccept *SimState
|
||||
MatchCalls int
|
||||
}
|
||||
|
||||
func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
|
||||
l := new(LexerATNSimulator)
|
||||
|
||||
l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
|
||||
|
||||
l.decisionToDFA = decisionToDFA
|
||||
l.recog = recog
|
||||
// The current token's starting index into the character stream.
|
||||
// Shared across DFA to ATN simulation in case the ATN fails and the
|
||||
// DFA did not have a previous accept state. In this case, we use the
|
||||
// ATN-generated exception object.
|
||||
l.startIndex = -1
|
||||
// line number 1..n within the input///
|
||||
l.Line = 1
|
||||
// The index of the character relative to the beginning of the line
|
||||
// 0..n-1///
|
||||
l.CharPositionInLine = 0
|
||||
l.mode = LexerDefaultMode
|
||||
// Used during DFA/ATN exec to record the most recent accept configuration
|
||||
// info
|
||||
l.prevAccept = NewSimState()
|
||||
// done
|
||||
return l
|
||||
}
|
||||
|
||||
func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
|
||||
l.CharPositionInLine = simulator.CharPositionInLine
|
||||
l.Line = simulator.Line
|
||||
l.mode = simulator.mode
|
||||
l.startIndex = simulator.startIndex
|
||||
}
|
||||
|
||||
func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
|
||||
l.MatchCalls++
|
||||
l.mode = mode
|
||||
mark := input.Mark()
|
||||
|
||||
defer func() {
|
||||
input.Release(mark)
|
||||
}()
|
||||
|
||||
l.startIndex = input.Index()
|
||||
l.prevAccept.reset()
|
||||
|
||||
dfa := l.decisionToDFA[mode]
|
||||
|
||||
var s0 *DFAState
|
||||
l.atn.stateMu.RLock()
|
||||
s0 = dfa.getS0()
|
||||
l.atn.stateMu.RUnlock()
|
||||
|
||||
if s0 == nil {
|
||||
return l.MatchATN(input)
|
||||
}
|
||||
|
||||
return l.execATN(input, s0)
|
||||
}
|
||||
|
||||
func (l *LexerATNSimulator) reset() {
|
||||
l.prevAccept.reset()
|
||||
l.startIndex = -1
|
||||
l.Line = 1
|
||||
l.CharPositionInLine = 0
|
||||
l.mode = LexerDefaultMode
|
||||
}
|
||||
|
||||
func (l *LexerATNSimulator) MatchATN(input CharStream) int {
|
||||
startState := l.atn.modeToStartState[l.mode]
|
||||
|
||||
if LexerATNSimulatorDebug {
|
||||
fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
|
||||
}
|
||||
oldMode := l.mode
|
||||
s0Closure := l.computeStartState(input, startState)
|
||||
suppressEdge := s0Closure.hasSemanticContext
|
||||
s0Closure.hasSemanticContext = false
|
||||
|
||||
next := l.addDFAState(s0Closure, suppressEdge)
|
||||
|
||||
predict := l.execATN(input, next)
|
||||
|
||||
if LexerATNSimulatorDebug {
|
||||
fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
|
||||
}
|
||||
return predict
|
||||
}
|
||||
|
||||
func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
|
||||
|
||||
if LexerATNSimulatorDebug {
|
||||
fmt.Println("start state closure=" + ds0.configs.String())
|
||||
}
|
||||
if ds0.isAcceptState {
|
||||
// allow zero-length tokens
|
||||
l.captureSimState(l.prevAccept, input, ds0)
|
||||
}
|
||||
t := input.LA(1)
|
||||
s := ds0 // s is current/from DFA state
|
||||
|
||||
for { // while more work
|
||||
if LexerATNSimulatorDebug {
|
||||
fmt.Println("execATN loop starting closure: " + s.configs.String())
|
||||
}
|
||||
|
||||
// As we move src->trg, src->trg, we keep track of the previous trg to
|
||||
// avoid looking up the DFA state again, which is expensive.
|
||||
// If the previous target was already part of the DFA, we might
|
||||
// be able to avoid doing a reach operation upon t. If s!=nil,
|
||||
// it means that semantic predicates didn't prevent us from
|
||||
// creating a DFA state. Once we know s!=nil, we check to see if
|
||||
// the DFA state has an edge already for t. If so, we can just reuse
|
||||
// it's configuration set there's no point in re-computing it.
|
||||
// This is kind of like doing DFA simulation within the ATN
|
||||
// simulation because DFA simulation is really just a way to avoid
|
||||
// computing reach/closure sets. Technically, once we know that
|
||||
// we have a previously added DFA state, we could jump over to
|
||||
// the DFA simulator. But, that would mean popping back and forth
|
||||
// a lot and making things more complicated algorithmically.
|
||||
// This optimization makes a lot of sense for loops within DFA.
|
||||
// A character will take us back to an existing DFA state
|
||||
// that already has lots of edges out of it. e.g., .* in comments.
|
||||
target := l.getExistingTargetState(s, t)
|
||||
if target == nil {
|
||||
target = l.computeTargetState(input, s, t)
|
||||
// print("Computed:" + str(target))
|
||||
}
|
||||
if target == ATNSimulatorError {
|
||||
break
|
||||
}
|
||||
// If l is a consumable input element, make sure to consume before
|
||||
// capturing the accept state so the input index, line, and char
|
||||
// position accurately reflect the state of the interpreter at the
|
||||
// end of the token.
|
||||
if t != TokenEOF {
|
||||
l.Consume(input)
|
||||
}
|
||||
if target.isAcceptState {
|
||||
l.captureSimState(l.prevAccept, input, target)
|
||||
if t == TokenEOF {
|
||||
break
|
||||
}
|
||||
}
|
||||
t = input.LA(1)
|
||||
s = target // flip current DFA target becomes Newsrc/from state
|
||||
}
|
||||
|
||||
return l.failOrAccept(l.prevAccept, input, s.configs, t)
|
||||
}
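
The comment block in execATN above describes a memoization: once a (state, symbol) transition has been computed from the ATN, it is recorded as a DFA edge so the next match can reuse it instead of recomputing reach/closure sets. Below is a minimal, self-contained sketch of that bounded edge-array idea (an editor's illustration only; the state type and the target/connect helpers are hypothetical stand-ins, not part of this package):

package main

import "fmt"

const minEdge, maxEdge = 0, 127

type state struct {
	id    int
	edges []*state // index = symbol - minEdge; nil = not yet computed
}

// target returns the cached successor for symbol t, or nil if it
// still has to be computed (the "fall back to ATN" case).
func (s *state) target(t int) *state {
	if t < minEdge || t > maxEdge || s.edges == nil {
		return nil
	}
	return s.edges[t-minEdge]
}

// connect records a computed successor so later matches reuse it.
func (s *state) connect(t int, to *state) {
	if t < minEdge || t > maxEdge {
		return // only track edges within the bounded range
	}
	if s.edges == nil {
		s.edges = make([]*state, maxEdge-minEdge+1)
	}
	s.edges[t-minEdge] = to
}

func main() {
	a, b := &state{id: 0}, &state{id: 1}
	a.connect('x', b)
	fmt.Println(a.target('x').id) // 1: reused without recomputation
	fmt.Println(a.target('y'))    // <nil>: would trigger a fresh computation
}
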

// Get an existing target state for an edge in the DFA. If the target state
// for the edge has not yet been computed or is otherwise not available,
// this method returns {@code nil}.
//
// @param s The current DFA state
// @param t The next input symbol
// @return The existing target DFA state for the given input symbol
// {@code t}, or {@code nil} if the target state for this edge is not
// already cached
func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
	if t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
		return nil
	}

	l.atn.edgeMu.RLock()
	defer l.atn.edgeMu.RUnlock()
	if s.getEdges() == nil {
		return nil
	}
	target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
	if LexerATNSimulatorDebug && target != nil {
		fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
	}
	return target
}

// Compute a target state for an edge in the DFA, and attempt to add the
// computed state and corresponding edge to the DFA.
//
// @param input The input stream
// @param s The current DFA state
// @param t The next input symbol
//
// @return The computed target DFA state for the given input symbol
// {@code t}. If {@code t} does not lead to a valid DFA state, this method
// returns {@link //ERROR}.
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
	reach := NewOrderedATNConfigSet()

	// if we don't find an existing DFA state
	// Fill reach starting from closure, following t transitions
	l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)

	if len(reach.configs) == 0 { // we got nowhere on t from s
		if !reach.hasSemanticContext {
			// we got nowhere on t; don't throw out this knowledge, as it
			// would cause a failover from the DFA later.
			l.addDFAEdge(s, t, ATNSimulatorError, nil)
		}
		// stop when we can't Match any more characters
		return ATNSimulatorError
	}
	// Add an edge from s to the target DFA state found/created for reach
	return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
}

func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
	if l.prevAccept.dfaState != nil {
		lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
		l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
		return prevAccept.dfaState.prediction
	}

	// if no accept and EOF is first char, return EOF
	if t == TokenEOF && input.Index() == l.startIndex {
		return TokenEOF
	}

	panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
}

// Given a starting configuration set, figure out all ATN configurations
// we can reach upon input {@code t}. Parameter {@code reach} is a return
// parameter.
func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
	// SkipAlt is used to skip processing for configs which have a lower
	// priority than a config that already reached an accept state for the
	// same rule
	SkipAlt := ATNInvalidAltNumber

	for _, cfg := range closure.GetItems() {
		currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
		if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
			continue
		}

		if LexerATNSimulatorDebug {
			fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String())
		}

		for _, trans := range cfg.GetState().GetTransitions() {
			target := l.getReachableTarget(trans, t)
			if target != nil {
				lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
				if lexerActionExecutor != nil {
					lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
				}
				treatEOFAsEpsilon := (t == TokenEOF)
				config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
				if l.closure(input, config, reach,
					currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
					// any remaining configs for this alt have a lower priority
					// than the one that just reached an accept state.
					SkipAlt = cfg.GetAlt()
				}
			}
		}
	}
}

func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
	if LexerATNSimulatorDebug {
		fmt.Printf("ACTION %v\n", lexerActionExecutor)
	}
	// seek to after last char in token
	input.Seek(index)
	l.Line = line
	l.CharPositionInLine = charPos
	if lexerActionExecutor != nil && l.recog != nil {
		lexerActionExecutor.execute(l.recog, input, startIndex)
	}
}

func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState {
	if trans.Matches(t, 0, LexerMaxCharValue) {
		return trans.getTarget()
	}

	return nil
}

func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
	configs := NewOrderedATNConfigSet()
	for i := 0; i < len(p.GetTransitions()); i++ {
		target := p.GetTransitions()[i].getTarget()
		cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY)
		l.closure(input, cfg, configs, false, false, false)
	}

	return configs
}

// Since the alternatives within any lexer decision are ordered by
// preference, this method stops pursuing the closure as soon as an accept
// state is reached. After the first accept state is reached by depth-first
// search from {@code config}, all other (potentially reachable) states for
// this rule would have a lower priority.
//
// @return {@code true} if an accept state is reached, otherwise
// {@code false}.
func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
	currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {

	if LexerATNSimulatorDebug {
		fmt.Println("closure(" + config.String() + ")")
	}

	_, ok := config.state.(*RuleStopState)
	if ok {

		if LexerATNSimulatorDebug {
			if l.recog != nil {
				fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
			} else {
				fmt.Printf("closure at rule stop %s\n", config)
			}
		}

		if config.context == nil || config.context.hasEmptyPath() {
			if config.context == nil || config.context.isEmpty() {
				configs.Add(config, nil)
				return true
			}

			configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil)
			currentAltReachedAcceptState = true
		}
		if config.context != nil && !config.context.isEmpty() {
			for i := 0; i < config.context.length(); i++ {
				if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState {
					newContext := config.context.GetParent(i) // "pop" return state
					returnState := l.atn.states[config.context.getReturnState(i)]
					cfg := NewLexerATNConfig2(config, returnState, newContext)
					currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
				}
			}
		}
		return currentAltReachedAcceptState
	}
	// optimization
	if !config.state.GetEpsilonOnlyTransitions() {
		if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision {
			configs.Add(config, nil)
		}
	}
	for j := 0; j < len(config.state.GetTransitions()); j++ {
		trans := config.state.GetTransitions()[j]
		cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon)
		if cfg != nil {
			currentAltReachedAcceptState = l.closure(input, cfg, configs,
				currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
		}
	}
	return currentAltReachedAcceptState
}

// side effect: can alter configs.hasSemanticContext
func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
	configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {

	var cfg *LexerATNConfig

	if trans.getSerializationType() == TransitionRULE {

		rt := trans.(*RuleTransition)
		newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber())
		cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)

	} else if trans.getSerializationType() == TransitionPRECEDENCE {
		panic("Precedence predicates are not supported in lexers.")
	} else if trans.getSerializationType() == TransitionPREDICATE {
		// Track traversing semantic predicates. If we traverse, we cannot
		// add a DFA state for this "reach" computation because the DFA
		// would not test the predicate again in the future. Rather than
		// creating collections of semantic predicates like v3 and testing
		// them on prediction, v4 will test them on the fly all the time
		// using the ATN, not the DFA. This is slower but semantically it's
		// not used that often. One of the key elements of this predicate
		// mechanism is not adding DFA states that see predicates
		// immediately afterwards in the ATN. For example,

		// a : ID {p1}? | ID {p2}?

		// should create the start state for rule 'a' (to save start state
		// competition), but should not create the target of the ID state.
		// The collection of ATN states the following ID references
		// includes states reached by traversing predicates. Since this is
		// when we test them, we cannot cache the DFA state target of ID.

		pt := trans.(*PredicateTransition)

		if LexerATNSimulatorDebug {
			fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
		}
		configs.SetHasSemanticContext(true)
		if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
			cfg = NewLexerATNConfig4(config, trans.getTarget())
		}
	} else if trans.getSerializationType() == TransitionACTION {
		if config.context == nil || config.context.hasEmptyPath() {
			// execute actions anywhere in the start rule for a token.
			//
			// TODO: if the entry rule is invoked recursively, some
			// actions may be executed during the recursive call. The
			// problem can appear when hasEmptyPath() is true but
			// isEmpty() is false. In this case, the config needs to be
			// split into two contexts - one with just the empty path
			// and another with everything but the empty path.
			// Unfortunately, the current algorithm does not allow
			// getEpsilonTarget to return two configurations, so
			// additional modifications are needed before we can support
			// the split operation.
			lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex])
			cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor)
		} else {
			// ignore actions in referenced rules
			cfg = NewLexerATNConfig4(config, trans.getTarget())
		}
	} else if trans.getSerializationType() == TransitionEPSILON {
		cfg = NewLexerATNConfig4(config, trans.getTarget())
	} else if trans.getSerializationType() == TransitionATOM ||
		trans.getSerializationType() == TransitionRANGE ||
		trans.getSerializationType() == TransitionSET {
		if treatEOFAsEpsilon {
			if trans.Matches(TokenEOF, 0, LexerMaxCharValue) {
				cfg = NewLexerATNConfig4(config, trans.getTarget())
			}
		}
	}
	return cfg
}

// Evaluate a predicate specified in the lexer.
//
// <p>If {@code speculative} is {@code true}, this method was called before
// {@link //consume} for the Matched character. This method should call
// {@link //consume} before evaluating the predicate to ensure position
// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
// and {@link Lexer//getcolumn}, properly reflect the current
// lexer state. This method should restore {@code input} and the simulator
// to the original state before returning (i.e. undo the actions made by the
// call to {@link //consume}).</p>
//
// @param input The input stream.
// @param ruleIndex The rule containing the predicate.
// @param predIndex The index of the predicate within the rule.
// @param speculative {@code true} if the current index in {@code input} is
// one character before the predicate's location.
//
// @return {@code true} if the specified predicate evaluates to
// {@code true}.
func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
	// assume true if no recognizer was provided
	if l.recog == nil {
		return true
	}
	if !speculative {
		return l.recog.Sempred(nil, ruleIndex, predIndex)
	}
	savedcolumn := l.CharPositionInLine
	savedLine := l.Line
	index := input.Index()
	marker := input.Mark()

	defer func() {
		l.CharPositionInLine = savedcolumn
		l.Line = savedLine
		input.Seek(index)
		input.Release(marker)
	}()

	l.Consume(input)
	return l.recog.Sempred(nil, ruleIndex, predIndex)
}
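
evaluatePredicate above saves the simulator's position state, consumes speculatively, and relies on defer to restore everything on the way out, even if the predicate panics. A minimal sketch of that defer-based save/restore pattern follows (editor's illustration, assuming a toy stream type rather than the real CharStream):

package main

import "fmt"

type stream struct{ pos int }

func (s *stream) Index() int { return s.pos }
func (s *stream) Seek(i int) { s.pos = i }
func (s *stream) Consume()   { s.pos++ }

// speculate advances the stream, runs fn, then restores the position,
// mirroring how evaluatePredicate consumes before Sempred and undoes it.
func speculate(s *stream, fn func() bool) bool {
	saved := s.Index()
	defer s.Seek(saved) // runs after fn returns, even on panic
	s.Consume()
	return fn()
}

func main() {
	s := &stream{}
	ok := speculate(s, func() bool { return s.Index() == 1 })
	fmt.Println(ok, s.Index()) // true 0: predicate saw the consumed position, then it was restored
}
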

func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
	settings.index = input.Index()
	settings.line = l.Line
	settings.column = l.CharPositionInLine
	settings.dfaState = dfaState
}

func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
	if to == nil && cfgs != nil {
		// leading to this call, ATNConfigSet.hasSemanticContext is used as a
		// marker indicating dynamic predicate evaluation makes this edge
		// dependent on the specific input sequence, so the static edge in the
		// DFA should be omitted. The target DFAState is still created since
		// execATN has the ability to re-synchronize with the DFA state cache
		// following the predicate evaluation step.
		//
		// TJP notes: next time through the DFA, we see a pred again and eval.
		// If that gets us to a previously created (but dangling) DFA
		// state, we can continue in pure DFA mode from there.
		suppressEdge := cfgs.HasSemanticContext()
		cfgs.SetHasSemanticContext(false)

		to = l.addDFAState(cfgs, true)

		if suppressEdge {
			return to
		}
	}
	// add the edge
	if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge {
		// Only track edges within the DFA bounds
		return to
	}
	if LexerATNSimulatorDebug {
		fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
	}
	l.atn.edgeMu.Lock()
	defer l.atn.edgeMu.Unlock()
	if from.getEdges() == nil {
		// make room for tokens 1..n and -1 masquerading as index 0
		from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1))
	}
	from.setIthEdge(tk-LexerATNSimulatorMinDFAEdge, to) // connect

	return to
}

// Add a new DFA state if there isn't one with this set of
// configurations already. This method also detects the first
// configuration containing an ATN rule stop state. Later, when
// traversing the DFA, we will know which rule to accept.
func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState {

	proposed := NewDFAState(-1, configs)
	var firstConfigWithRuleStopState ATNConfig

	for _, cfg := range configs.GetItems() {

		_, ok := cfg.GetState().(*RuleStopState)

		if ok {
			firstConfigWithRuleStopState = cfg
			break
		}
	}
	if firstConfigWithRuleStopState != nil {
		proposed.isAcceptState = true
		proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
		proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
	}
	hash := proposed.hash()
	dfa := l.decisionToDFA[l.mode]

	l.atn.stateMu.Lock()
	defer l.atn.stateMu.Unlock()
	existing, ok := dfa.getState(hash)
	if ok {
		proposed = existing
	} else {
		proposed.stateNumber = dfa.numStates()
		configs.SetReadOnly(true)
		proposed.configs = configs
		dfa.setState(hash, proposed)
	}
	if !suppressEdge {
		dfa.setS0(proposed)
	}
	return proposed
}

func (l *LexerATNSimulator) getDFA(mode int) *DFA {
	return l.decisionToDFA[mode]
}

// Get the text Matched so far for the current token.
func (l *LexerATNSimulator) GetText(input CharStream) string {
	// index is first lookahead char, don't include.
	return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
}

func (l *LexerATNSimulator) Consume(input CharStream) {
	curChar := input.LA(1)
	if curChar == int('\n') {
		l.Line++
		l.CharPositionInLine = 0
	} else {
		l.CharPositionInLine++
	}
	input.Consume()
}
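
Consume's newline bookkeeping is small enough to verify by hand; here is the same line/column update rule applied in isolation (editor's illustration only):

package main

import "fmt"

func main() {
	line, col := 1, 0 // lines are 1-based, columns 0-based, as in the simulator
	for _, r := range "ab\ncd" {
		if r == '\n' {
			line++
			col = 0
		} else {
			col++
		}
	}
	fmt.Println(line, col) // 2 2: after consuming "ab\ncd"
}
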

func (l *LexerATNSimulator) GetCharPositionInLine() int {
	return l.CharPositionInLine
}

func (l *LexerATNSimulator) GetLine() int {
	return l.Line
}

func (l *LexerATNSimulator) GetTokenName(tt int) string {
	if tt == -1 {
		return "EOF"
	}

	var sb strings.Builder
	sb.Grow(6)
	sb.WriteByte('\'')
	sb.WriteRune(rune(tt))
	sb.WriteByte('\'')

	return sb.String()
}

func resetSimState(sim *SimState) {
	sim.index = -1
	sim.line = 0
	sim.column = -1
	sim.dfaState = nil
}

type SimState struct {
	index    int
	line     int
	column   int
	dfaState *DFAState
}

func NewSimState() *SimState {
	s := new(SimState)
	resetSimState(s)
	return s
}

func (s *SimState) reset() {
	resetSimState(s)
}
vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go (212 lines, generated, vendored, new file)
@@ -0,0 +1,212 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

type LL1Analyzer struct {
	atn *ATN
}

func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
	la := new(LL1Analyzer)
	la.atn = atn
	return la
}

// Special value added to the lookahead sets to indicate that we hit
// a predicate during analysis if {@code seeThruPreds==false}.
const (
	LL1AnalyzerHitPred = TokenInvalidType
)

// Calculates the SLL(1) expected lookahead set for each outgoing transition
// of an {@link ATNState}. The returned array has one element for each
// outgoing transition in {@code s}. If the closure from transition
// <em>i</em> leads to a semantic predicate before Matching a symbol, the
// element at index <em>i</em> of the result will be {@code nil}.
//
// @param s the ATN state
// @return the expected symbols for each outgoing transition of {@code s}.
func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
	if s == nil {
		return nil
	}
	count := len(s.GetTransitions())
	look := make([]*IntervalSet, count)
	for alt := 0; alt < count; alt++ {
		look[alt] = NewIntervalSet()
		lookBusy := newArray2DHashSet(nil, nil)
		seeThruPreds := false // fail to get lookahead upon pred
		la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
		// Wipe out the lookahead for this alternative if we found nothing,
		// or we had a predicate when !seeThruPreds
		if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
			look[alt] = nil
		}
	}
	return look
}

// Compute the set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
//
// <p>If {@code ctx} is {@code nil} and the end of the rule containing
// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
// If {@code ctx} is not {@code nil} and the end of the outermost rule is
// reached, {@link Token//EOF} is added to the result set.</p>
//
// @param s the ATN state
// @param stopState the ATN state to stop at. This can be a
// {@link BlockEndState} to detect epsilon paths through a closure.
// @param ctx the complete parser context, or {@code nil} if the context
// should be ignored
//
// @return The set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
	r := NewIntervalSet()
	seeThruPreds := true // ignore preds; get all lookahead
	var lookContext PredictionContext
	if ctx != nil {
		lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
	}
	la.look1(s, stopState, lookContext, r, newArray2DHashSet(nil, nil), NewBitSet(), seeThruPreds, true)
	return r
}

// Compute the set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
//
// <p>If {@code ctx} is {@code nil} and {@code stopState} or the end of the
// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
// {@code true} and {@code stopState} or the end of the outermost rule is
// reached, {@link Token//EOF} is added to the result set.</p>
//
// @param s the ATN state.
// @param stopState the ATN state to stop at. This can be a
// {@link BlockEndState} to detect epsilon paths through a closure.
// @param ctx The outer context, or {@code nil} if the outer context should
// not be used.
// @param look The result lookahead set.
// @param lookBusy A set used for preventing epsilon closures in the ATN
// from causing a stack overflow. Outside code should pass
// {@code NewSet<ATNConfig>} for this argument.
// @param calledRuleStack A set used for preventing left recursion in the
// ATN from causing a stack overflow. Outside code should pass
// {@code NewBitSet()} for this argument.
// @param seeThruPreds {@code true} to treat semantic predicates as
// implicitly {@code true} and "see through them", otherwise {@code false}
// to treat semantic predicates as opaque and add {@link //HitPred} to the
// result if one is encountered.
// @param addEOF Add {@link Token//EOF} to the result if the end of the
// outermost context is reached. This parameter has no effect if {@code ctx}
// is {@code nil}.

func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {

	returnState := la.atn.states[ctx.getReturnState(i)]
	la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)

}

func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) {

	c := NewBaseATNConfig6(s, 0, ctx)

	if lookBusy.Contains(c) {
		return
	}

	lookBusy.Add(c)

	if s == stopState {
		if ctx == nil {
			look.addOne(TokenEpsilon)
			return
		} else if ctx.isEmpty() && addEOF {
			look.addOne(TokenEOF)
			return
		}
	}

	_, ok := s.(*RuleStopState)

	if ok {
		if ctx == nil {
			look.addOne(TokenEpsilon)
			return
		} else if ctx.isEmpty() && addEOF {
			look.addOne(TokenEOF)
			return
		}

		if ctx != BasePredictionContextEMPTY {
			removed := calledRuleStack.contains(s.GetRuleIndex())
			defer func() {
				if removed {
					calledRuleStack.add(s.GetRuleIndex())
				}
			}()
			calledRuleStack.remove(s.GetRuleIndex())
			// run through all possible stack tops in ctx
			for i := 0; i < ctx.length(); i++ {
				returnState := la.atn.states[ctx.getReturnState(i)]
				la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
			}
			return
		}
	}

	n := len(s.GetTransitions())

	for i := 0; i < n; i++ {
		t := s.GetTransitions()[i]

		if t1, ok := t.(*RuleTransition); ok {
			if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
				continue
			}

			newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
			la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
		} else if t2, ok := t.(AbstractPredicateTransition); ok {
			if seeThruPreds {
				la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
			} else {
				look.addOne(LL1AnalyzerHitPred)
			}
		} else if t.getIsEpsilon() {
			la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
		} else if _, ok := t.(*WildcardTransition); ok {
			look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
		} else {
			set := t.getLabel()
			if set != nil {
				if _, ok := t.(*NotSetTransition); ok {
					set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
				}
				look.addSet(set)
			}
		}
	}
}
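
The lookBusy set threaded through look1/look2/look3 is a visited-set guard: epsilon edges in the ATN can form cycles, and the guard is what makes the depth-first walk terminate. A self-contained sketch of the same idea on a toy graph (editor's illustration; the node type and collect helper are hypothetical):

package main

import "fmt"

type node struct {
	id   int
	next []*node
}

// collect gathers reachable node ids, using `seen` the way look1 uses
// lookBusy: revisiting a node stops the recursion instead of looping forever.
func collect(n *node, seen map[int]bool, out *[]int) {
	if seen[n.id] {
		return
	}
	seen[n.id] = true
	*out = append(*out, n.id)
	for _, m := range n.next {
		collect(m, seen, out)
	}
}

func main() {
	a := &node{id: 1}
	b := &node{id: 2, next: []*node{a}}
	a.next = []*node{b} // cycle: a -> b -> a
	var out []int
	collect(a, map[int]bool{}, &out)
	fmt.Println(out) // [1 2]: terminates despite the cycle
}
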

func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {

	newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())

	defer func() {
		calledRuleStack.remove(t1.getTarget().GetRuleIndex())
	}()

	calledRuleStack.add(t1.getTarget().GetRuleIndex())
	la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)

}
vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go (718 lines, generated, vendored, new file)
@@ -0,0 +1,718 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strconv"
)

type Parser interface {
	Recognizer

	GetInterpreter() *ParserATNSimulator

	GetTokenStream() TokenStream
	GetTokenFactory() TokenFactory
	GetParserRuleContext() ParserRuleContext
	SetParserRuleContext(ParserRuleContext)
	Consume() Token
	GetParseListeners() []ParseTreeListener

	GetErrorHandler() ErrorStrategy
	SetErrorHandler(ErrorStrategy)
	GetInputStream() IntStream
	GetCurrentToken() Token
	GetExpectedTokens() *IntervalSet
	NotifyErrorListeners(string, Token, RecognitionException)
	IsExpectedToken(int) bool
	GetPrecedence() int
	GetRuleInvocationStack(ParserRuleContext) []string
}

type BaseParser struct {
	*BaseRecognizer

	Interpreter     *ParserATNSimulator
	BuildParseTrees bool

	input           TokenStream
	errHandler      ErrorStrategy
	precedenceStack IntStack
	ctx             ParserRuleContext

	tracer         *TraceListener
	parseListeners []ParseTreeListener
	_SyntaxErrors  int
}

// This is all the parsing support code; essentially most of it is error
// recovery stuff.
func NewBaseParser(input TokenStream) *BaseParser {

	p := new(BaseParser)

	p.BaseRecognizer = NewBaseRecognizer()

	// The input stream.
	p.input = nil
	// The error handling strategy for the parser. The default value is a new
	// instance of {@link DefaultErrorStrategy}.
	p.errHandler = NewDefaultErrorStrategy()
	p.precedenceStack = make([]int, 0)
	p.precedenceStack.Push(0)
	// The {@link ParserRuleContext} object for the currently executing rule.
	// This is always non-nil during the parsing process.
	p.ctx = nil
	// Specifies whether or not the parser should construct a parse tree during
	// the parsing process. The default value is {@code true}.
	p.BuildParseTrees = true
	// When {@link //setTrace}{@code (true)} is called, a reference to the
	// {@link TraceListener} is stored here so it can be easily removed in a
	// later call to {@link //setTrace}{@code (false)}. The listener itself is
	// implemented as a parser listener so this field is not directly used by
	// other parser methods.
	p.tracer = nil
	// The list of {@link ParseTreeListener} listeners registered to receive
	// events during the parse.
	p.parseListeners = nil
	// The number of syntax errors Reported during parsing. This value is
	// incremented each time {@link //NotifyErrorListeners} is called.
	p._SyntaxErrors = 0
	p.SetInputStream(input)

	return p
}

// This field maps from the serialized ATN string to the deserialized
// {@link ATN} with bypass alternatives.
//
// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
var bypassAltsAtnCache = make(map[string]int)

// reset the parser's state
func (p *BaseParser) reset() {
	if p.input != nil {
		p.input.Seek(0)
	}
	p.errHandler.reset(p)
	p.ctx = nil
	p._SyntaxErrors = 0
	p.SetTrace(nil)
	p.precedenceStack = make([]int, 0)
	p.precedenceStack.Push(0)
	if p.Interpreter != nil {
		p.Interpreter.reset()
	}
}

func (p *BaseParser) GetErrorHandler() ErrorStrategy {
	return p.errHandler
}

func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
	p.errHandler = e
}

// Match the current input symbol against {@code ttype}. If the symbol type
// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are
// called to complete the Match process.
//
// <p>If the symbol type does not Match,
// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
// strategy to attempt recovery. If {@link //getBuildParseTree} is
// {@code true} and the token index of the symbol returned by
// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
// the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p>
//
// @param ttype the token type to Match
// @return the Matched symbol
// @panics RecognitionException if the current input symbol did not Match
// {@code ttype} and the error strategy could not recover from the
// mismatched symbol
func (p *BaseParser) Match(ttype int) Token {

	t := p.GetCurrentToken()

	if t.GetTokenType() == ttype {
		p.errHandler.ReportMatch(p)
		p.Consume()
	} else {
		t = p.errHandler.RecoverInline(p)
		if p.BuildParseTrees && t.GetTokenIndex() == -1 {
			// we must have conjured up a new token during single token
			// insertion if it's not the current symbol
			p.ctx.AddErrorNode(t)
		}
	}

	return t
}
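
Match above either consumes the expected token or asks the error strategy to recover inline, which may fabricate the missing token (single-token insertion). A stripped-down sketch of that control flow, with toy stand-ins for the token stream and error strategy (editor's illustration, not the real API):

package main

import "fmt"

type token struct{ typ int }

type parser struct {
	tokens []token
	pos    int
}

func (p *parser) current() token { return p.tokens[p.pos] }

// match consumes the current token if it has the expected type; otherwise it
// "recovers inline" by conjuring the expected token, roughly what a
// RecoverInline-style strategy may do on single-token insertion.
func (p *parser) match(ttype int) token {
	t := p.current()
	if t.typ == ttype {
		p.pos++
		return t
	}
	return token{typ: ttype} // fabricated token; input position is not advanced
}

func main() {
	p := &parser{tokens: []token{{typ: 7}, {typ: 9}}}
	fmt.Println(p.match(7)) // {7}: matched and consumed
	fmt.Println(p.match(8)) // {8}: fabricated, parse can continue
}
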

// Match the current input symbol as a wildcard. If the symbol type Matches
// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch}
// and {@link //consume} are called to complete the Match process.
//
// <p>If the symbol type does not Match,
// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
// strategy to attempt recovery. If {@link //getBuildParseTree} is
// {@code true} and the token index of the symbol returned by
// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
// the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p>
//
// @return the Matched symbol
// @panics RecognitionException if the current input symbol did not Match
// a wildcard and the error strategy could not recover from the mismatched
// symbol
func (p *BaseParser) MatchWildcard() Token {
	t := p.GetCurrentToken()
	if t.GetTokenType() > 0 {
		p.errHandler.ReportMatch(p)
		p.Consume()
	} else {
		t = p.errHandler.RecoverInline(p)
		if p.BuildParseTrees && t.GetTokenIndex() == -1 {
			// we must have conjured up a new token during single token
			// insertion if it's not the current symbol
			p.ctx.AddErrorNode(t)
		}
	}
	return t
}

func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
	return p.ctx
}

func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
	p.ctx = v
}

func (p *BaseParser) GetParseListeners() []ParseTreeListener {
	if p.parseListeners == nil {
		return make([]ParseTreeListener, 0)
	}
	return p.parseListeners
}

// Registers {@code listener} to receive events during the parsing process.
//
// <p>To support output-preserving grammar transformations (including but not
// limited to left-recursion removal, automated left-factoring, and
// optimized code generation), calls to listener methods during the parse
// may differ substantially from calls made by
// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
// particular, rule entry and exit events may occur in a different order
// during the parse than after the parser. In addition, calls to certain
// rule entry methods may be omitted.</p>
//
// <p>With the following specific exceptions, calls to listener events are
// <em>deterministic</em>, i.e. for identical input the calls to listener
// methods will be the same.</p>
//
// <ul>
// <li>Alterations to the grammar used to generate code may change the
// behavior of the listener calls.</li>
// <li>Alterations to the command line options passed to ANTLR 4 when
// generating the parser may change the behavior of the listener calls.</li>
// <li>Changing the version of the ANTLR Tool used to generate the parser
// may change the behavior of the listener calls.</li>
// </ul>
//
// @param listener the listener to add
//
// @panics nilPointerException if {@code listener} is {@code nil}
func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
	if listener == nil {
		panic("listener")
	}
	if p.parseListeners == nil {
		p.parseListeners = make([]ParseTreeListener, 0)
	}
	p.parseListeners = append(p.parseListeners, listener)
}

// Remove {@code listener} from the list of parse listeners.
//
// <p>If {@code listener} is {@code nil} or has not been added as a parse
// listener, this method does nothing.</p>
// @param listener the listener to remove
func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {

	if p.parseListeners != nil {

		idx := -1
		for i, v := range p.parseListeners {
			if v == listener {
				idx = i
				break
			}
		}

		if idx == -1 {
			return
		}

		// remove the listener from the slice
		p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)

		if len(p.parseListeners) == 0 {
			p.parseListeners = nil
		}
	}
}
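
The removal above uses the standard Go delete-by-index slice idiom; shown in isolation (editor's illustration only):

package main

import "fmt"

func main() {
	ls := []string{"a", "b", "c"}
	idx := 1
	ls = append(ls[:idx], ls[idx+1:]...) // removes "b", preserving order
	fmt.Println(ls)                      // [a c]
}
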

// Remove all parse listeners.
func (p *BaseParser) removeParseListeners() {
	p.parseListeners = nil
}

// Notify any parse listeners of an enter rule event.
func (p *BaseParser) TriggerEnterRuleEvent() {
	if p.parseListeners != nil {
		ctx := p.ctx
		for _, listener := range p.parseListeners {
			listener.EnterEveryRule(ctx)
			ctx.EnterRule(listener)
		}
	}
}

// Notify any parse listeners of an exit rule event.
//
// @see //addParseListener
func (p *BaseParser) TriggerExitRuleEvent() {
	if p.parseListeners != nil {
		// reverse order walk of listeners
		ctx := p.ctx
		l := len(p.parseListeners) - 1

		for i := range p.parseListeners {
			listener := p.parseListeners[l-i]
			ctx.ExitRule(listener)
			listener.ExitEveryRule(ctx)
		}
	}
}

func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
	return p.Interpreter
}

func (p *BaseParser) GetATN() *ATN {
	return p.Interpreter.atn
}

func (p *BaseParser) GetTokenFactory() TokenFactory {
	return p.input.GetTokenSource().GetTokenFactory()
}

// Tell our token source and error strategy about a new way to create tokens.
func (p *BaseParser) setTokenFactory(factory TokenFactory) {
	p.input.GetTokenSource().setTokenFactory(factory)
}

// The ATN with bypass alternatives is expensive to create so we create it
// lazily.
//
// @panics UnsupportedOperationException if the current parser does not
// implement the {@link //getSerializedATN()} method.
func (p *BaseParser) GetATNWithBypassAlts() {

	// TODO
	panic("Not implemented!")

	// serializedAtn := p.getSerializedATN()
	// if (serializedAtn == nil) {
	// panic("The current parser does not support an ATN with bypass alternatives.")
	// }
	// result := p.bypassAltsAtnCache[serializedAtn]
	// if (result == nil) {
	// deserializationOptions := NewATNDeserializationOptions(nil)
	// deserializationOptions.generateRuleBypassTransitions = true
	// result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
	// p.bypassAltsAtnCache[serializedAtn] = result
	// }
	// return result
}

// The preferred method of getting a tree pattern. For example, here's a
// sample use:
//
// <pre>
// ParseTree t = parser.expr()
// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
// MyParser.RULE_expr)
// ParseTreeMatch m = p.Match(t)
// String id = m.Get("ID")
// </pre>
func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {

	panic("NewParseTreePatternMatcher not implemented!")
	//
	// if (lexer == nil) {
	// if (p.GetTokenStream() != nil) {
	// tokenSource := p.GetTokenStream().GetTokenSource()
	// if _, ok := tokenSource.(ILexer); ok {
	// lexer = tokenSource
	// }
	// }
	// }
	// if (lexer == nil) {
	// panic("Parser can't discover a lexer to use")
	// }
	//
	// m := NewParseTreePatternMatcher(lexer, p)
	// return m.compile(pattern, patternRuleIndex)
}

func (p *BaseParser) GetInputStream() IntStream {
	return p.GetTokenStream()
}

func (p *BaseParser) SetInputStream(input TokenStream) {
	p.SetTokenStream(input)
}

func (p *BaseParser) GetTokenStream() TokenStream {
	return p.input
}

// Set the token stream and reset the parser.
func (p *BaseParser) SetTokenStream(input TokenStream) {
	p.input = nil
	p.reset()
	p.input = input
}

// Match needs to return the current input symbol, which gets put
// into the label for the associated token ref e.g., x=ID.
func (p *BaseParser) GetCurrentToken() Token {
	return p.input.LT(1)
}

func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
	if offendingToken == nil {
		offendingToken = p.GetCurrentToken()
	}
	p._SyntaxErrors++
	line := offendingToken.GetLine()
	column := offendingToken.GetColumn()
	listener := p.GetErrorListenerDispatch()
	listener.SyntaxError(p, offendingToken, line, column, msg, err)
}

func (p *BaseParser) Consume() Token {
	o := p.GetCurrentToken()
	if o.GetTokenType() != TokenEOF {
		p.GetInputStream().Consume()
	}
	hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
	if p.BuildParseTrees || hasListener {
		if p.errHandler.InErrorRecoveryMode(p) {
			node := p.ctx.AddErrorNode(o)
			if p.parseListeners != nil {
				for _, l := range p.parseListeners {
					l.VisitErrorNode(node)
				}
			}

		} else {
			node := p.ctx.AddTokenNode(o)
			if p.parseListeners != nil {
				for _, l := range p.parseListeners {
					l.VisitTerminal(node)
				}
			}
		}
		// node.invokingState = p.state
	}

	return o
}

func (p *BaseParser) addContextToParseTree() {
	// add the current context to the parent if we have a parent
	if p.ctx.GetParent() != nil {
		p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
	}
}

func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
	p.SetState(state)
	p.ctx = localctx
	p.ctx.SetStart(p.input.LT(1))
	if p.BuildParseTrees {
		p.addContextToParseTree()
	}
	if p.parseListeners != nil {
		p.TriggerEnterRuleEvent()
	}
}

func (p *BaseParser) ExitRule() {
	p.ctx.SetStop(p.input.LT(-1))
	// trigger event on ctx, before it reverts to parent
	if p.parseListeners != nil {
		p.TriggerExitRuleEvent()
	}
	p.SetState(p.ctx.GetInvokingState())
	if p.ctx.GetParent() != nil {
		p.ctx = p.ctx.GetParent().(ParserRuleContext)
	} else {
		p.ctx = nil
	}
}

func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
	localctx.SetAltNumber(altNum)
	// if we have a new localctx, make sure we replace the existing ctx
	// that is the previous child of the parse tree
	if p.BuildParseTrees && p.ctx != localctx {
		if p.ctx.GetParent() != nil {
			p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
			p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
		}
	}
	p.ctx = localctx
}

// Get the precedence level for the top-most precedence rule.
//
// @return The precedence level for the top-most precedence rule, or -1 if
// the parser context is not nested within a precedence rule.
func (p *BaseParser) GetPrecedence() int {
	if len(p.precedenceStack) == 0 {
		return -1
	}

	return p.precedenceStack[len(p.precedenceStack)-1]
}

func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
	p.SetState(state)
	p.precedenceStack.Push(precedence)
	p.ctx = localctx
	p.ctx.SetStart(p.input.LT(1))
	if p.parseListeners != nil {
		p.TriggerEnterRuleEvent() // simulates rule entry for
		// left-recursive rules
	}
}

// Like {@link //EnterRule} but for recursive rules.
func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
	previous := p.ctx
	previous.SetParent(localctx)
	previous.SetInvokingState(state)
	previous.SetStop(p.input.LT(-1))

	p.ctx = localctx
	p.ctx.SetStart(previous.GetStart())
	if p.BuildParseTrees {
		p.ctx.AddChild(previous)
	}
	if p.parseListeners != nil {
		p.TriggerEnterRuleEvent() // simulates rule entry for
		// left-recursive rules
	}
}

func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
	p.precedenceStack.Pop()
	p.ctx.SetStop(p.input.LT(-1))
	retCtx := p.ctx // save current ctx (return value)
	// unroll so ctx is as it was before the call to the recursive method
	if p.parseListeners != nil {
		for p.ctx != parentCtx {
			p.TriggerExitRuleEvent()
			p.ctx = p.ctx.GetParent().(ParserRuleContext)
		}
	} else {
		p.ctx = parentCtx
	}
	// hook into tree
	retCtx.SetParent(parentCtx)
	if p.BuildParseTrees && parentCtx != nil {
		// add return ctx into invoking rule's tree
		parentCtx.AddChild(retCtx)
	}
}

func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
	ctx := p.ctx
	for ctx != nil {
		if ctx.GetRuleIndex() == ruleIndex {
			return ctx
		}
		ctx = ctx.GetParent().(ParserRuleContext)
	}
	return nil
}

func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
	return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
}
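
Precpred and the precedenceStack above drive left-recursive rule parsing: a left-recursive loop keeps extending the expression only while the candidate operator's precedence is at least the level on top of the stack. A minimal sketch of that check, assuming plain ints for precedence levels (editor's illustration, not the generated-parser code):

package main

import "fmt"

func main() {
	stack := []int{0} // the parser starts with precedence 0 pushed
	push := func(p int) { stack = append(stack, p) }
	pop := func() { stack = stack[:len(stack)-1] }
	precpred := func(p int) bool { return p >= stack[len(stack)-1] }

	push(2)                  // entering a recursion rule at precedence 2
	fmt.Println(precpred(3)) // true: a tighter-binding operator may continue the loop
	fmt.Println(precpred(1)) // false: a looser operator ends this recursion level
	pop()                    // leaving the recursion rule restores the outer level
}
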

func (p *BaseParser) inContext(context ParserRuleContext) bool {
	// TODO: useful in parser?
	return false
}

// Checks whether or not {@code symbol} can follow the current state in the
// ATN. The behavior of this method is equivalent to the following, but is
// implemented such that the complete context-sensitive follow set does not
// need to be explicitly constructed.
//
// <pre>
// return getExpectedTokens().contains(symbol)
// </pre>
//
// @param symbol the symbol type to check
// @return {@code true} if {@code symbol} can follow the current state in
// the ATN, otherwise {@code false}.
func (p *BaseParser) IsExpectedToken(symbol int) bool {
	atn := p.Interpreter.atn
	ctx := p.ctx
	s := atn.states[p.state]
	following := atn.NextTokens(s, nil)
	if following.contains(symbol) {
		return true
	}
	if !following.contains(TokenEpsilon) {
		return false
	}
	for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
		invokingState := atn.states[ctx.GetInvokingState()]
		rt := invokingState.GetTransitions()[0]
		following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
		if following.contains(symbol) {
			return true
		}
		ctx = ctx.GetParent().(ParserRuleContext)
	}
	if following.contains(TokenEpsilon) && symbol == TokenEOF {
		return true
	}

	return false
}

// Computes the set of input symbols which could follow the current parser
// state and context, as given by {@link //GetState} and {@link //GetContext},
// respectively.
//
// @see ATN//getExpectedTokens(int, RuleContext)
func (p *BaseParser) GetExpectedTokens() *IntervalSet {
	return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
}

func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
	atn := p.Interpreter.atn
	s := atn.states[p.state]
	return atn.NextTokens(s, nil)
}

// Get a rule's index (i.e., the {@code RULE_ruleName} field) or -1 if not found.
func (p *BaseParser) GetRuleIndex(ruleName string) int {
	var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
	if ok {
		return ruleIndex
	}

	return -1
}

// Return a List<String> of the rule names in your parser instance
// leading up to a call to the current rule. You could override if
// you want more details such as the file/line info of where
// in the ATN a rule is invoked.
//
// This is very useful for error messages.
func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
	if c == nil {
		c = p.ctx
	}
	stack := make([]string, 0)
	for c != nil {
		// compute what follows who invoked us
		ruleIndex := c.GetRuleIndex()
		if ruleIndex < 0 {
			stack = append(stack, "n/a")
		} else {
			stack = append(stack, p.GetRuleNames()[ruleIndex])
		}

		vp := c.GetParent()

		if vp == nil {
			break
		}

		c = vp.(ParserRuleContext)
	}
	return stack
}

// For debugging and other purposes.
func (p *BaseParser) GetDFAStrings() string {
	return fmt.Sprint(p.Interpreter.decisionToDFA)
}

// For debugging and other purposes.
func (p *BaseParser) DumpDFA() {
	seenOne := false
	for _, dfa := range p.Interpreter.decisionToDFA {
		if dfa.numStates() > 0 {
			if seenOne {
				fmt.Println()
			}
			fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
			fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
			seenOne = true
		}
	}
}

func (p *BaseParser) GetSourceName() string {
	return p.GrammarFileName
}

// During a parse it is sometimes useful to listen in on the rule entry and
// exit events as well as token Matches. This is for quick and dirty
// debugging.
func (p *BaseParser) SetTrace(trace *TraceListener) {
	if trace == nil {
		p.RemoveParseListener(p.tracer)
		p.tracer = nil
	} else {
		if p.tracer != nil {
			p.RemoveParseListener(p.tracer)
		}
		p.tracer = NewTraceListener(p)
		p.AddParseListener(p.tracer)
	}
}
vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go (1544 lines, generated, vendored, new file)
File diff suppressed because it is too large
vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go (362 lines, generated, vendored, new file)
@@ -0,0 +1,362 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"reflect"
	"strconv"
)

type ParserRuleContext interface {
	RuleContext

	SetException(RecognitionException)

	AddTokenNode(token Token) *TerminalNodeImpl
	AddErrorNode(badToken Token) *ErrorNodeImpl

	EnterRule(listener ParseTreeListener)
	ExitRule(listener ParseTreeListener)

	SetStart(Token)
	GetStart() Token

	SetStop(Token)
	GetStop() Token

	AddChild(child RuleContext) RuleContext
	RemoveLastChild()
}

type BaseParserRuleContext struct {
	*BaseRuleContext

	start, stop Token
	exception   RecognitionException
	children    []Tree
}

func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
	prc := new(BaseParserRuleContext)

	prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)

	prc.RuleIndex = -1
	// If we are debugging or building a parse tree for a Visitor, we need to
	// track all of the tokens and rule invocations associated with prc rule's
	// context. This is empty when parsing without tree construction because
	// we don't need to track the details about how we parse prc rule.
	prc.children = nil
	prc.start = nil
	prc.stop = nil
	// The exception that forced prc rule to return. If the rule successfully
	// completed, prc.exception is nil.
	prc.exception = nil

	return prc
}

func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
	prc.exception = e
}

func (prc *BaseParserRuleContext) GetChildren() []Tree {
	return prc.children
}

func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) {
	// from RuleContext
	prc.parentCtx = ctx.parentCtx
	prc.invokingState = ctx.invokingState
	prc.children = nil
	prc.start = ctx.start
	prc.stop = ctx.stop
}

// GetText returns the text matched by this context: the concatenation of
// the text of all of its children, with no separators in between.
func (prc *BaseParserRuleContext) GetText() string {
	if prc.GetChildCount() == 0 {
		return ""
	}

	var s string
	for _, child := range prc.children {
		s += child.(ParseTree).GetText()
	}

	return s
}
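
// Illustrative note, not part of the generated runtime: because GetText
// simply concatenates child text, a context that matched the input
// "1 + 2" would typically return "1+2", since whitespace sent to a hidden
// channel by most grammars never becomes a child node.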

// Double dispatch methods for listeners
func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
}

func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
}

// addTerminalNodeChild does not set the parent link; the other add methods
// do that.
func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
	if prc.children == nil {
		prc.children = make([]Tree, 0)
	}
	if child == nil {
		panic("Child may not be null")
	}
	prc.children = append(prc.children, child)
	return child
}

func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
	if prc.children == nil {
		prc.children = make([]Tree, 0)
	}
	if child == nil {
		panic("Child may not be null")
	}
	prc.children = append(prc.children, child)
	return child
}

// RemoveLastChild is used by EnterOuterAlt to toss out a RuleContext
// previously added as we entered a rule. If we have a label, we will need
// to remove the generic ruleContext object.
func (prc *BaseParserRuleContext) RemoveLastChild() {
	if prc.children != nil && len(prc.children) > 0 {
		prc.children = prc.children[0 : len(prc.children)-1]
	}
}

func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl {
	node := NewTerminalNodeImpl(token)
	prc.addTerminalNodeChild(node)
	node.parentCtx = prc
	return node
}

func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl {
	node := NewErrorNodeImpl(badToken)
	prc.addTerminalNodeChild(node)
	node.parentCtx = prc
	return node
}

func (prc *BaseParserRuleContext) GetChild(i int) Tree {
	// Bounds check: i must be a valid index into children (the original
	// ">= i" comparison allowed i == len and would panic).
	if prc.children != nil && len(prc.children) > i {
		return prc.children[i]
	}

	return nil
}

func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext {
	if childType == nil {
		return prc.GetChild(i).(RuleContext)
	}

	for j := 0; j < len(prc.children); j++ {
		child := prc.children[j]
		if reflect.TypeOf(child) == childType {
			if i == 0 {
				return child.(RuleContext)
			}

			i--
		}
	}

	return nil
}

func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string {
	return TreesStringTree(prc, ruleNames, recog)
}

func (prc *BaseParserRuleContext) GetRuleContext() RuleContext {
	return prc
}

func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} {
	return visitor.VisitChildren(prc)
}

func (prc *BaseParserRuleContext) SetStart(t Token) {
	prc.start = t
}

func (prc *BaseParserRuleContext) GetStart() Token {
	return prc.start
}

func (prc *BaseParserRuleContext) SetStop(t Token) {
	prc.stop = t
}

func (prc *BaseParserRuleContext) GetStop() Token {
	return prc.stop
}

// GetToken returns the i-th terminal child whose token type is ttype, or
// nil if there is no such child.
func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode {
	for j := 0; j < len(prc.children); j++ {
		child := prc.children[j]
		if c2, ok := child.(TerminalNode); ok {
			if c2.GetSymbol().GetTokenType() == ttype {
				if i == 0 {
					return c2
				}

				i--
			}
		}
	}
	return nil
}

// GetTokens returns all terminal children whose token type is ttype.
func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode {
	if prc.children == nil {
		return make([]TerminalNode, 0)
	}

	tokens := make([]TerminalNode, 0)

	for j := 0; j < len(prc.children); j++ {
		child := prc.children[j]
		if tchild, ok := child.(TerminalNode); ok {
			if tchild.GetSymbol().GetTokenType() == ttype {
				tokens = append(tokens, tchild)
			}
		}
	}

	return tokens
}
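
// Illustrative sketch, not part of the generated runtime: in a generated
// listener one often fetches matched terminals by token type. The
// token-type constant below is hypothetical.
//
//	ids := ctx.GetTokens(MyParserID)     // every ID terminal under ctx
//	first := ctx.GetToken(MyParserID, 0) // just the first one, or nil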

func (prc *BaseParserRuleContext) GetPayload() interface{} {
	return prc
}

func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext {
	if prc.children == nil || i < 0 || i >= len(prc.children) {
		return nil
	}

	j := -1 // what element have we found with ctxType?
	for _, o := range prc.children {
		childType := reflect.TypeOf(o)

		if childType.Implements(ctxType) {
			j++
			if j == i {
				return o.(RuleContext)
			}
		}
	}
	return nil
}

// Go lacks generics, so it's not possible for us to return the child with
// the correct type; we do check for convertibility, however.
func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext {
	return prc.getChild(ctxType, i)
}

func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext {
	if prc.children == nil {
		return make([]RuleContext, 0)
	}

	contexts := make([]RuleContext, 0)

	for _, child := range prc.children {
		childType := reflect.TypeOf(child)

		if childType.ConvertibleTo(ctxType) {
			contexts = append(contexts, child.(RuleContext))
		}
	}
	return contexts
}
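
// Illustrative sketch, not part of the generated runtime: callers pass a
// reflect.Type describing the desired child context. The IExprContext
// interface type below is hypothetical.
//
//	exprType := reflect.TypeOf((*IExprContext)(nil)).Elem() // hypothetical interface type
//	first := ctx.GetTypedRuleContext(exprType, 0)           // first Expr child, or nil
//	all := ctx.GetTypedRuleContexts(exprType)               // every Expr child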

func (prc *BaseParserRuleContext) GetChildCount() int {
	if prc.children == nil {
		return 0
	}

	return len(prc.children)
}

func (prc *BaseParserRuleContext) GetSourceInterval() *Interval {
	if prc.start == nil || prc.stop == nil {
		return TreeInvalidInterval
	}

	return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex())
}

// need to manage circular dependencies, so export now

// String prints out a whole tree, not just a node, in LISP format
// (root child1 .. childN). It prints just a node if prc is a leaf.
func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string {
	var p ParserRuleContext = prc
	s := "["
	for p != nil && p != stop {
		if ruleNames == nil {
			if !p.IsEmpty() {
				s += strconv.Itoa(p.GetInvokingState())
			}
		} else {
			ri := p.GetRuleIndex()
			var ruleName string
			if ri >= 0 && ri < len(ruleNames) {
				ruleName = ruleNames[ri]
			} else {
				ruleName = strconv.Itoa(ri)
			}
			s += ruleName
		}
		if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) {
			s += " "
		}
		pi := p.GetParent()
		if pi != nil {
			p = pi.(ParserRuleContext)
		} else {
			p = nil
		}
	}
	s += "]"
	return s
}

var RuleContextEmpty = NewBaseParserRuleContext(nil, -1)

type InterpreterRuleContext interface {
	ParserRuleContext
}

type BaseInterpreterRuleContext struct {
	*BaseParserRuleContext
}

func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
	prc := new(BaseInterpreterRuleContext)

	prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber)

	prc.RuleIndex = ruleIndex

	return prc
}
751 vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go generated vendored Normal file
@ -0,0 +1,751 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"strconv"
)

// Represents $ in local context prediction, which means wildcard:
// * + x = *.
const (
	BasePredictionContextEmptyReturnState = 0x7FFFFFFF
)

// Represents $ in an array in full context mode, when $ doesn't mean
// wildcard: $ + x = [$,x]. Here, $ = EmptyReturnState.

var (
	BasePredictionContextglobalNodeCount = 1
	BasePredictionContextid              = BasePredictionContextglobalNodeCount
)

type PredictionContext interface {
	hash() int
	GetParent(int) PredictionContext
	getReturnState(int) int
	equals(PredictionContext) bool
	length() int
	isEmpty() bool
	hasEmptyPath() bool
	String() string
}

type BasePredictionContext struct {
	cachedHash int
}

func NewBasePredictionContext(cachedHash int) *BasePredictionContext {
	pc := new(BasePredictionContext)
	pc.cachedHash = cachedHash

	return pc
}

func (b *BasePredictionContext) isEmpty() bool {
	return false
}

func calculateHash(parent PredictionContext, returnState int) int {
	h := murmurInit(1)
	h = murmurUpdate(h, parent.hash())
	h = murmurUpdate(h, returnState)
	return murmurFinish(h, 2)
}

var _emptyPredictionContextHash int

func init() {
	_emptyPredictionContextHash = murmurInit(1)
	_emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
}

func calculateEmptyHash() int {
	return _emptyPredictionContextHash
}

// PredictionContextCache is used to cache BasePredictionContext objects. It
// is used for the shared context cache associated with contexts in DFA
// states. This cache can be used for both lexers and parsers.
type PredictionContextCache struct {
	cache map[PredictionContext]PredictionContext
}

func NewPredictionContextCache() *PredictionContextCache {
	t := new(PredictionContextCache)
	t.cache = make(map[PredictionContext]PredictionContext)
	return t
}

// add a context to the cache and return it. If the context already exists,
// return that one instead and do not add a new context to the cache.
// Protect the shared cache from unsafe thread access.
func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
	if ctx == BasePredictionContextEMPTY {
		return BasePredictionContextEMPTY
	}
	existing := p.cache[ctx]
	if existing != nil {
		return existing
	}
	p.cache[ctx] = ctx
	return ctx
}

func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext {
	return p.cache[ctx]
}

func (p *PredictionContextCache) length() int {
	return len(p.cache)
}
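
// Illustrative sketch, not part of the generated runtime: the cache
// deduplicates by map-key identity, so adding the same context value twice
// hands back the first stored instance.
//
//	cache := NewPredictionContextCache()
//	ctx := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 7)
//	stored := cache.add(ctx) // stored == ctx on first insertion
//	again := cache.Get(ctx)  // again == stored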

type SingletonPredictionContext interface {
	PredictionContext
}

type BaseSingletonPredictionContext struct {
	*BasePredictionContext

	parentCtx   PredictionContext
	returnState int
}

func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext {
	var cachedHash int
	if parent != nil {
		cachedHash = calculateHash(parent, returnState)
	} else {
		cachedHash = calculateEmptyHash()
	}

	s := new(BaseSingletonPredictionContext)
	s.BasePredictionContext = NewBasePredictionContext(cachedHash)

	s.parentCtx = parent
	s.returnState = returnState

	return s
}

func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext {
	if returnState == BasePredictionContextEmptyReturnState && parent == nil {
		// someone can pass in the bits of an array ctx that mean $
		return BasePredictionContextEMPTY
	}

	return NewBaseSingletonPredictionContext(parent, returnState)
}

func (b *BaseSingletonPredictionContext) length() int {
	return 1
}

func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext {
	return b.parentCtx
}

func (b *BaseSingletonPredictionContext) getReturnState(index int) int {
	return b.returnState
}

func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
	return b.returnState == BasePredictionContextEmptyReturnState
}

func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool {
	if b == other {
		return true
	} else if _, ok := other.(*BaseSingletonPredictionContext); !ok {
		return false
	} else if b.hash() != other.hash() {
		return false // can't be same if hash is different
	}

	otherP := other.(*BaseSingletonPredictionContext)

	if b.returnState != other.getReturnState(0) {
		return false
	} else if b.parentCtx == nil {
		return otherP.parentCtx == nil
	}

	return b.parentCtx.equals(otherP.parentCtx)
}

func (b *BaseSingletonPredictionContext) hash() int {
	return b.cachedHash
}

func (b *BaseSingletonPredictionContext) String() string {
	var up string

	if b.parentCtx == nil {
		up = ""
	} else {
		up = b.parentCtx.String()
	}

	if len(up) == 0 {
		if b.returnState == BasePredictionContextEmptyReturnState {
			return "$"
		}

		return strconv.Itoa(b.returnState)
	}

	return strconv.Itoa(b.returnState) + " " + up
}

var BasePredictionContextEMPTY = NewEmptyPredictionContext()

type EmptyPredictionContext struct {
	*BaseSingletonPredictionContext
}

func NewEmptyPredictionContext() *EmptyPredictionContext {
	p := new(EmptyPredictionContext)

	p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)

	return p
}

func (e *EmptyPredictionContext) isEmpty() bool {
	return true
}

func (e *EmptyPredictionContext) GetParent(index int) PredictionContext {
	return nil
}

func (e *EmptyPredictionContext) getReturnState(index int) int {
	return e.returnState
}

func (e *EmptyPredictionContext) equals(other PredictionContext) bool {
	return e == other
}

func (e *EmptyPredictionContext) String() string {
	return "$"
}

type ArrayPredictionContext struct {
	*BasePredictionContext

	parents      []PredictionContext
	returnStates []int
}

func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext {
	// Parent can be nil only if full ctx mode and we make an array from
	// EMPTY and non-empty. We merge EMPTY by using a nil parent and
	// returnState == EmptyReturnState.
	hash := murmurInit(1)

	for _, parent := range parents {
		hash = murmurUpdate(hash, parent.hash())
	}

	for _, returnState := range returnStates {
		hash = murmurUpdate(hash, returnState)
	}

	hash = murmurFinish(hash, len(parents)<<1)

	c := new(ArrayPredictionContext)
	c.BasePredictionContext = NewBasePredictionContext(hash)

	c.parents = parents
	c.returnStates = returnStates

	return c
}

func (a *ArrayPredictionContext) GetReturnStates() []int {
	return a.returnStates
}

func (a *ArrayPredictionContext) hasEmptyPath() bool {
	return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState
}

func (a *ArrayPredictionContext) isEmpty() bool {
	// since EmptyReturnState can only appear in the last position, we
	// don't need to verify that size==1
	return a.returnStates[0] == BasePredictionContextEmptyReturnState
}

func (a *ArrayPredictionContext) length() int {
	return len(a.returnStates)
}

func (a *ArrayPredictionContext) GetParent(index int) PredictionContext {
	return a.parents[index]
}

func (a *ArrayPredictionContext) getReturnState(index int) int {
	return a.returnStates[index]
}

func (a *ArrayPredictionContext) equals(other PredictionContext) bool {
	if _, ok := other.(*ArrayPredictionContext); !ok {
		return false
	} else if a.cachedHash != other.hash() {
		return false // can't be same if hash is different
	} else {
		otherP := other.(*ArrayPredictionContext)
		// Note: comparing field addresses makes this an identity check on
		// the receiver, not a structural comparison of the slices.
		return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents
	}
}

func (a *ArrayPredictionContext) hash() int {
	return a.BasePredictionContext.cachedHash
}

func (a *ArrayPredictionContext) String() string {
	if a.isEmpty() {
		return "[]"
	}

	s := "["
	for i := 0; i < len(a.returnStates); i++ {
		if i > 0 {
			s = s + ", "
		}
		if a.returnStates[i] == BasePredictionContextEmptyReturnState {
			s = s + "$"
			continue
		}
		s = s + strconv.Itoa(a.returnStates[i])
		if a.parents[i] != nil {
			s = s + " " + a.parents[i].String()
		} else {
			s = s + "nil"
		}
	}

	return s + "]"
}

// predictionContextFromRuleContext converts a RuleContext tree to a
// BasePredictionContext graph. It returns EMPTY if outerContext is empty
// or nil.
func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
	if outerContext == nil {
		outerContext = RuleContextEmpty
	}
	// if we are in RuleContext of start rule, s, then BasePredictionContext
	// is EMPTY. Nobody called us. (if we are empty, return empty)
	if outerContext.GetParent() == nil || outerContext == RuleContextEmpty {
		return BasePredictionContextEMPTY
	}
	// If we have a parent, convert it to a BasePredictionContext graph
	parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
	state := a.states[outerContext.GetInvokingState()]
	transition := state.GetTransitions()[0]

	return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
}

func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
	// share the same graph if both are the same
	if a == b {
		return a
	}

	ac, ok1 := a.(*BaseSingletonPredictionContext)
	bc, ok2 := b.(*BaseSingletonPredictionContext)

	if ok1 && ok2 {
		return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
	}
	// At least one of a or b is an array.
	// If one is $ and rootIsWildcard, return $ as the wildcard.
	if rootIsWildcard {
		if _, ok := a.(*EmptyPredictionContext); ok {
			return a
		}
		if _, ok := b.(*EmptyPredictionContext); ok {
			return b
		}
	}
	// convert singletons so both are arrays, to normalize
	if _, ok := a.(*BaseSingletonPredictionContext); ok {
		a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
	}
	if _, ok := b.(*BaseSingletonPredictionContext); ok {
		b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
	}
	return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache)
}
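
// Illustrative sketch, not part of the generated runtime: merging two
// distinct singleton contexts with the same return state and the same
// parent reduces to the left operand (ax + ax = ax).
//
//	x := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 5)
//	a := SingletonBasePredictionContextCreate(x, 9)
//	b := SingletonBasePredictionContextCreate(x, 9)
//	m := merge(a, b, true, nil) // m == a: parents and return states match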

// mergeSingletons merges two SingletonBasePredictionContext instances.
//
// Stack tops equal, parents merge is same: return left graph
// (see images/SingletonMerge_SameRootSamePar.svg).
//
// Same stack top, parents differ: merge parents giving array node, then
// remainders of those graphs. A new root node is created to point to the
// merged parents (see images/SingletonMerge_SameRootDiffPar.svg).
//
// Different stack tops pointing to same parent: make array node for the
// root where both elements in the root point to the same (original) parent
// (see images/SingletonMerge_DiffRootSamePar.svg).
//
// Different stack tops pointing to different parents: make array node for
// the root where each element points to the corresponding original parent
// (see images/SingletonMerge_DiffRootDiffPar.svg).
//
// a is the first SingletonBasePredictionContext and b is the second.
// rootIsWildcard is true if this is a local-context merge, otherwise false
// to indicate a full-context merge.
func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
	if mergeCache != nil {
		previous := mergeCache.Get(a.hash(), b.hash())
		if previous != nil {
			return previous.(PredictionContext)
		}
		previous = mergeCache.Get(b.hash(), a.hash())
		if previous != nil {
			return previous.(PredictionContext)
		}
	}

	rootMerge := mergeRoot(a, b, rootIsWildcard)
	if rootMerge != nil {
		if mergeCache != nil {
			mergeCache.set(a.hash(), b.hash(), rootMerge)
		}
		return rootMerge
	}
	if a.returnState == b.returnState {
		parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
		// if parent is same as existing a or b parent or reduced to a
		// parent, return it
		if parent == a.parentCtx {
			return a // ax + bx = ax, if a=b
		}
		if parent == b.parentCtx {
			return b // ax + bx = bx, if a=b
		}
		// else: ax + ay = a'[x,y]
		// merge parents x and y, giving array node with x,y then remainders
		// of those graphs. dup a, a' points at merged array.
		// New joined parent, so create new singleton pointing to it, a'
		spc := SingletonBasePredictionContextCreate(parent, a.returnState)
		if mergeCache != nil {
			mergeCache.set(a.hash(), b.hash(), spc)
		}
		return spc
	}
	// a != b: payloads differ
	// see if we can collapse parents due to $+x parents if local ctx
	var singleParent PredictionContext
	if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) {
		// ax + bx = [a,b]x
		singleParent = a.parentCtx
	}
	if singleParent != nil { // parents are same
		// sort payloads and use same parent
		payloads := []int{a.returnState, b.returnState}
		if a.returnState > b.returnState {
			payloads[0] = b.returnState
			payloads[1] = a.returnState
		}
		parents := []PredictionContext{singleParent, singleParent}
		apc := NewArrayPredictionContext(parents, payloads)
		if mergeCache != nil {
			mergeCache.set(a.hash(), b.hash(), apc)
		}
		return apc
	}
	// parents differ and can't merge them. Just pack together into an
	// array; can't merge.
	// ax + by = [ax,by]
	payloads := []int{a.returnState, b.returnState}
	parents := []PredictionContext{a.parentCtx, b.parentCtx}
	if a.returnState > b.returnState { // sort by payload
		payloads[0] = b.returnState
		payloads[1] = a.returnState
		parents = []PredictionContext{b.parentCtx, a.parentCtx}
	}
	apc := NewArrayPredictionContext(parents, payloads)
	if mergeCache != nil {
		mergeCache.set(a.hash(), b.hash(), apc)
	}
	return apc
}

// mergeRoot handles the case where at least one of a or b is EMPTY. In the
// following notes, the symbol $ is used to represent EMPTY.
//
// Local-Context Merges
//
// These local-context merge operations are used when rootIsWildcard is
// true.
//
// EMPTY is a superset of any graph: return EMPTY
// (see images/LocalMerge_EmptyRoot.svg).
//
// EMPTY and anything is EMPTY, so merged parent is EMPTY: return left graph
// (see images/LocalMerge_EmptyParent.svg).
//
// Special case of last merge if local context
// (see images/LocalMerge_DiffRoots.svg).
//
// Full-Context Merges
//
// These full-context merge operations are used when rootIsWildcard is false
// (see images/FullMerge_EmptyRoots.svg).
//
// Must keep all contexts; EMPTY in array is a special value (and nil
// parent) (see images/FullMerge_EmptyRoot.svg and
// images/FullMerge_SameRoot.svg).
//
// a is the first SingletonBasePredictionContext and b is the second.
// rootIsWildcard is true if this is a local-context merge, otherwise false
// to indicate a full-context merge.
func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext {
	if rootIsWildcard {
		if a == BasePredictionContextEMPTY {
			return BasePredictionContextEMPTY // * + b = *
		}
		if b == BasePredictionContextEMPTY {
			return BasePredictionContextEMPTY // a + * = *
		}
	} else {
		if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY {
			return BasePredictionContextEMPTY // $ + $ = $
		} else if a == BasePredictionContextEMPTY { // $ + x = [$,x]
			payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
			parents := []PredictionContext{b.GetParent(-1), nil}
			return NewArrayPredictionContext(parents, payloads)
		} else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
			payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
			parents := []PredictionContext{a.GetParent(-1), nil}
			return NewArrayPredictionContext(parents, payloads)
		}
	}
	return nil
}

// mergeArrays merges two ArrayPredictionContext instances.
//
// Different tops, different parents
// (see images/ArrayMerge_DiffTopDiffPar.svg).
//
// Shared top, same parents
// (see images/ArrayMerge_ShareTopSamePar.svg).
//
// Shared top, different parents
// (see images/ArrayMerge_ShareTopDiffPar.svg).
//
// Shared top, all shared parents
// (see images/ArrayMerge_ShareTopSharePar.svg).
//
// Equal tops, merge parents and reduce top to a
// SingletonBasePredictionContext
// (see images/ArrayMerge_EqualTop.svg).
func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
	if mergeCache != nil {
		previous := mergeCache.Get(a.hash(), b.hash())
		if previous != nil {
			return previous.(PredictionContext)
		}
		previous = mergeCache.Get(b.hash(), a.hash())
		if previous != nil {
			return previous.(PredictionContext)
		}
	}
	// merge sorted payloads a + b => M
	i := 0 // walks a
	j := 0 // walks b
	k := 0 // walks target M array

	mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
	mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates))
	// walk and merge to yield mergedParents, mergedReturnStates
	for i < len(a.returnStates) && j < len(b.returnStates) {
		aParent := a.parents[i]
		bParent := b.parents[j]
		if a.returnStates[i] == b.returnStates[j] {
			// same payload (stack tops are equal), must yield merged singleton
			payload := a.returnStates[i]
			// $+$ = $
			bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
			axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax -> ax
			if bothDollars || axAX {
				mergedParents[k] = aParent // choose left
				mergedReturnStates[k] = payload
			} else { // ax+ay -> a'[x,y]
				mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
				mergedParents[k] = mergedParent
				mergedReturnStates[k] = payload
			}
			i++ // hop over left one as usual
			j++ // but also Skip one in right side since we merge
		} else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
			mergedParents[k] = aParent
			mergedReturnStates[k] = a.returnStates[i]
			i++
		} else { // b > a, copy b[j] to M
			mergedParents[k] = bParent
			mergedReturnStates[k] = b.returnStates[j]
			j++
		}
		k++
	}
	// copy over any payloads remaining in either array
	if i < len(a.returnStates) {
		for p := i; p < len(a.returnStates); p++ {
			mergedParents[k] = a.parents[p]
			mergedReturnStates[k] = a.returnStates[p]
			k++
		}
	} else {
		for p := j; p < len(b.returnStates); p++ {
			mergedParents[k] = b.parents[p]
			mergedReturnStates[k] = b.returnStates[p]
			k++
		}
	}
	// trim merged if we combined a few that had same stack tops
	if k < len(mergedParents) { // write index < last position: trim
		if k == 1 { // for just one merged element, return singleton top
			pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
			if mergeCache != nil {
				mergeCache.set(a.hash(), b.hash(), pc)
			}
			return pc
		}
		mergedParents = mergedParents[0:k]
		mergedReturnStates = mergedReturnStates[0:k]
	}

	M := NewArrayPredictionContext(mergedParents, mergedReturnStates)

	// if we created the same array as a or b, return that instead
	// TODO: track whether this is possible above during merge sort for speed
	if M == a {
		if mergeCache != nil {
			mergeCache.set(a.hash(), b.hash(), a)
		}
		return a
	}
	if M == b {
		if mergeCache != nil {
			mergeCache.set(a.hash(), b.hash(), b)
		}
		return b
	}
	combineCommonParents(mergedParents)

	if mergeCache != nil {
		mergeCache.set(a.hash(), b.hash(), M)
	}
	return M
}

// combineCommonParents makes a pass over all M parents and merges any that
// are equal (by map-key identity).
func combineCommonParents(parents []PredictionContext) {
	uniqueParents := make(map[PredictionContext]PredictionContext)

	for p := 0; p < len(parents); p++ {
		parent := parents[p]
		if uniqueParents[parent] == nil {
			uniqueParents[parent] = parent
		}
	}
	for q := 0; q < len(parents); q++ {
		parents[q] = uniqueParents[parents[q]]
	}
}

func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext {
	if context.isEmpty() {
		return context
	}
	existing := visited[context]
	if existing != nil {
		return existing
	}
	existing = contextCache.Get(context)
	if existing != nil {
		visited[context] = existing
		return existing
	}
	changed := false
	parents := make([]PredictionContext, context.length())
	for i := 0; i < len(parents); i++ {
		parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
		if changed || parent != context.GetParent(i) {
			if !changed {
				parents = make([]PredictionContext, context.length())
				for j := 0; j < context.length(); j++ {
					parents[j] = context.GetParent(j)
				}
				changed = true
			}
			parents[i] = parent
		}
	}
	if !changed {
		contextCache.add(context)
		visited[context] = context
		return context
	}
	var updated PredictionContext
	if len(parents) == 0 {
		updated = BasePredictionContextEMPTY
	} else if len(parents) == 1 {
		updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
	} else {
		updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates())
	}
	contextCache.add(updated)
	visited[updated] = updated
	visited[context] = updated

	return updated
}
553 vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go generated vendored Normal file
@ -0,0 +1,553 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// This enumeration defines the prediction modes available in ANTLR 4 along
// with utility methods for analyzing configuration sets for conflicts
// and/or ambiguities.

const (
	// PredictionModeSLL is the SLL(*) prediction mode. This prediction mode
	// ignores the current parser context when making predictions. This is
	// the fastest prediction mode, and it provides correct results for many
	// grammars. This prediction mode is more powerful than the prediction
	// mode provided by ANTLR 3, but may result in syntax errors for grammar
	// and input combinations which are not SLL.
	//
	// When using this prediction mode, the parser will either return a
	// correct parse tree (i.e. the same parse tree that would be returned
	// with the LL prediction mode), or it will Report a syntax error. If a
	// syntax error is encountered when using the SLL prediction mode, it
	// may be due to either an actual syntax error in the input or indicate
	// that the particular combination of grammar and input requires the
	// more powerful LL prediction abilities to complete successfully.
	//
	// This prediction mode does not provide any guarantees for prediction
	// behavior for syntactically-incorrect inputs.
	PredictionModeSLL = 0

	// PredictionModeLL is the LL(*) prediction mode. This prediction mode
	// allows the current parser context to be used for resolving SLL
	// conflicts that occur during prediction. This is the fastest
	// prediction mode that guarantees correct parse results for all
	// combinations of grammars with syntactically correct inputs.
	//
	// When using this prediction mode, the parser will make correct
	// decisions for all syntactically-correct grammar and input
	// combinations. However, in cases where the grammar is truly ambiguous
	// this prediction mode might not Report a precise answer for exactly
	// which alternatives are ambiguous.
	//
	// This prediction mode does not provide any guarantees for prediction
	// behavior for syntactically-incorrect inputs.
	PredictionModeLL = 1

	// PredictionModeLLExactAmbigDetection is the LL(*) prediction mode with
	// exact ambiguity detection. In addition to the correctness guarantees
	// provided by the LL prediction mode, this prediction mode instructs
	// the prediction algorithm to determine the complete and exact set of
	// ambiguous alternatives for every ambiguous decision encountered while
	// parsing.
	//
	// This prediction mode may be used for diagnosing ambiguities during
	// grammar development. Due to the performance overhead of calculating
	// sets of ambiguous alternatives, this prediction mode should be
	// avoided when the exact results are not necessary.
	//
	// This prediction mode does not provide any guarantees for prediction
	// behavior for syntactically-incorrect inputs.
	PredictionModeLLExactAmbigDetection = 2
)
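
// Illustrative sketch, not part of the generated runtime: a common
// two-stage strategy is to try fast SLL prediction first and fall back to
// full LL on a syntax error. The parser value p is hypothetical.
//
//	p.Interpreter.SetPredictionMode(PredictionModeSLL)
//	// ... parse; on a syntax error, rewind the input and retry with:
//	p.Interpreter.SetPredictionMode(PredictionModeLL)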

// PredictionModehasSLLConflictTerminatingPrediction computes the SLL
// prediction termination condition.
//
// This method computes the SLL prediction termination condition for both of
// the following cases:
//
//   - the usual SLL+LL fallback upon SLL conflict
//   - pure SLL without LL fallback
//
// COMBINED SLL+LL PARSING
//
// When LL-fallback is enabled upon SLL conflict, correct predictions are
// ensured regardless of how the termination condition is computed by this
// method. Due to the substantially higher cost of LL prediction, the
// prediction should only fall back to LL when the additional lookahead
// cannot lead to a unique SLL prediction.
//
// Assuming combined SLL+LL parsing, an SLL configuration set with only
// conflicting subsets should fall back to full LL, even if the
// configuration sets don't resolve to the same alternative (e.g. {1,2} and
// {3,4}). If there is at least one non-conflicting configuration, SLL could
// continue with the hopes that more lookahead will resolve via one of those
// non-conflicting configurations.
//
// Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
// stops when it sees only conflicting configuration subsets. In contrast,
// full LL keeps going when there is uncertainty.
//
// HEURISTIC
//
// As a heuristic, we stop prediction when we see any conflicting subset
// unless we see a state that only has one alternative associated with it.
// The single-alt-state thing lets prediction continue upon rules like this
// (otherwise, it would admit defeat too soon):
//
//	[12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';'
//
// When the ATN simulation reaches the state before ';', it has a DFA state
// that looks like [12|1|[], 6|2|[], 12|2|[]]. Naturally 12|1|[] and 12|2|[]
// conflict, but we cannot stop processing this node because alternative two
// has another way to continue, via [6|2|[]].
//
// It also lets us continue for this rule:
//
//	[1|1|[], 1|2|[], 8|3|[]] a : A | A | A B
//
// After Matching input A, we reach the stop state for rule A, state 1.
// State 8 is the state immediately before B. Clearly alternatives 1 and 2
// conflict and no amount of further lookahead will separate the two.
// However, alternative 3 will be able to continue, so we do not stop
// working on this state. In the previous example, we're concerned with
// states associated with the conflicting alternatives. Here alt 3 is not
// associated with the conflicting configs, but since we can continue
// looking for input reasonably, don't declare the state done.
//
// PURE SLL PARSING
//
// To handle pure SLL parsing, all we have to do is make sure that we
// combine stack contexts for configurations that differ only by semantic
// predicate. From there, we can do the usual SLL termination heuristic.
//
// PREDICATES IN SLL+LL PARSING
//
// SLL decisions don't evaluate predicates until after they reach DFA stop
// states because they need to create the DFA cache that works in all
// semantic situations. In contrast, full LL evaluates predicates collected
// during start state computation, so it can ignore predicates thereafter.
// This means that SLL termination detection can totally ignore semantic
// predicates.
//
// Implementation-wise, ATNConfigSet combines stack contexts but not
// semantic predicate contexts, so we might see two configurations like the
// following:
//
//	(s, 1, x, {}), (s, 1, x', {p})
//
// Before testing these configurations against others, we have to merge x
// and x' (without modifying the existing configurations). For example, we
// test (x+x')==x'' when looking for conflicts in the following
// configurations:
//
//	(s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})
//
// If the configuration set has predicates (as indicated by
// ATNConfigSet.hasSemanticContext), this algorithm makes a copy of the
// configurations to strip out all of the predicates, so that a standard
// ATNConfigSet will merge everything ignoring predicates.
func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
	// Configs in rule stop states indicate reaching the end of the decision
	// rule (local context) or end of start rule (full context). If all
	// configs meet this condition, then none of the configurations is able
	// to Match additional input, so we terminate prediction.
	if PredictionModeallConfigsInRuleStopStates(configs) {
		return true
	}
	// pure SLL mode parsing
	if mode == PredictionModeSLL {
		// Don't bother with combining configs from different semantic
		// contexts if we can fail over to full LL; it costs more time,
		// since we'll often fail over anyway.
		if configs.HasSemanticContext() {
			// dup configs, tossing out semantic predicates
			dup := NewBaseATNConfigSet(false)
			for _, c := range configs.GetItems() {
				// NewBaseATNConfig({semanticContext:}, c)
				c = NewBaseATNConfig2(c, SemanticContextNone)
				dup.Add(c, nil)
			}
			configs = dup
		}
		// now we have combined contexts for configs with dissimilar preds
	}
	// pure SLL or combined SLL+LL mode parsing
	altsets := PredictionModegetConflictingAltSubsets(configs)
	return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
}

// PredictionModehasConfigInRuleStopState checks if any configuration in
// configs is in a RuleStopState. Configurations meeting this condition have
// reached the end of the decision rule (local context) or end of the start
// rule (full context).
//
// It returns true if any configuration in configs is in a RuleStopState,
// otherwise false.
func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool {
	for _, c := range configs.GetItems() {
		if _, ok := c.GetState().(*RuleStopState); ok {
			return true
		}
	}
	return false
}

// PredictionModeallConfigsInRuleStopStates checks if all configurations in
// configs are in a RuleStopState. Configurations meeting this condition
// have reached the end of the decision rule (local context) or end of the
// start rule (full context).
//
// It returns true if all configurations in configs are in a RuleStopState,
// otherwise false.
func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
	for _, c := range configs.GetItems() {
		if _, ok := c.GetState().(*RuleStopState); !ok {
			return false
		}
	}
	return true
}

// PredictionModeresolvesToJustOneViableAlt implements full LL prediction
// termination.
//
// Can we stop looking ahead during ATN simulation, or is there some
// uncertainty as to which alternative we will ultimately pick after
// consuming more input? Even if there are partial conflicts, we might know
// that everything is going to resolve to the same minimum alternative. That
// means we can stop, since no more lookahead will change that fact. On the
// other hand, there might be multiple conflicts that resolve to different
// minimums. That means we need more lookahead to decide which of those
// alternatives we should predict.
//
// The basic idea is to split the set of configurations C into conflicting
// subsets (s, _, ctx, _) and singleton subsets with non-conflicting
// configurations. Two configurations conflict if they have identical
// state and context values but a different alt value, e.g. (s, i, ctx, _)
// and (s, j, ctx, _) for i!=j.
//
// Reduce these configuration subsets to the set of possible alternatives.
// You can compute the alternative subsets in one pass as follows:
//
// A_s,ctx = {i | (s, i, ctx, _)} for each configuration in C, holding s and
// ctx fixed.
//
// Or in pseudo-code, for each configuration c in C:
//
//	map[c] U= c.alt // map hash/equals uses s and x, not alt and not pred
//
// The values in map are the set of A_s,ctx sets.
//
// If |A_s,ctx| = 1, then there is no conflict associated with s and ctx.
//
// Reduce the subsets to singletons by choosing a minimum of each subset. If
// the union of these alternative subsets is a singleton, then no amount of
// further lookahead will help us. We will always pick that alternative. If,
// however, there is more than one alternative, then we are uncertain which
// alternative to predict and must continue looking for resolution. We may
// or may not discover an ambiguity in the future, even if there are no
// conflicting subsets this round.
//
// The biggest sin is to terminate early, because it means we've made a
// decision but were uncertain as to the eventual outcome. We haven't used
// enough lookahead. On the other hand, announcing a conflict too late is no
// big deal; you will still have the conflict. It's just inefficient. It
// might even look until the end of file.
//
// No special consideration for semantic predicates is required because
// predicates are evaluated on-the-fly for full LL prediction, ensuring that
// no configuration contains a semantic context during the termination
// check.
//
// CONFLICTING CONFIGS
//
// Two configurations (s, i, x) and (s, j, x') conflict when i!=j but x=x'.
// Because we merge all (s, i, _) configurations together, that means that
// there are at most n configurations associated with state s for n possible
// alternatives in the decision. The merged stacks complicate the comparison
// of configuration contexts x and x'. Sam checks to see if one is a subset
// of the other by calling merge and checking to see if the merged result is
// either x or x'. If the x associated with the lowest alternative i is the
// superset, then i is the only possible prediction, since the others
// resolve to min(i) as well. However, if x is associated with j>i then at
// least one stack configuration for j is not in conflict with alternative
// i. The algorithm should keep going, looking for more lookahead due to the
// uncertainty.
//
// For simplicity, I'm doing an equality check between x and x' that lets
// the algorithm continue to consume lookahead longer than necessary. The
// reason I like the equality is of course the simplicity, but also because
// that is the test you need to detect the alternatives that are actually in
// conflict.
//
// CONTINUE/STOP RULE
//
// Continue if the union of resolved alternative sets from non-conflicting
// and conflicting alternative subsets has more than one alternative. We are
// uncertain about which alternative to predict.
//
// The complete set of alternatives, [i for (_,i,_)], tells us which
// alternatives are still in the running for the amount of input we've
// consumed at this point. The conflicting sets let us strip away
// configurations that won't lead to more states, because we resolve
// conflicts to the configuration with a minimum alternate for the
// conflicting set.
//
// CASES
//
//   - no conflicts and more than 1 alternative in set => continue
//
//   - (s, 1, x), (s, 2, x), (s, 3, z), (s', 1, y), (s', 2, y) yields
//     non-conflicting set {3} U conflicting sets min({1,2}) U min({1,2}) =
//     {1,3} => continue
//
//   - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y), (s'', 1, z) yields
//     non-conflicting set {1} U conflicting sets min({1,2}) U min({1,2}) =
//     {1} => stop and predict 1
//
//   - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y) yields conflicting,
//     reduced sets {1} U {1} = {1} => stop and predict 1, can announce
//     ambiguity {1,2}
//
//   - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting,
//     reduced sets {1} U {2} = {1,2} => continue
//
//   - (s, 1, x), (s, 2, x), (s', 3, y), (s', 4, y) yields conflicting,
//     reduced sets {1} U {3} = {1,3} => continue
//
// EXACT AMBIGUITY DETECTION
//
// If all states Report the same conflicting set of alternatives, then we
// know we have the exact ambiguity set:
//
//	|A_i| > 1 and A_i = A_j for all i, j
//
// In other words, we continue examining lookahead until all A_i have more
// than one alternative and all A_i are the same. If A={{1,2}, {1,3}}, then
// regular LL prediction would terminate because the resolved set is {1}. To
// determine what the real ambiguity is, we have to know whether the
// ambiguity is between one and two or one and three, so we keep going. We
// can only stop prediction when we need exact ambiguity detection when the
// sets look like A={{1,2}} or {{1,2},{1,2}}, etc...
func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
	return PredictionModegetSingleViableAlt(altsets)
}
|
||||
//
|
||||
// Determines if every alternative subset in {@code altsets} contains more
|
||||
// than one alternative.
|
||||
//
|
||||
// @param altsets a collection of alternative subsets
|
||||
// @return {@code true} if every {@link BitSet} in {@code altsets} has
|
||||
// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
|
||||
//
|
||||
func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
|
||||
return !PredictionModehasNonConflictingAltSet(altsets)
|
||||
}
|
||||
|
||||
//
|
||||
// Determines if any single alternative subset in {@code altsets} contains
|
||||
// exactly one alternative.
|
||||
//
|
||||
// @param altsets a collection of alternative subsets
|
||||
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
|
||||
// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
|
||||
//
|
||||
func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
|
||||
for i := 0; i < len(altsets); i++ {
|
||||
alts := altsets[i]
|
||||
if alts.length() == 1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
//
|
||||
// Determines if any single alternative subset in {@code altsets} contains
|
||||
// more than one alternative.
|
||||
//
|
||||
// @param altsets a collection of alternative subsets
|
||||
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
|
||||
// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
|
||||
//
|
||||
func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
|
||||
for i := 0; i < len(altsets); i++ {
|
||||
alts := altsets[i]
|
||||
if alts.length() > 1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
//
|
||||
// Determines if every alternative subset in {@code altsets} is equivalent.
|
||||
//
|
||||
// @param altsets a collection of alternative subsets
|
||||
// @return {@code true} if every member of {@code altsets} is equal to the
|
||||
// others, otherwise {@code false}
|
||||
//
|
||||
func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
|
||||
var first *BitSet
|
||||
|
||||
for i := 0; i < len(altsets); i++ {
|
||||
alts := altsets[i]
|
||||
if first == nil {
|
||||
first = alts
|
||||
} else if alts != first {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}

//
// Returns the unique alternative predicted by all alternative subsets in
// {@code altsets}. If no such alternative exists, this method returns
// {@link ATN//INVALID_ALT_NUMBER}.
//
// @param altsets a collection of alternative subsets
//
func PredictionModegetUniqueAlt(altsets []*BitSet) int {
	all := PredictionModeGetAlts(altsets)
	if all.length() == 1 {
		return all.minValue()
	}

	return ATNInvalidAltNumber
}

// Gets the complete set of represented alternatives for a collection of
// alternative subsets. This method returns the union of each {@link BitSet}
// in {@code altsets}.
//
// @param altsets a collection of alternative subsets
// @return the set of represented alternatives in {@code altsets}
//
func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
	all := NewBitSet()
	for _, alts := range altsets {
		all.or(alts)
	}
	return all
}

//
// This func gets the conflicting alt subsets from a configuration set.
// For each configuration {@code c} in {@code configs}:
//
// <pre>
// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
// alt and not pred
// </pre>
//
func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
	configToAlts := make(map[int]*BitSet)

	for _, c := range configs.GetItems() {
		key := 31*c.GetState().GetStateNumber() + c.GetContext().hash()

		alts, ok := configToAlts[key]
		if !ok {
			alts = NewBitSet()
			configToAlts[key] = alts
		}
		alts.add(c.GetAlt())
	}

	values := make([]*BitSet, 0, 10)
	for _, v := range configToAlts {
		values = append(values, v)
	}
	return values
}

//
// Get a map from state to alt subset from a configuration set. For each
// configuration {@code c} in {@code configs}:
//
// <pre>
// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
// </pre>
//
func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
	m := NewAltDict()

	for _, c := range configs.GetItems() {
		alts := m.Get(c.GetState().String())
		if alts == nil {
			alts = NewBitSet()
			m.put(c.GetState().String(), alts)
		}
		alts.(*BitSet).add(c.GetAlt())
	}
	return m
}

func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
	values := PredictionModeGetStateToAltMap(configs).values()
	for i := 0; i < len(values); i++ {
		if values[i].(*BitSet).length() == 1 {
			return true
		}
	}
	return false
}

func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
	result := ATNInvalidAltNumber

	for i := 0; i < len(altsets); i++ {
		alts := altsets[i]
		minAlt := alts.minValue()
		if result == ATNInvalidAltNumber {
			result = minAlt
		} else if result != minAlt { // more than 1 viable alt
			return ATNInvalidAltNumber
		}
	}
	return result
}
217 vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go generated vendored Normal file
@@ -0,0 +1,217 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strconv"
	"strings"
)

type Recognizer interface {
	GetLiteralNames() []string
	GetSymbolicNames() []string
	GetRuleNames() []string

	Sempred(RuleContext, int, int) bool
	Precpred(RuleContext, int) bool

	GetState() int
	SetState(int)
	Action(RuleContext, int, int)
	AddErrorListener(ErrorListener)
	RemoveErrorListeners()
	GetATN() *ATN
	GetErrorListenerDispatch() ErrorListener
}

type BaseRecognizer struct {
	listeners []ErrorListener
	state     int

	RuleNames       []string
	LiteralNames    []string
	SymbolicNames   []string
	GrammarFileName string
}

func NewBaseRecognizer() *BaseRecognizer {
	rec := new(BaseRecognizer)
	rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
	rec.state = -1
	return rec
}

var tokenTypeMapCache = make(map[string]int)
var ruleIndexMapCache = make(map[string]int)

func (b *BaseRecognizer) checkVersion(toolVersion string) {
	runtimeVersion := "4.10.1"
	if runtimeVersion != toolVersion {
		fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
	}
}

func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) {
	panic("action not implemented on Recognizer!")
}

func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
	b.listeners = append(b.listeners, listener)
}

func (b *BaseRecognizer) RemoveErrorListeners() {
	b.listeners = make([]ErrorListener, 0)
}

func (b *BaseRecognizer) GetRuleNames() []string {
	return b.RuleNames
}

func (b *BaseRecognizer) GetTokenNames() []string {
	return b.LiteralNames
}

func (b *BaseRecognizer) GetSymbolicNames() []string {
	return b.SymbolicNames
}

func (b *BaseRecognizer) GetLiteralNames() []string {
	return b.LiteralNames
}

func (b *BaseRecognizer) GetState() int {
	return b.state
}

func (b *BaseRecognizer) SetState(v int) {
	b.state = v
}

//func (b *Recognizer) GetTokenTypeMap() {
//	var tokenNames = b.GetTokenNames()
//	if (tokenNames==nil) {
//		panic("The current recognizer does not provide a list of token names.")
//	}
//	var result = tokenTypeMapCache[tokenNames]
//	if(result==nil) {
//		result = tokenNames.reduce(function(o, k, i) { o[k] = i })
//		result.EOF = TokenEOF
//		tokenTypeMapCache[tokenNames] = result
//	}
//	return result
//}

// Get a map from rule names to rule indexes.
//
// <p>Used for XPath and tree pattern compilation.</p>
//
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {

	panic("Method not defined!")
	//	var ruleNames = b.GetRuleNames()
	//	if (ruleNames==nil) {
	//		panic("The current recognizer does not provide a list of rule names.")
	//	}
	//
	//	var result = ruleIndexMapCache[ruleNames]
	//	if(result==nil) {
	//		result = ruleNames.reduce(function(o, k, i) { o[k] = i })
	//		ruleIndexMapCache[ruleNames] = result
	//	}
	//	return result
}

func (b *BaseRecognizer) GetTokenType(tokenName string) int {
	panic("Method not defined!")
	//	var ttype = b.GetTokenTypeMap()[tokenName]
	//	if (ttype !=nil) {
	//		return ttype
	//	} else {
	//		return TokenInvalidType
	//	}
}

//func (b *Recognizer) GetTokenTypeMap() map[string]int {
//	Vocabulary vocabulary = getVocabulary()
//
//	Synchronized (tokenTypeMapCache) {
//		Map<String, Integer> result = tokenTypeMapCache.Get(vocabulary)
//		if (result == null) {
//			result = new HashMap<String, Integer>()
//			for (int i = 0; i < GetATN().maxTokenType; i++) {
//				String literalName = vocabulary.getLiteralName(i)
//				if (literalName != null) {
//					result.put(literalName, i)
//				}
//
//				String symbolicName = vocabulary.GetSymbolicName(i)
//				if (symbolicName != null) {
//					result.put(symbolicName, i)
//				}
//			}
//
//			result.put("EOF", Token.EOF)
//			result = Collections.unmodifiableMap(result)
//			tokenTypeMapCache.put(vocabulary, result)
//		}
//
//		return result
//	}
//}

// What is the error header, normally line/character position information?
func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
	line := e.GetOffendingToken().GetLine()
	column := e.GetOffendingToken().GetColumn()
	return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
}

// How should a token be displayed in an error message? The default
// is to display just the text, but during development you might
// want to have a lot of information spit out. Override in that case
// to use t.String() (which, for CommonToken, dumps everything about
// the token). This is better than forcing you to override a method in
// your token objects because you don't have to go modify your lexer
// so that it creates a new Java type.
//
// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
// implementations of {@link ANTLRErrorStrategy} may provide a similar
// feature when necessary. For example, see
// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
//
func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
	if t == nil {
		return "<no token>"
	}
	s := t.GetText()
	if s == "" {
		if t.GetTokenType() == TokenEOF {
			s = "<EOF>"
		} else {
			s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
		}
	}
	s = strings.Replace(s, "\t", "\\t", -1)
	s = strings.Replace(s, "\n", "\\n", -1)
	s = strings.Replace(s, "\r", "\\r", -1)

	return "'" + s + "'"
}
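
// Illustrative sketch (editor's addition, not part of the vendored runtime):
// how an error strategy might combine the two helpers above into one message.
// The RecognitionException is assumed to come from the parser; only the
// formatting is shown.
func exampleErrorMessage(b *BaseRecognizer, e RecognitionException, offending Token) string {
	// e.g. `line 3:14 unexpected token '\n'`
	return b.GetErrorHeader(e) + " unexpected token " + b.GetTokenErrorDisplay(offending)
}
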
func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
	return NewProxyErrorListener(b.listeners)
}

// subclass needs to override these if there are sempreds or actions
// that the ATN interp needs to execute
func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool {
	return true
}

func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool {
	return true
}
114 vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go generated vendored Normal file
@@ -0,0 +1,114 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// A rule context is a record of a single rule invocation. It knows
// which context invoked it, if any. If there is no parent context, then
// naturally the invoking state is not valid. The parent link
// provides a chain upwards from the current rule invocation to the root
// of the invocation tree, forming a stack. We actually carry no
// information about the rule associated with this context (except
// when parsing). We keep only the state number of the invoking state from
// the ATN submachine that invoked this context. Contrast this with the s
// pointer inside ParserRuleContext that tracks the current state
// being "executed" for the current rule.
//
// The parent contexts are useful for computing lookahead sets and
// getting error information.
//
// These objects are used during parsing and prediction.
// For the special case of parsers, we use the subclass
// ParserRuleContext.
//
// @see ParserRuleContext
//

type RuleContext interface {
	RuleNode

	GetInvokingState() int
	SetInvokingState(int)

	GetRuleIndex() int
	IsEmpty() bool

	GetAltNumber() int
	SetAltNumber(altNumber int)

	String([]string, RuleContext) string
}

type BaseRuleContext struct {
	parentCtx     RuleContext
	invokingState int
	RuleIndex     int
}

func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {

	rn := new(BaseRuleContext)

	// What context invoked this rule?
	rn.parentCtx = parent

	// What state invoked the rule associated with this context?
	// The "return address" is the followState of invokingState.
	// If parent is nil, this should be -1.
	if parent == nil {
		rn.invokingState = -1
	} else {
		rn.invokingState = invokingState
	}

	return rn
}

func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext {
	return b
}

func (b *BaseRuleContext) SetParent(v Tree) {
	if v == nil {
		b.parentCtx = nil
	} else {
		b.parentCtx = v.(RuleContext)
	}
}

func (b *BaseRuleContext) GetInvokingState() int {
	return b.invokingState
}

func (b *BaseRuleContext) SetInvokingState(t int) {
	b.invokingState = t
}

func (b *BaseRuleContext) GetRuleIndex() int {
	return b.RuleIndex
}

func (b *BaseRuleContext) GetAltNumber() int {
	return ATNInvalidAltNumber
}

func (b *BaseRuleContext) SetAltNumber(altNumber int) {}

// A context is empty if there is no invoking state, meaning nobody called
// the current context.
func (b *BaseRuleContext) IsEmpty() bool {
	return b.invokingState == -1
}

// Return the combined text of all child nodes. This method only considers
// tokens which have been added to the parse tree.
// <p>
// Since tokens on hidden channels (e.g. whitespace or comments) are not
// added to the parse trees, they will not appear in the output of this
// method.
//

func (b *BaseRuleContext) GetParent() Tree {
	return b.parentCtx
}
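
// Illustrative sketch (editor's addition, not part of the vendored runtime):
// the parent chain described above forms a stack of rule invocations, so
// walking it from a context to the root collects the invoking-state numbers,
// innermost first.
func exampleInvocationStack(ctx RuleContext) []int {
	states := make([]int, 0)
	for !ctx.IsEmpty() {
		states = append(states, ctx.GetInvokingState())
		parent, ok := ctx.GetParent().(RuleContext)
		if !ok || parent == nil {
			break
		}
		ctx = parent
	}
	return states
}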
466 vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go generated vendored Normal file
@@ -0,0 +1,466 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strconv"
)

// A tree structure used to record the semantic context in which
// an ATN configuration is valid. It's either a single predicate,
// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
//
// <p>I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
// {@link SemanticContext} within the scope of this outer class.</p>
//

type SemanticContext interface {
	comparable

	evaluate(parser Recognizer, outerContext RuleContext) bool
	evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext

	hash() int
	String() string
}

func SemanticContextandContext(a, b SemanticContext) SemanticContext {
	if a == nil || a == SemanticContextNone {
		return b
	}
	if b == nil || b == SemanticContextNone {
		return a
	}
	result := NewAND(a, b)
	if len(result.opnds) == 1 {
		return result.opnds[0]
	}

	return result
}

func SemanticContextorContext(a, b SemanticContext) SemanticContext {
	if a == nil {
		return b
	}
	if b == nil {
		return a
	}
	if a == SemanticContextNone || b == SemanticContextNone {
		return SemanticContextNone
	}
	result := NewOR(a, b)
	if len(result.opnds) == 1 {
		return result.opnds[0]
	}

	return result
}
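
// Illustrative sketch (editor's addition, not part of the vendored runtime):
// how the two combinators above reduce against SemanticContextNone, the
// "always true" predicate defined below.
func exampleCombineContexts(p, q SemanticContext) {
	// AND with the always-true predicate is just the other operand.
	a := SemanticContextandContext(SemanticContextNone, p) // == p

	// OR with the always-true predicate short-circuits to "always true".
	o := SemanticContextorContext(p, SemanticContextNone) // == SemanticContextNone

	// Two distinct predicates really are combined into an AND node.
	both := SemanticContextandContext(p, q)

	_, _, _ = a, o, both
}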

type Predicate struct {
	ruleIndex      int
	predIndex      int
	isCtxDependent bool
}

func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
	p := new(Predicate)

	p.ruleIndex = ruleIndex
	p.predIndex = predIndex
	p.isCtxDependent = isCtxDependent // e.g., $i ref in pred
	return p
}

// The default {@link SemanticContext}, which is semantically equivalent to
// a predicate of the form {@code {true}?}.

var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false)

func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
	return p
}

func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {

	var localctx RuleContext

	if p.isCtxDependent {
		localctx = outerContext
	}

	return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
}

func (p *Predicate) equals(other interface{}) bool {
	if p == other {
		return true
	} else if _, ok := other.(*Predicate); !ok {
		return false
	} else {
		return p.ruleIndex == other.(*Predicate).ruleIndex &&
			p.predIndex == other.(*Predicate).predIndex &&
			p.isCtxDependent == other.(*Predicate).isCtxDependent
	}
}

func (p *Predicate) hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, p.ruleIndex)
	h = murmurUpdate(h, p.predIndex)
	if p.isCtxDependent {
		h = murmurUpdate(h, 1)
	} else {
		h = murmurUpdate(h, 0)
	}
	return murmurFinish(h, 3)
}

func (p *Predicate) String() string {
	return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?"
}

type PrecedencePredicate struct {
	precedence int
}

func NewPrecedencePredicate(precedence int) *PrecedencePredicate {

	p := new(PrecedencePredicate)
	p.precedence = precedence

	return p
}

func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
	return parser.Precpred(outerContext, p.precedence)
}

func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
	if parser.Precpred(outerContext, p.precedence) {
		return SemanticContextNone
	}

	return nil
}

func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
	return p.precedence - other.precedence
}

func (p *PrecedencePredicate) equals(other interface{}) bool {
	if p == other {
		return true
	} else if _, ok := other.(*PrecedencePredicate); !ok {
		return false
	} else {
		return p.precedence == other.(*PrecedencePredicate).precedence
	}
}

func (p *PrecedencePredicate) hash() int {
	h := uint32(1)
	h = 31*h + uint32(p.precedence)
	return int(h)
}

func (p *PrecedencePredicate) String() string {
	return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
}

func PrecedencePredicatefilterPrecedencePredicates(set Set) []*PrecedencePredicate {
	result := make([]*PrecedencePredicate, 0)

	set.Each(func(v interface{}) bool {
		if c2, ok := v.(*PrecedencePredicate); ok {
			result = append(result, c2)
		}
		return true
	})

	return result
}

// A semantic context which is true whenever none of the contained contexts
// is false.

type AND struct {
	opnds []SemanticContext
}

func NewAND(a, b SemanticContext) *AND {

	operands := newArray2DHashSet(nil, nil)
	if aa, ok := a.(*AND); ok {
		for _, o := range aa.opnds {
			operands.Add(o)
		}
	} else {
		operands.Add(a)
	}

	if ba, ok := b.(*AND); ok {
		for _, o := range ba.opnds {
			operands.Add(o)
		}
	} else {
		operands.Add(b)
	}
	precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
	if len(precedencePredicates) > 0 {
		// interested in the transition with the lowest precedence
		var reduced *PrecedencePredicate

		for _, p := range precedencePredicates {
			if reduced == nil || p.precedence < reduced.precedence {
				reduced = p
			}
		}

		operands.Add(reduced)
	}

	vs := operands.Values()
	opnds := make([]SemanticContext, len(vs))
	for i, v := range vs {
		opnds[i] = v.(SemanticContext)
	}

	and := new(AND)
	and.opnds = opnds

	return and
}

func (a *AND) equals(other interface{}) bool {
	if a == other {
		return true
	} else if _, ok := other.(*AND); !ok {
		return false
	} else {
		// operand lists of different lengths can never be equal
		if len(a.opnds) != len(other.(*AND).opnds) {
			return false
		}
		for i, v := range other.(*AND).opnds {
			if !a.opnds[i].equals(v) {
				return false
			}
		}
		return true
	}
}

//
// {@inheritDoc}
//
// <p>
// The evaluation of predicates by this context is short-circuiting, but
// unordered.</p>
//
func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
	for i := 0; i < len(a.opnds); i++ {
		if !a.opnds[i].evaluate(parser, outerContext) {
			return false
		}
	}
	return true
}

func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
	differs := false
	operands := make([]SemanticContext, 0)

	for i := 0; i < len(a.opnds); i++ {
		context := a.opnds[i]
		evaluated := context.evalPrecedence(parser, outerContext)
		differs = differs || (evaluated != context)
		if evaluated == nil {
			// The AND context is false if any element is false
			return nil
		} else if evaluated != SemanticContextNone {
			// Reduce the result by skipping true elements
			operands = append(operands, evaluated)
		}
	}
	if !differs {
		return a
	}

	if len(operands) == 0 {
		// all elements were true, so the AND context is true
		return SemanticContextNone
	}

	var result SemanticContext

	for _, o := range operands {
		if result == nil {
			result = o
		} else {
			result = SemanticContextandContext(result, o)
		}
	}

	return result
}

func (a *AND) hash() int {
	h := murmurInit(37) // Init with a value different from OR
	for _, op := range a.opnds {
		h = murmurUpdate(h, op.hash())
	}
	return murmurFinish(h, len(a.opnds))
}

func (a *OR) hash() int {
	h := murmurInit(41) // Init with a value different from AND
	for _, op := range a.opnds {
		h = murmurUpdate(h, op.hash())
	}
	return murmurFinish(h, len(a.opnds))
}

func (a *AND) String() string {
	s := ""

	for _, o := range a.opnds {
		s += "&& " + fmt.Sprint(o)
	}

	if len(s) > 3 {
		return s[3:] // strip the leading "&& "
	}

	return s
}

//
// A semantic context which is true whenever at least one of the contained
// contexts is true.
//

type OR struct {
	opnds []SemanticContext
}

func NewOR(a, b SemanticContext) *OR {

	operands := newArray2DHashSet(nil, nil)
	if aa, ok := a.(*OR); ok {
		for _, o := range aa.opnds {
			operands.Add(o)
		}
	} else {
		operands.Add(a)
	}

	if ba, ok := b.(*OR); ok {
		for _, o := range ba.opnds {
			operands.Add(o)
		}
	} else {
		operands.Add(b)
	}
	precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
	if len(precedencePredicates) > 0 {
		// interested in the transition with the highest precedence
		var reduced *PrecedencePredicate

		for _, p := range precedencePredicates {
			if reduced == nil || p.precedence > reduced.precedence {
				reduced = p
			}
		}

		operands.Add(reduced)
	}

	vs := operands.Values()

	opnds := make([]SemanticContext, len(vs))
	for i, v := range vs {
		opnds[i] = v.(SemanticContext)
	}

	o := new(OR)
	o.opnds = opnds

	return o
}

func (o *OR) equals(other interface{}) bool {
	if o == other {
		return true
	} else if _, ok := other.(*OR); !ok {
		return false
	} else {
		// operand lists of different lengths can never be equal
		if len(o.opnds) != len(other.(*OR).opnds) {
			return false
		}
		for i, v := range other.(*OR).opnds {
			if !o.opnds[i].equals(v) {
				return false
			}
		}
		return true
	}
}

// <p>
// The evaluation of predicates by this context is short-circuiting, but
// unordered.</p>
//
func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
	for i := 0; i < len(o.opnds); i++ {
		if o.opnds[i].evaluate(parser, outerContext) {
			return true
		}
	}
	return false
}

func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
	differs := false
	operands := make([]SemanticContext, 0)
	for i := 0; i < len(o.opnds); i++ {
		context := o.opnds[i]
		evaluated := context.evalPrecedence(parser, outerContext)
		differs = differs || (evaluated != context)
		if evaluated == SemanticContextNone {
			// The OR context is true if any element is true
			return SemanticContextNone
		} else if evaluated != nil {
			// Reduce the result by skipping false elements
			operands = append(operands, evaluated)
		}
	}
	if !differs {
		return o
	}
	if len(operands) == 0 {
		// all elements were false, so the OR context is false
		return nil
	}
	var result SemanticContext

	for _, op := range operands { // loop variable renamed to avoid shadowing the receiver
		if result == nil {
			result = op
		} else {
			result = SemanticContextorContext(result, op)
		}
	}

	return result
}

func (o *OR) String() string {
	s := ""

	for _, op := range o.opnds {
		s += "|| " + fmt.Sprint(op)
	}

	if len(s) > 3 {
		return s[3:] // strip the leading "|| "
	}

	return s
}
210 vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go generated vendored Normal file
@@ -0,0 +1,210 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"strconv"
	"strings"
)

type TokenSourceCharStreamPair struct {
	tokenSource TokenSource
	charStream  CharStream
}

// A token has properties: text, type, line, character position in the line
// (so we can ignore tabs), token channel, index, and source from which
// we obtained this token.

type Token interface {
	GetSource() *TokenSourceCharStreamPair
	GetTokenType() int
	GetChannel() int
	GetStart() int
	GetStop() int
	GetLine() int
	GetColumn() int

	GetText() string
	SetText(s string)

	GetTokenIndex() int
	SetTokenIndex(v int)

	GetTokenSource() TokenSource
	GetInputStream() CharStream
}

type BaseToken struct {
	source     *TokenSourceCharStreamPair
	tokenType  int    // token type of the token
	channel    int    // The parser ignores everything not on DEFAULT_CHANNEL
	start      int    // optional return -1 if not implemented.
	stop       int    // optional return -1 if not implemented.
	tokenIndex int    // from 0..n-1 of the token object in the input stream
	line       int    // line=1..n of the 1st character
	column     int    // beginning of the line at which it occurs, 0..n-1
	text       string // text of the token.
	readOnly   bool
}

const (
	TokenInvalidType = 0

	// During lookahead operations, this "token" signifies we hit the rule end
	// ATN state and did not follow it despite needing to.
	TokenEpsilon = -2

	TokenMinUserTokenType = 1

	TokenEOF = -1

	// All tokens go to the parser (unless Skip() is called in that rule)
	// on a particular "channel". The parser tunes to a particular channel
	// so that whitespace etc... can go to the parser on a "hidden" channel.

	TokenDefaultChannel = 0

	// Anything on a different channel than DEFAULT_CHANNEL is not parsed
	// by the parser.

	TokenHiddenChannel = 1
)

func (b *BaseToken) GetChannel() int {
	return b.channel
}

func (b *BaseToken) GetStart() int {
	return b.start
}

func (b *BaseToken) GetStop() int {
	return b.stop
}

func (b *BaseToken) GetLine() int {
	return b.line
}

func (b *BaseToken) GetColumn() int {
	return b.column
}

func (b *BaseToken) GetTokenType() int {
	return b.tokenType
}

func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
	return b.source
}

func (b *BaseToken) GetTokenIndex() int {
	return b.tokenIndex
}

func (b *BaseToken) SetTokenIndex(v int) {
	b.tokenIndex = v
}

func (b *BaseToken) GetTokenSource() TokenSource {
	return b.source.tokenSource
}

func (b *BaseToken) GetInputStream() CharStream {
	return b.source.charStream
}

type CommonToken struct {
	*BaseToken
}

func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {

	t := new(CommonToken)

	t.BaseToken = new(BaseToken)

	t.source = source
	t.tokenType = tokenType
	t.channel = channel
	t.start = start
	t.stop = stop
	t.tokenIndex = -1
	if t.source.tokenSource != nil {
		t.line = source.tokenSource.GetLine()
		t.column = source.tokenSource.GetCharPositionInLine()
	} else {
		t.column = -1
	}
	return t
}

// An empty {@link Pair} which is used as the default value of
// {@link //source} for tokens that do not have a source.

//CommonToken.EMPTY_SOURCE = [ nil, nil ]

// Constructs a new {@link CommonToken} as a copy of another {@link Token}.
//
// <p>
// If {@code oldToken} is also a {@link CommonToken} instance, the newly
// constructed token will share a reference to the {@link //text} field and
// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will
// be assigned the result of calling {@link //GetText}, and {@link //source}
// will be constructed from the result of {@link Token//GetTokenSource} and
// {@link Token//GetInputStream}.</p>
//
// @param oldToken The token to copy.
//
func (c *CommonToken) clone() *CommonToken {
	t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
	t.tokenIndex = c.GetTokenIndex()
	t.line = c.GetLine()
	t.column = c.GetColumn()
	t.text = c.GetText()
	return t
}

func (c *CommonToken) GetText() string {
	if c.text != "" {
		return c.text
	}
	input := c.GetInputStream()
	if input == nil {
		return ""
	}
	n := input.Size()
	if c.start < n && c.stop < n {
		return input.GetTextFromInterval(NewInterval(c.start, c.stop))
	}
	return "<EOF>"
}

func (c *CommonToken) SetText(text string) {
	c.text = text
}

func (c *CommonToken) String() string {
	txt := c.GetText()
	if txt != "" {
		txt = strings.Replace(txt, "\n", "\\n", -1)
		txt = strings.Replace(txt, "\r", "\\r", -1)
		txt = strings.Replace(txt, "\t", "\\t", -1)
	} else {
		txt = "<no text>"
	}

	var ch string
	if c.channel > 0 {
		ch = ",channel=" + strconv.Itoa(c.channel)
	} else {
		ch = ""
	}

	return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
		txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
		ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
}
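
// Illustrative sketch (editor's addition, not part of the vendored runtime):
// the String format above renders a token as
// "[@index,start:stop='text',<type>,line:column]", with a ",channel=N"
// segment only for non-default channels. With no token source attached,
// line defaults to 0 and column to -1.
func exampleTokenString() string {
	src := &TokenSourceCharStreamPair{} // empty pair: no token source, no char stream
	t := NewCommonToken(src, TokenMinUserTokenType, TokenDefaultChannel, 0, 4)
	t.SetText("hello")
	return t.String() // "[@-1,0:4='hello',<1>,0:-1]"
}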
17 vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go generated vendored Normal file
@@ -0,0 +1,17 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

type TokenSource interface {
	NextToken() Token
	Skip()
	More()
	GetLine() int
	GetCharPositionInLine() int
	GetInputStream() CharStream
	GetSourceName() string
	setTokenFactory(factory TokenFactory)
	GetTokenFactory() TokenFactory
}
20 vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go generated vendored Normal file
@@ -0,0 +1,20 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

type TokenStream interface {
	IntStream

	LT(k int) Token

	Get(index int) Token
	GetTokenSource() TokenSource
	SetTokenSource(TokenSource)

	GetAllText() string
	GetTextFromInterval(*Interval) string
	GetTextFromRuleContext(RuleContext) string
	GetTextFromTokens(Token, Token) string
}
649 vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go generated vendored Normal file
@@ -0,0 +1,649 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr

import (
	"bytes"
	"fmt"
)

//
// Useful for rewriting out a buffered input token stream after doing some
// augmentation or other manipulations on it.

// <p>
// You can insert stuff, replace, and delete chunks. Note that the operations
// are done lazily--only if you convert the buffer to a {@link String} with
// {@link TokenStream#getText()}. This is very efficient because you are not
// moving data around all the time. As the buffer of tokens is converted to
// strings, the {@link #getText()} method(s) scan the input token stream and
// check to see if there is an operation at the current index. If so, the
// operation is done and then normal {@link String} rendering continues on the
// buffer. This is like having multiple Turing machine instruction streams
// (programs) operating on a single input tape. :)</p>
// <p>

// This rewriter makes no modifications to the token stream. It does not ask the
// stream to fill itself up nor does it advance the input cursor. The token
// stream {@link TokenStream#index()} will return the same value before and
// after any {@link #getText()} call.</p>

// <p>
// The rewriter only works on tokens that you have in the buffer and ignores the
// current input cursor. If you are buffering tokens on-demand, calling
// {@link #getText()} halfway through the input will only do rewrites for those
// tokens in the first half of the file.</p>

// <p>
// Since the operations are done lazily at {@link #getText}-time, operations do
// not screw up the token index values. That is, an insert operation at token
// index {@code i} does not change the index values for tokens
// {@code i}+1..n-1.</p>

// <p>
// Because operations never actually alter the buffer, you may always get the
// original token stream back without undoing anything. Since the instructions
// are queued up, you can easily simulate transactions and roll back any changes
// if there is an error just by removing instructions. For example,</p>

// <pre>
// CharStream input = new ANTLRFileStream("input");
// TLexer lex = new TLexer(input);
// CommonTokenStream tokens = new CommonTokenStream(lex);
// T parser = new T(tokens);
// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
// parser.startRule();
// </pre>

// <p>
// Then in the rules, you can execute (assuming rewriter is visible):</p>

// <pre>
// Token t,u;
// ...
// rewriter.insertAfter(t, "text to put after t");}
// rewriter.insertAfter(u, "text after u");}
// System.out.println(rewriter.getText());
// </pre>

// <p>
// You can also have multiple "instruction streams" and get multiple rewrites
// from a single pass over the input. Just name the instruction streams and use
// that name again when printing the buffer. This could be useful for generating
// a C file and also its header file--all from the same buffer:</p>

// <pre>
// rewriter.insertAfter("pass1", t, "text to put after t");}
// rewriter.insertAfter("pass2", u, "text after u");}
// System.out.println(rewriter.getText("pass1"));
// System.out.println(rewriter.getText("pass2"));
// </pre>

// <p>
// If you don't use named rewrite streams, a "default" stream is used as the
// first example shows.</p>

const (
	Default_Program_Name = "default"
	Program_Init_Size    = 100
	Min_Token_Index      = 0
)

// Define the rewrite operation hierarchy

type RewriteOperation interface {
	// Execute the rewrite operation by possibly adding to the buffer.
	// Return the index of the next token to operate on.
	Execute(buffer *bytes.Buffer) int
	String() string
	GetInstructionIndex() int
	GetIndex() int
	GetText() string
	GetOpName() string
	GetTokens() TokenStream
	SetInstructionIndex(val int)
	SetIndex(int)
	SetText(string)
	SetOpName(string)
	SetTokens(TokenStream)
}

type BaseRewriteOperation struct {
	// Current index of rewrites list
	instruction_index int
	// Token buffer index
	index int
	// Substitution text
	text string
	// Actual operation name
	op_name string
	// Pointer to token stream
	tokens TokenStream
}

func (op *BaseRewriteOperation) GetInstructionIndex() int {
	return op.instruction_index
}

func (op *BaseRewriteOperation) GetIndex() int {
	return op.index
}

func (op *BaseRewriteOperation) GetText() string {
	return op.text
}

func (op *BaseRewriteOperation) GetOpName() string {
	return op.op_name
}

func (op *BaseRewriteOperation) GetTokens() TokenStream {
	return op.tokens
}

func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
	op.instruction_index = val
}

func (op *BaseRewriteOperation) SetIndex(val int) {
	op.index = val
}

func (op *BaseRewriteOperation) SetText(val string) {
	op.text = val
}

func (op *BaseRewriteOperation) SetOpName(val string) {
	op.op_name = val
}

func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
	op.tokens = val
}

func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int {
	return op.index
}

func (op *BaseRewriteOperation) String() string {
	// %v rather than %d: the second argument is a Token, not an integer
	return fmt.Sprintf("<%s@%v:\"%s\">",
		op.op_name,
		op.tokens.Get(op.GetIndex()),
		op.text,
	)
}

type InsertBeforeOp struct {
	BaseRewriteOperation
}

func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
	return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
		index:   index,
		text:    text,
		op_name: "InsertBeforeOp",
		tokens:  stream,
	}}
}

func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int {
	buffer.WriteString(op.text)
	if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
		buffer.WriteString(op.tokens.Get(op.index).GetText())
	}
	return op.index + 1
}

func (op *InsertBeforeOp) String() string {
	return op.BaseRewriteOperation.String()
}

// Distinguish between insert after/before to do the "insert afters"
// first and then the "insert befores" at the same index. Implementation
// of "insert after" is "insert before index+1".

type InsertAfterOp struct {
	BaseRewriteOperation
}

func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
	return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{
		index:   index + 1,
		text:    text,
		op_name: "InsertAfterOp", // was missing; String() reads this field
		tokens:  stream,
	}}
}

func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
	buffer.WriteString(op.text)
	if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
		buffer.WriteString(op.tokens.Get(op.index).GetText())
	}
	return op.index + 1
}

func (op *InsertAfterOp) String() string {
	return op.BaseRewriteOperation.String()
}

// I'm going to try replacing the range from x..y with (y-x)+1 ReplaceOp
// instructions.
type ReplaceOp struct {
	BaseRewriteOperation
	LastIndex int
}

func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
	return &ReplaceOp{
		BaseRewriteOperation: BaseRewriteOperation{
			index:   from,
			text:    text,
			op_name: "ReplaceOp",
			tokens:  stream,
		},
		LastIndex: to,
	}
}

func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
	if op.text != "" {
		buffer.WriteString(op.text)
	}
	return op.LastIndex + 1
}

func (op *ReplaceOp) String() string {
	// %v rather than %d: the arguments are Tokens, not integers
	if op.text == "" {
		return fmt.Sprintf("<DeleteOP@%v..%v>",
			op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
	}
	return fmt.Sprintf("<ReplaceOp@%v..%v:\"%s\">",
		op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
}

type TokenStreamRewriter struct {
	// Our source stream
	tokens TokenStream
	// You may have multiple, named streams of rewrite operations.
	// I'm calling these things "programs."
	// Maps String (name) → rewrite (List)
	programs                   map[string][]RewriteOperation
	last_rewrite_token_indexes map[string]int
}

func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
	return &TokenStreamRewriter{
		tokens: tokens,
		programs: map[string][]RewriteOperation{
			Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size),
		},
		last_rewrite_token_indexes: map[string]int{},
	}
}
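
// Illustrative sketch (editor's addition, not part of the vendored runtime):
// the Go equivalent of the Java usage shown in the comment at the top of this
// file, using the default program. It assumes `tokens` is a fully buffered
// stream with at least four tokens.
func exampleRewrite(tokens TokenStream) string {
	rewriter := NewTokenStreamRewriter(tokens)
	rewriter.InsertBeforeDefault(0, "// generated\n") // queue an insert before token 0
	rewriter.ReplaceDefaultPos(3, "renamed")          // queue a replacement of token 3
	// Nothing has touched the token stream; rendering happens lazily here.
	return rewriter.GetTextDefault()
}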

func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
	return tsr.tokens
}

// Rollback the instruction stream for a program so that
// the indicated instruction (via instructionIndex) is no
// longer in the stream. UNTESTED!
func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) {
	is, ok := tsr.programs[program_name]
	if ok {
		tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
	}
}

func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) {
	tsr.Rollback(Default_Program_Name, instruction_index)
}

// Reset the program so that no instructions exist
func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) {
	tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included
}

func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
	tsr.DeleteProgram(Default_Program_Name)
}

func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) {
	// to insert after, just insert before next index (even if past end)
	var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
	rewrites := tsr.GetProgram(program_name)
	op.SetInstructionIndex(len(rewrites))
	tsr.AddToProgram(program_name, op)
}

func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
	tsr.InsertAfter(Default_Program_Name, index, text)
}

func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) {
	tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
}

func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) {
	var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
	rewrites := tsr.GetProgram(program_name)
	op.SetInstructionIndex(len(rewrites))
	tsr.AddToProgram(program_name, op)
}

func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
	tsr.InsertBefore(Default_Program_Name, index, text)
}

func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) {
	tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
}

func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string) {
	if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
		panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
			from, to, tsr.tokens.Size()))
	}
	var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
	rewrites := tsr.GetProgram(program_name)
	op.SetInstructionIndex(len(rewrites))
	tsr.AddToProgram(program_name, op)
}

func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
	tsr.Replace(Default_Program_Name, from, to, text)
}

func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
	tsr.ReplaceDefault(index, index, text)
}

func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) {
	tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
}

func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
	tsr.ReplaceToken(Default_Program_Name, from, to, text)
}

func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
	tsr.ReplaceTokenDefault(index, index, text)
}

func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) {
	tsr.Replace(program_name, from, to, "")
}

func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
	tsr.Delete(Default_Program_Name, from, to)
}

func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
	tsr.DeleteDefault(index, index)
}

func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) {
	tsr.ReplaceToken(program_name, from, to, "")
}

func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
	tsr.DeleteToken(Default_Program_Name, from, to)
}

func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int {
	i, ok := tsr.last_rewrite_token_indexes[program_name]
	if !ok {
		return -1
	}
	return i
}

func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
	return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
}

func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) {
	tsr.last_rewrite_token_indexes[program_name] = i
}

func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
	is := make([]RewriteOperation, 0, Program_Init_Size)
	tsr.programs[name] = is
	return is
}

func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) {
	is := tsr.GetProgram(name)
	is = append(is, op)
	tsr.programs[name] = is
}

func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
	is, ok := tsr.programs[name]
	if !ok {
		is = tsr.InitializeProgram(name)
	}
	return is
}

// Return the text from the original tokens altered per the
// instructions given to this rewriter.
func (tsr *TokenStreamRewriter) GetTextDefault() string {
	return tsr.GetText(
		Default_Program_Name,
		NewInterval(0, tsr.tokens.Size()-1))
}

// Return the text from the original tokens altered per the
// instructions given to this rewriter.
func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string {
	rewrites := tsr.programs[program_name]
	start := interval.Start
	stop := interval.Stop
	// ensure start/end are in range
	stop = min(stop, tsr.tokens.Size()-1)
	start = max(start, 0)
	if len(rewrites) == 0 { // len of a nil slice is 0, so no separate nil check is needed
		return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
	}
	buf := bytes.Buffer{}
	// First, optimize instruction stream
	indexToOp := reduceToSingleOperationPerIndex(rewrites)
	// Walk buffer, executing instructions and emitting tokens
	for i := start; i <= stop && i < tsr.tokens.Size(); {
		op := indexToOp[i]
		delete(indexToOp, i) // remove so any left have index size-1
		t := tsr.tokens.Get(i)
		if op == nil {
			// no operation at that index, just dump token
			if t.GetTokenType() != TokenEOF {
				buf.WriteString(t.GetText())
			}
			i++ // move to next token
		} else {
			i = op.Execute(&buf) // execute operation and skip
		}
	}
	// include stuff after end if it's last index in buffer
	// So, if they did an insertAfter(lastValidIndex, "foo"), include
	// foo if end==lastValidIndex.
	if stop == tsr.tokens.Size()-1 {
		// Scan any remaining operations after last token
		// should be included (they will be inserts).
		for _, op := range indexToOp {
			if op.GetIndex() >= tsr.tokens.Size()-1 {
				buf.WriteString(op.GetText())
			}
		}
	}
	return buf.String()
}
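
// Illustrative sketch (editor's addition, not part of the vendored runtime):
// multiple named instruction streams over one token buffer, mirroring the
// "pass1"/"pass2" Java example in the file comment. Each program renders
// independently from the same unmodified tokens.
func exampleNamedPrograms(tokens TokenStream, t, u Token) (string, string) {
	rewriter := NewTokenStreamRewriter(tokens)
	rewriter.InsertAfterToken("pass1", t, "text to put after t")
	rewriter.InsertAfterToken("pass2", u, "text after u")

	all := NewInterval(0, tokens.Size()-1)
	return rewriter.GetText("pass1", all), rewriter.GetText("pass2", all)
}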

// We need to combine operations and report invalid operations (like
// overlapping replaces that are not completely nested). Inserts to the
// same index need to be combined etc... Here are the cases:
//
// I.i.u I.j.v                           leave alone, nonoverlapping
// I.i.u I.i.v                           combine: Iivu
//
// R.i-j.u R.x-y.v | i-j in x-y          delete first R
// R.i-j.u R.i-j.v                       delete first R
// R.i-j.u R.x-y.v | x-y in i-j          ERROR
// R.i-j.u R.x-y.v | boundaries overlap  ERROR
//
// Delete special case of replace (text==null):
// D.i-j.u D.x-y.v | boundaries overlap  combine to max(min)..max(right)
//
// I.i.u R.x-y.v   | i in (x+1)-y        delete I (since insert before
//                                       we're not deleting i)
// I.i.u R.x-y.v   | i not in (x+1)-y    leave alone, nonoverlapping
// R.x-y.v I.i.u   | i in x-y            ERROR
// R.x-y.v I.x.u                         R.x-y.uv (combine, delete I)
// R.x-y.v I.i.u   | i not in x-y        leave alone, nonoverlapping
//
// I.i.u = insert u before op @ index i
// R.x-y.u = replace x-y indexed tokens with u
//
// First we need to examine replaces. For any replace op:
//
//	1. wipe out any insertions before op within that range.
//	2. Drop any replace op before that is contained completely within
//	   that range.
//	3. Throw exception upon boundary overlap with any previous replace.
//
// Then we can deal with inserts:
//
//	1. for any inserts to the same index, combine even if not adjacent.
//	2. for any prior replace with the same left boundary, combine this
//	   insert with replace and delete this replace.
//	3. throw exception if index in same range as previous replace
//
// Don't actually delete; make op null in list. Easier to walk list.
// Later we can throw as we add to index → op map.
//
// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
// inserted stuff would be before the replace range. But, if you
// add tokens in front of a method body '{' and then delete the method
// body, I think the stuff before the '{' you added should disappear too.
//
// Return a map from token index to operation.
//
func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
	// WALK REPLACES
	for i := 0; i < len(rewrites); i++ {
		op := rewrites[i]
		if op == nil {
			continue
		}
		rop, ok := op.(*ReplaceOp)
		if !ok {
			continue
		}
		// Wipe prior inserts within range
		for j := 0; j < i && j < len(rewrites); j++ {
			if iop, ok := rewrites[j].(*InsertBeforeOp); ok {
				if iop.index == rop.index {
					// E.g., insert before 2, delete 2..2; update replace
					// text to include insert before, kill insert
					rewrites[iop.instruction_index] = nil
					if rop.text != "" {
						rop.text = iop.text + rop.text
					} else {
						rop.text = iop.text
					}
				} else if iop.index > rop.index && iop.index <= rop.LastIndex {
					// delete insert as it's a no-op.
					rewrites[iop.instruction_index] = nil
				}
			}
		}
		// Drop any prior replaces contained within
		for j := 0; j < i && j < len(rewrites); j++ {
			if prevop, ok := rewrites[j].(*ReplaceOp); ok {
				if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
					// delete replace as it's a no-op.
					rewrites[prevop.instruction_index] = nil
					continue
				}
				// throw exception unless disjoint or identical
				disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
				// Delete special case of replace (text==null):
				// D.i-j.u D.x-y.v | boundaries overlap  combine to max(min)..max(right)
				if prevop.text == "" && rop.text == "" && !disjoint {
					rewrites[prevop.instruction_index] = nil
					rop.index = min(prevop.index, rop.index)
					rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
					println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
				} else if !disjoint {
					panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
				}
			}
		}
	}
	// WALK INSERTS
	for i := 0; i < len(rewrites); i++ {
		op := rewrites[i]
		if op == nil {
			continue
		}
		// hack to replicate inheritance in composition
		_, iok := rewrites[i].(*InsertBeforeOp)
		_, aok := rewrites[i].(*InsertAfterOp)
		if !iok && !aok {
			continue
		}
		iop := rewrites[i]
		// combine current insert with prior if any at same index
		// deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
		for j := 0; j < i && j < len(rewrites); j++ {
			if nextIop, ok := rewrites[j].(*InsertAfterOp); ok {
				if nextIop.index == iop.GetIndex() {
					iop.SetText(nextIop.text + iop.GetText())
					rewrites[j] = nil
				}
			}
			if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok {
				if prevIop.index == iop.GetIndex() {
					iop.SetText(iop.GetText() + prevIop.text)
					rewrites[prevIop.instruction_index] = nil
				}
			}
		}
		// look for replaces where iop.index is in range; error
		for j := 0; j < i && j < len(rewrites); j++ {
			if rop, ok := rewrites[j].(*ReplaceOp); ok {
				if iop.GetIndex() == rop.index {
					rop.text = iop.GetText() + rop.text
|
||||
rewrites[i] = nil
|
||||
continue
|
||||
}
|
||||
if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex{
|
||||
panic("insert op "+iop.String()+" within boundaries of previous "+rop.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
m := map[int]RewriteOperation{}
|
||||
for i:=0; i < len(rewrites); i++{
|
||||
op := rewrites[i]
|
||||
if op == nil {continue}
|
||||
if _, ok := m[op.GetIndex()]; ok{
|
||||
panic("should only be one op per index")
|
||||
}
|
||||
m[op.GetIndex()] = op
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
Quick fixing Go lack of overloads
|
||||
*/
|
||||
|
||||
func max(a,b int)int{
|
||||
if a>b{
|
||||
return a
|
||||
}else {
|
||||
return b
|
||||
}
|
||||
}
|
||||
func min(a,b int)int{
|
||||
if a<b{
|
||||
return a
|
||||
}else {
|
||||
return b
|
||||
}
|
||||
}
|
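A minimal sketch of how the combination rules above play out through this type's public API (NewTokenStreamRewriter, InsertBeforeDefault, ReplaceDefault, and GetTextDefault are all defined in this file); the token stream is assumed to come from a generated lexer elsewhere, and the indexes are illustrative:

func rewriteExample(tokens antlr.TokenStream) string {
	r := antlr.NewTokenStreamRewriter(tokens)
	r.InsertBeforeDefault(2, "x") // I.2.x
	r.ReplaceDefault(2, 4, "y")   // R.2-4.y absorbs I.2.x per the rules above, emitting "xy"
	return r.GetTextDefault()     // renders the default program over the whole stream
}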
32
vendor/github.com/antlr/antlr4/runtime/Go/antlr/trace_listener.go
generated vendored Normal file
@ -0,0 +1,32 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "fmt"

type TraceListener struct {
	parser *BaseParser
}

func NewTraceListener(parser *BaseParser) *TraceListener {
	tl := new(TraceListener)
	tl.parser = parser
	return tl
}

func (t *TraceListener) VisitErrorNode(_ ErrorNode) {
}

func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext) {
	fmt.Println("enter " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
}

func (t *TraceListener) VisitTerminal(node TerminalNode) {
	fmt.Println("consume " + fmt.Sprint(node.GetSymbol()) + " rule " + t.parser.GetRuleNames()[t.parser.ctx.GetRuleIndex()])
}

func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext) {
	fmt.Println("exit " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
}
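A minimal sketch of turning this listener on, assuming a generated parser embedding BaseParser; SetTrace (defined on BaseParser elsewhere in this runtime) installs its own TraceListener, so any non-nil argument enables the enter/exit/consume prints shown above:

func enableTracing(p *antlr.BaseParser) {
	// A non-nil listener makes SetTrace attach a fresh TraceListener.
	p.SetTrace(antlr.NewTraceListener(p))
}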
428
vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go
generated vendored Normal file
@ -0,0 +1,428 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strconv"
	"strings"
)

// atom, set, epsilon, action, predicate, rule transitions.
//
// <p>This is a one way link. It emanates from a state (usually via a list of
// transitions) and has a target state.</p>
//
// <p>Since we never have to change the ATN transitions once we construct it,
// we can fix these transitions, just as we do the states. We'll use the term
// Edge for the DFA to distinguish them from ATN transitions.</p>

type Transition interface {
	getTarget() ATNState
	setTarget(ATNState)
	getIsEpsilon() bool
	getLabel() *IntervalSet
	getSerializationType() int
	Matches(int, int, int) bool
}

type BaseTransition struct {
	target            ATNState
	isEpsilon         bool
	label             int
	intervalSet       *IntervalSet
	serializationType int
}

func NewBaseTransition(target ATNState) *BaseTransition {
	if target == nil {
		panic("target cannot be nil.")
	}

	t := new(BaseTransition)

	t.target = target
	// Are we epsilon, action, sempred?
	t.isEpsilon = false
	t.intervalSet = nil

	return t
}

func (t *BaseTransition) getTarget() ATNState {
	return t.target
}

func (t *BaseTransition) setTarget(s ATNState) {
	t.target = s
}

func (t *BaseTransition) getIsEpsilon() bool {
	return t.isEpsilon
}

func (t *BaseTransition) getLabel() *IntervalSet {
	return t.intervalSet
}

func (t *BaseTransition) getSerializationType() int {
	return t.serializationType
}

func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	panic("Not implemented")
}

const (
	TransitionEPSILON    = 1
	TransitionRANGE      = 2
	TransitionRULE       = 3
	TransitionPREDICATE  = 4 // e.g., {isType(input.LT(1))}?
	TransitionATOM       = 5
	TransitionACTION     = 6
	TransitionSET        = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
	TransitionNOTSET     = 8
	TransitionWILDCARD   = 9
	TransitionPRECEDENCE = 10
)

var TransitionserializationNames = []string{
	"INVALID",
	"EPSILON",
	"RANGE",
	"RULE",
	"PREDICATE",
	"ATOM",
	"ACTION",
	"SET",
	"NOT_SET",
	"WILDCARD",
	"PRECEDENCE",
}

//var TransitionserializationTypes struct {
//	EpsilonTransition int
//	RangeTransition int
//	RuleTransition int
//	PredicateTransition int
//	AtomTransition int
//	ActionTransition int
//	SetTransition int
//	NotSetTransition int
//	WildcardTransition int
//	PrecedencePredicateTransition int
//}{
//	TransitionEPSILON,
//	TransitionRANGE,
//	TransitionRULE,
//	TransitionPREDICATE,
//	TransitionATOM,
//	TransitionACTION,
//	TransitionSET,
//	TransitionNOTSET,
//	TransitionWILDCARD,
//	TransitionPRECEDENCE
//}

// TODO: make all transitions sets? no, should remove set edges
type AtomTransition struct {
	*BaseTransition
}

func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
	t := new(AtomTransition)
	t.BaseTransition = NewBaseTransition(target)

	t.label = intervalSet // The token type or character value; or, signifies a special intervalSet.
	t.intervalSet = t.makeLabel()
	t.serializationType = TransitionATOM

	return t
}

func (t *AtomTransition) makeLabel() *IntervalSet {
	s := NewIntervalSet()
	s.addOne(t.label)
	return s
}

func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return t.label == symbol
}

func (t *AtomTransition) String() string {
	return strconv.Itoa(t.label)
}

type RuleTransition struct {
	*BaseTransition

	followState           ATNState
	ruleIndex, precedence int
}

func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
	t := new(RuleTransition)
	t.BaseTransition = NewBaseTransition(ruleStart)

	t.ruleIndex = ruleIndex
	t.precedence = precedence
	t.followState = followState
	t.serializationType = TransitionRULE
	t.isEpsilon = true

	return t
}

func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return false
}

type EpsilonTransition struct {
	*BaseTransition

	outermostPrecedenceReturn int
}

func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
	t := new(EpsilonTransition)
	t.BaseTransition = NewBaseTransition(target)

	t.serializationType = TransitionEPSILON
	t.isEpsilon = true
	t.outermostPrecedenceReturn = outermostPrecedenceReturn
	return t
}

func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return false
}

func (t *EpsilonTransition) String() string {
	return "epsilon"
}

type RangeTransition struct {
	*BaseTransition

	start, stop int
}

func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
	t := new(RangeTransition)
	t.BaseTransition = NewBaseTransition(target)

	t.serializationType = TransitionRANGE
	t.start = start
	t.stop = stop
	t.intervalSet = t.makeLabel()
	return t
}

func (t *RangeTransition) makeLabel() *IntervalSet {
	s := NewIntervalSet()
	s.addRange(t.start, t.stop)
	return s
}

func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return symbol >= t.start && symbol <= t.stop
}

func (t *RangeTransition) String() string {
	var sb strings.Builder
	sb.WriteByte('\'')
	sb.WriteRune(rune(t.start))
	sb.WriteString("'..'")
	sb.WriteRune(rune(t.stop))
	sb.WriteByte('\'')
	return sb.String()
}

type AbstractPredicateTransition interface {
	Transition
	IAbstractPredicateTransitionFoo()
}

type BaseAbstractPredicateTransition struct {
	*BaseTransition
}

func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
	t := new(BaseAbstractPredicateTransition)
	t.BaseTransition = NewBaseTransition(target)

	return t
}

func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}

type PredicateTransition struct {
	*BaseAbstractPredicateTransition

	isCtxDependent       bool
	ruleIndex, predIndex int
}

func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
	t := new(PredicateTransition)
	t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)

	t.serializationType = TransitionPREDICATE
	t.ruleIndex = ruleIndex
	t.predIndex = predIndex
	t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
	t.isEpsilon = true
	return t
}

func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return false
}

func (t *PredicateTransition) getPredicate() *Predicate {
	return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent)
}

func (t *PredicateTransition) String() string {
	return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
}

type ActionTransition struct {
	*BaseTransition

	isCtxDependent                    bool
	ruleIndex, actionIndex, predIndex int
}

func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
	t := new(ActionTransition)
	t.BaseTransition = NewBaseTransition(target)

	t.serializationType = TransitionACTION
	t.ruleIndex = ruleIndex
	t.actionIndex = actionIndex
	t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
	t.isEpsilon = true
	return t
}

func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return false
}

func (t *ActionTransition) String() string {
	return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
}

type SetTransition struct {
	*BaseTransition
}

func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
	t := new(SetTransition)
	t.BaseTransition = NewBaseTransition(target)

	t.serializationType = TransitionSET
	if set != nil {
		t.intervalSet = set
	} else {
		t.intervalSet = NewIntervalSet()
		t.intervalSet.addOne(TokenInvalidType)
	}

	return t
}

func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return t.intervalSet.contains(symbol)
}

func (t *SetTransition) String() string {
	return t.intervalSet.String()
}

type NotSetTransition struct {
	*SetTransition
}

func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
	t := new(NotSetTransition)

	t.SetTransition = NewSetTransition(target, set)

	t.serializationType = TransitionNOTSET

	return t
}

func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
}

func (t *NotSetTransition) String() string {
	return "~" + t.intervalSet.String()
}

type WildcardTransition struct {
	*BaseTransition
}

func NewWildcardTransition(target ATNState) *WildcardTransition {
	t := new(WildcardTransition)
	t.BaseTransition = NewBaseTransition(target)

	t.serializationType = TransitionWILDCARD
	return t
}

func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
}

func (t *WildcardTransition) String() string {
	return "."
}

type PrecedencePredicateTransition struct {
	*BaseAbstractPredicateTransition

	precedence int
}

func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
	t := new(PrecedencePredicateTransition)
	t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)

	t.serializationType = TransitionPRECEDENCE
	t.precedence = precedence
	t.isEpsilon = true

	return t
}

func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return false
}

func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
	return NewPrecedencePredicate(t.precedence)
}

func (t *PrecedencePredicateTransition) String() string {
	return fmt.Sprint(t.precedence) + " >= _p"
}
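A minimal sketch of Matches on one concrete transition; NewBaseATNState (defined elsewhere in this runtime) is assumed here only to give the edge a target, since real targets come from a deserialized ATN:

func rangeMatchExample() bool {
	target := antlr.NewBaseATNState()
	t := antlr.NewRangeTransition(target, 'a', 'z')
	return t.Matches('m', 0, 0xFFFF) // true: 'm' falls inside 'a'..'z'
}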
256
vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go
generated vendored Normal file
@ -0,0 +1,256 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// The basic notion of a tree has a parent, a payload, and a list of children.
// It is the most abstract interface for all the trees used by ANTLR.

var TreeInvalidInterval = NewInterval(-1, -2)

type Tree interface {
	GetParent() Tree
	SetParent(Tree)
	GetPayload() interface{}
	GetChild(i int) Tree
	GetChildCount() int
	GetChildren() []Tree
}

type SyntaxTree interface {
	Tree

	GetSourceInterval() *Interval
}

type ParseTree interface {
	SyntaxTree

	Accept(Visitor ParseTreeVisitor) interface{}
	GetText() string

	ToStringTree([]string, Recognizer) string
}

type RuleNode interface {
	ParseTree

	GetRuleContext() RuleContext
	GetBaseRuleContext() *BaseRuleContext
}

type TerminalNode interface {
	ParseTree

	GetSymbol() Token
}

type ErrorNode interface {
	TerminalNode

	errorNode()
}

type ParseTreeVisitor interface {
	Visit(tree ParseTree) interface{}
	VisitChildren(node RuleNode) interface{}
	VisitTerminal(node TerminalNode) interface{}
	VisitErrorNode(node ErrorNode) interface{}
}

type BaseParseTreeVisitor struct{}

var _ ParseTreeVisitor = &BaseParseTreeVisitor{}

func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{}            { return tree.Accept(v) }
func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{}     { return nil }
func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{}   { return nil }

// TODO
//func (this ParseTreeVisitor) Visit(ctx) {
//	if (Utils.isArray(ctx)) {
//		self := this
//		return ctx.map(function(child) { return VisitAtom(self, child)})
//	} else {
//		return VisitAtom(this, ctx)
//	}
//}
//
//func VisitAtom(Visitor, ctx) {
//	if (ctx.parser == nil) { //is terminal
//		return
//	}
//
//	name := ctx.parser.ruleNames[ctx.ruleIndex]
//	funcName := "Visit" + Utils.titleCase(name)
//
//	return Visitor[funcName](ctx)
//}

type ParseTreeListener interface {
	VisitTerminal(node TerminalNode)
	VisitErrorNode(node ErrorNode)
	EnterEveryRule(ctx ParserRuleContext)
	ExitEveryRule(ctx ParserRuleContext)
}

type BaseParseTreeListener struct{}

var _ ParseTreeListener = &BaseParseTreeListener{}

func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode)      {}
func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode)        {}
func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext)  {}

type TerminalNodeImpl struct {
	parentCtx RuleContext

	symbol Token
}

var _ TerminalNode = &TerminalNodeImpl{}

func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
	tn := new(TerminalNodeImpl)

	tn.parentCtx = nil
	tn.symbol = symbol

	return tn
}

func (t *TerminalNodeImpl) GetChild(i int) Tree {
	return nil
}

func (t *TerminalNodeImpl) GetChildren() []Tree {
	return nil
}

func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
	panic("Cannot set children on terminal node")
}

func (t *TerminalNodeImpl) GetSymbol() Token {
	return t.symbol
}

func (t *TerminalNodeImpl) GetParent() Tree {
	return t.parentCtx
}

func (t *TerminalNodeImpl) SetParent(tree Tree) {
	t.parentCtx = tree.(RuleContext)
}

func (t *TerminalNodeImpl) GetPayload() interface{} {
	return t.symbol
}

func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
	if t.symbol == nil {
		return TreeInvalidInterval
	}
	tokenIndex := t.symbol.GetTokenIndex()
	return NewInterval(tokenIndex, tokenIndex)
}

func (t *TerminalNodeImpl) GetChildCount() int {
	return 0
}

func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
	return v.VisitTerminal(t)
}

func (t *TerminalNodeImpl) GetText() string {
	return t.symbol.GetText()
}

func (t *TerminalNodeImpl) String() string {
	if t.symbol.GetTokenType() == TokenEOF {
		return "<EOF>"
	}

	return t.symbol.GetText()
}

func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
	return t.String()
}

// Represents a token that was consumed during resynchronization
// rather than during a valid Match operation. For example,
// we will create this kind of node during single token insertion
// and deletion as well as during "consume until error recovery set"
// upon no viable alternative exceptions.

type ErrorNodeImpl struct {
	*TerminalNodeImpl
}

var _ ErrorNode = &ErrorNodeImpl{}

func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
	en := new(ErrorNodeImpl)
	en.TerminalNodeImpl = NewTerminalNodeImpl(token)
	return en
}

func (e *ErrorNodeImpl) errorNode() {}

func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
	return v.VisitErrorNode(e)
}

type ParseTreeWalker struct {
}

func NewParseTreeWalker() *ParseTreeWalker {
	return new(ParseTreeWalker)
}

// Walk performs a depth-first walk of the given parse tree, starting at the
// root. On each rule node, EnterRule is called before recursively walking
// down into the child nodes, and ExitRule is called after the recursive call
// to wind up.
func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
	switch tt := t.(type) {
	case ErrorNode:
		listener.VisitErrorNode(tt)
	case TerminalNode:
		listener.VisitTerminal(tt)
	default:
		p.EnterRule(listener, t.(RuleNode))
		for i := 0; i < t.GetChildCount(); i++ {
			child := t.GetChild(i)
			p.Walk(listener, child)
		}
		p.ExitRule(listener, t.(RuleNode))
	}
}

// EnterRule enters a grammar rule by first triggering the generic event
// ParseTreeListener.EnterEveryRule, then the event specific to the given
// parse tree node.
func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
	ctx := r.GetRuleContext().(ParserRuleContext)
	listener.EnterEveryRule(ctx)
	ctx.EnterRule(listener)
}

// ExitRule exits a grammar rule by first triggering the event specific to the
// given parse tree node, then the generic event ParseTreeListener.ExitEveryRule.
func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
	ctx := r.GetRuleContext().(ParserRuleContext)
	ctx.ExitRule(listener)
	listener.ExitEveryRule(ctx)
}

var ParseTreeWalkerDefault = NewParseTreeWalker()
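A minimal sketch of a custom listener driven by ParseTreeWalkerDefault; the ruleCounter type is illustrative, and the parse tree is assumed to come from a generated parser's start rule:

type ruleCounter struct {
	antlr.BaseParseTreeListener // supplies the no-op listener methods
	rules int
}

func (c *ruleCounter) EnterEveryRule(ctx antlr.ParserRuleContext) { c.rules++ }

func countRules(tree antlr.ParseTree) int {
	c := &ruleCounter{}
	antlr.ParseTreeWalkerDefault.Walk(c, tree) // depth-first, per Walk above
	return c.rules
}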
137
vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go
generated vendored Normal file
@ -0,0 +1,137 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "fmt"

/** A set of utility routines useful for all kinds of ANTLR trees. */

// TreesStringTree prints out a whole tree in LISP form. TreesGetNodeText is
// used on the node payloads to get the text for the nodes. It detects parse
// trees and extracts data appropriately.
func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
	if recog != nil {
		ruleNames = recog.GetRuleNames()
	}

	s := TreesGetNodeText(tree, ruleNames, nil)

	s = EscapeWhitespace(s, false)
	c := tree.GetChildCount()
	if c == 0 {
		return s
	}
	res := "(" + s + " "
	if c > 0 {
		s = TreesStringTree(tree.GetChild(0), ruleNames, nil)
		res += s
	}
	for i := 1; i < c; i++ {
		s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
		res += (" " + s)
	}
	res += ")"
	return res
}

func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
	if recog != nil {
		ruleNames = recog.GetRuleNames()
	}

	if ruleNames != nil {
		switch t2 := t.(type) {
		case RuleNode:
			t3 := t2.GetRuleContext()
			altNumber := t3.GetAltNumber()

			if altNumber != ATNInvalidAltNumber {
				return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber)
			}
			return ruleNames[t3.GetRuleIndex()]
		case ErrorNode:
			return fmt.Sprint(t2)
		case TerminalNode:
			if t2.GetSymbol() != nil {
				return t2.GetSymbol().GetText()
			}
		}
	}

	// no recognizer for rule names
	payload := t.GetPayload()
	if p2, ok := payload.(Token); ok {
		return p2.GetText()
	}

	return fmt.Sprint(t.GetPayload())
}

// TreesGetChildren returns an ordered list of all children of this node.
func TreesGetChildren(t Tree) []Tree {
	list := make([]Tree, 0)
	for i := 0; i < t.GetChildCount(); i++ {
		list = append(list, t.GetChild(i))
	}
	return list
}

// TreesgetAncestors returns a list of all ancestors of this node. The first
// node of the list is the root and the last is the parent of this node.
func TreesgetAncestors(t Tree) []Tree {
	ancestors := make([]Tree, 0)
	t = t.GetParent()
	for t != nil {
		f := []Tree{t}
		ancestors = append(f, ancestors...)
		t = t.GetParent()
	}
	return ancestors
}

func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
	return TreesfindAllNodes(t, ttype, true)
}

func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
	return TreesfindAllNodes(t, ruleIndex, false)
}

func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree {
	nodes := make([]ParseTree, 0)
	treesFindAllNodes(t, index, findTokens, &nodes)
	return nodes
}

func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) {
	// check this node (the root) first
	t2, ok := t.(TerminalNode)
	t3, ok2 := t.(ParserRuleContext)

	if findTokens && ok {
		if t2.GetSymbol().GetTokenType() == index {
			*nodes = append(*nodes, t2)
		}
	} else if !findTokens && ok2 {
		if t3.GetRuleIndex() == index {
			*nodes = append(*nodes, t3)
		}
	}
	// check children
	for i := 0; i < t.GetChildCount(); i++ {
		treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes)
	}
}

func TreesDescendants(t ParseTree) []ParseTree {
	nodes := []ParseTree{t}
	for i := 0; i < t.GetChildCount(); i++ {
		nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...)
	}
	return nodes
}
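A minimal sketch of the LISP-form printer; the tree and rule names are assumed to come from a generated parser, passing nil as the Recognizer makes the ruleNames slice supply the labels, and fmt is assumed imported:

func printTree(tree antlr.ParseTree, ruleNames []string) {
	// e.g. prints (expr (term 3) + (term 4)) for a toy grammar
	fmt.Println(antlr.TreesStringTree(tree, ruleNames, nil))
}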
355
vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go
generated vendored Normal file
@ -0,0 +1,355 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"bytes"
	"errors"
	"fmt"
	"math/bits"
	"strconv"
	"strings"
)

func intMin(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func intMax(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// A simple integer stack

type IntStack []int

var ErrEmptyStack = errors.New("Stack is empty")

func (s *IntStack) Pop() (int, error) {
	l := len(*s) - 1
	if l < 0 {
		return 0, ErrEmptyStack
	}
	v := (*s)[l]
	*s = (*s)[0:l]
	return v, nil
}

func (s *IntStack) Push(e int) {
	*s = append(*s, e)
}

func standardEqualsFunction(a interface{}, b interface{}) bool {
	ac, oka := a.(comparable)
	bc, okb := b.(comparable)

	if !oka || !okb {
		panic("Not Comparable")
	}

	return ac.equals(bc)
}

func standardHashFunction(a interface{}) int {
	if h, ok := a.(hasher); ok {
		return h.hash()
	}

	panic("Not Hasher")
}

type hasher interface {
	hash() int
}

const bitsPerWord = 64

func indexForBit(bit int) int {
	return bit / bitsPerWord
}

func wordForBit(data []uint64, bit int) uint64 {
	idx := indexForBit(bit)
	if idx >= len(data) {
		return 0
	}
	return data[idx]
}

func maskForBit(bit int) uint64 {
	return uint64(1) << (bit % bitsPerWord)
}

func wordsNeeded(bit int) int {
	return indexForBit(bit) + 1
}

type BitSet struct {
	data []uint64
}

func NewBitSet() *BitSet {
	return &BitSet{}
}

func (b *BitSet) add(value int) {
	idx := indexForBit(value)
	if idx >= len(b.data) {
		size := wordsNeeded(value)
		data := make([]uint64, size)
		copy(data, b.data)
		b.data = data
	}
	b.data[idx] |= maskForBit(value)
}

func (b *BitSet) clear(index int) {
	idx := indexForBit(index)
	if idx >= len(b.data) {
		return
	}
	b.data[idx] &= ^maskForBit(index)
}

func (b *BitSet) or(set *BitSet) {
	// Get the min size necessary to represent the bits in both sets.
	bLen := b.minLen()
	setLen := set.minLen()
	maxLen := intMax(bLen, setLen)
	if maxLen > len(b.data) {
		// Increase the size of len(b.data) to represent the bits in both sets.
		data := make([]uint64, maxLen)
		copy(data, b.data)
		b.data = data
	}
	// len(b.data) is at least setLen.
	for i := 0; i < setLen; i++ {
		b.data[i] |= set.data[i]
	}
}

func (b *BitSet) remove(value int) {
	b.clear(value)
}

func (b *BitSet) contains(value int) bool {
	idx := indexForBit(value)
	if idx >= len(b.data) {
		return false
	}
	return (b.data[idx] & maskForBit(value)) != 0
}

func (b *BitSet) minValue() int {
	for i, v := range b.data {
		if v == 0 {
			continue
		}
		return i*bitsPerWord + bits.TrailingZeros64(v)
	}
	return 2147483647
}

func (b *BitSet) equals(other interface{}) bool {
	otherBitSet, ok := other.(*BitSet)
	if !ok {
		return false
	}

	if b == otherBitSet {
		return true
	}

	// We only compare set bits, so we cannot rely on the two slices having the
	// same size. It is possible for two BitSets to have different slice lengths
	// but the same set bits. So we only compare the relevant words and ignore
	// the trailing zeros.
	bLen := b.minLen()
	otherLen := otherBitSet.minLen()

	if bLen != otherLen {
		return false
	}

	for i := 0; i < bLen; i++ {
		if b.data[i] != otherBitSet.data[i] {
			return false
		}
	}

	return true
}

func (b *BitSet) minLen() int {
	for i := len(b.data); i > 0; i-- {
		if b.data[i-1] != 0 {
			return i
		}
	}
	return 0
}

func (b *BitSet) length() int {
	cnt := 0
	for _, val := range b.data {
		cnt += bits.OnesCount64(val)
	}
	return cnt
}

func (b *BitSet) String() string {
	vals := make([]string, 0, b.length())

	for i, v := range b.data {
		for v != 0 {
			n := bits.TrailingZeros64(v)
			vals = append(vals, strconv.Itoa(i*bitsPerWord+n))
			v &= ^(uint64(1) << n)
		}
	}

	return "{" + strings.Join(vals, ", ") + "}"
}

type AltDict struct {
	data map[string]interface{}
}

func NewAltDict() *AltDict {
	d := new(AltDict)
	d.data = make(map[string]interface{})
	return d
}

func (a *AltDict) Get(key string) interface{} {
	key = "k-" + key
	return a.data[key]
}

func (a *AltDict) put(key string, value interface{}) {
	key = "k-" + key
	a.data[key] = value
}

func (a *AltDict) values() []interface{} {
	vs := make([]interface{}, len(a.data))
	i := 0
	for _, v := range a.data {
		vs[i] = v
		i++
	}
	return vs
}

type DoubleDict struct {
	data map[int]map[int]interface{}
}

func NewDoubleDict() *DoubleDict {
	dd := new(DoubleDict)
	dd.data = make(map[int]map[int]interface{})
	return dd
}

func (d *DoubleDict) Get(a, b int) interface{} {
	data := d.data[a]

	if data == nil {
		return nil
	}

	return data[b]
}

func (d *DoubleDict) set(a, b int, o interface{}) {
	data := d.data[a]

	if data == nil {
		data = make(map[int]interface{})
		d.data[a] = data
	}

	data[b] = o
}

func EscapeWhitespace(s string, escapeSpaces bool) string {
	s = strings.Replace(s, "\t", "\\t", -1)
	s = strings.Replace(s, "\n", "\\n", -1)
	s = strings.Replace(s, "\r", "\\r", -1)
	if escapeSpaces {
		s = strings.Replace(s, " ", "\u00B7", -1)
	}
	return s
}

func TerminalNodeToStringArray(sa []TerminalNode) []string {
	st := make([]string, len(sa))

	for i, s := range sa {
		st[i] = fmt.Sprintf("%v", s)
	}

	return st
}

func PrintArrayJavaStyle(sa []string) string {
	var buffer bytes.Buffer

	buffer.WriteString("[")

	for i, s := range sa {
		buffer.WriteString(s)
		if i != len(sa)-1 {
			buffer.WriteString(", ")
		}
	}

	buffer.WriteString("]")

	return buffer.String()
}

// murmur hash
func murmurInit(seed int) int {
	return seed
}

func murmurUpdate(h int, value int) int {
	const c1 uint32 = 0xCC9E2D51
	const c2 uint32 = 0x1B873593
	const r1 uint32 = 15
	const r2 uint32 = 13
	const m uint32 = 5
	const n uint32 = 0xE6546B64

	k := uint32(value)
	k *= c1
	k = (k << r1) | (k >> (32 - r1))
	k *= c2

	hash := uint32(h) ^ k
	hash = (hash << r2) | (hash >> (32 - r2))
	hash = hash*m + n
	return int(hash)
}

func murmurFinish(h int, numberOfWords int) int {
	var hash = uint32(h)
	hash ^= uint32(numberOfWords) << 2
	hash ^= hash >> 16
	hash *= 0x85ebca6b
	hash ^= hash >> 13
	hash *= 0xc2b2ae35
	hash ^= hash >> 16

	return int(hash)
}
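Two package-internal sketches (these helpers are unexported, so they would have to live in package antlr; the function names are illustrative): word growth and in-place union in BitSet, and the murmur helpers chained the way the runtime hashes sequences of component values:

func bitSetSketch() bool {
	a := NewBitSet()
	a.add(3)
	a.add(70) // forces growth of data to two 64-bit words
	b := NewBitSet()
	b.add(9)
	a.or(b) // union in place, sized to cover both operands
	return a.contains(3) && a.contains(9) && a.contains(70) // true
}

func hashIntsSketch(values []int) int {
	h := murmurInit(0)
	for _, v := range values {
		h = murmurUpdate(h, v)
	}
	return murmurFinish(h, len(values))
}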
237
vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils_set.go
generated vendored Normal file
@ -0,0 +1,237 @@
package antlr

import "math"

const (
	_initalCapacity       = 16
	_initalBucketCapacity = 8
	_loadFactor           = 0.75
)

var _ Set = (*array2DHashSet)(nil)

type Set interface {
	Add(value interface{}) (added interface{})
	Len() int
	Get(value interface{}) (found interface{})
	Contains(value interface{}) bool
	Values() []interface{}
	Each(f func(interface{}) bool)
}

type array2DHashSet struct {
	buckets          [][]interface{}
	hashcodeFunction func(interface{}) int
	equalsFunction   func(interface{}, interface{}) bool

	n         int // How many elements in set
	threshold int // when to expand

	currentPrime          int // jump by 4 primes each expand or whatever
	initialBucketCapacity int
}

func (as *array2DHashSet) Each(f func(interface{}) bool) {
	if as.Len() < 1 {
		return
	}

	for _, bucket := range as.buckets {
		for _, o := range bucket {
			if o == nil {
				break
			}
			if !f(o) {
				return
			}
		}
	}
}

func (as *array2DHashSet) Values() []interface{} {
	if as.Len() < 1 {
		return nil
	}

	values := make([]interface{}, 0, as.Len())
	as.Each(func(i interface{}) bool {
		values = append(values, i)
		return true
	})
	return values
}

func (as *array2DHashSet) Contains(value interface{}) bool {
	return as.Get(value) != nil
}

func (as *array2DHashSet) Add(value interface{}) interface{} {
	if as.n > as.threshold {
		as.expand()
	}
	return as.innerAdd(value)
}

func (as *array2DHashSet) expand() {
	old := as.buckets

	as.currentPrime += 4

	var (
		newCapacity      = len(as.buckets) << 1
		newTable         = as.createBuckets(newCapacity)
		newBucketLengths = make([]int, len(newTable))
	)

	as.buckets = newTable
	as.threshold = int(float64(newCapacity) * _loadFactor)

	for _, bucket := range old {
		if bucket == nil {
			continue
		}

		for _, o := range bucket {
			if o == nil {
				break
			}

			b := as.getBuckets(o)
			bucketLength := newBucketLengths[b]
			var newBucket []interface{}
			if bucketLength == 0 {
				// new bucket
				newBucket = as.createBucket(as.initialBucketCapacity)
				newTable[b] = newBucket
			} else {
				newBucket = newTable[b]
				if bucketLength == len(newBucket) {
					// expand
					newBucketCopy := make([]interface{}, len(newBucket)<<1)
					copy(newBucketCopy[:bucketLength], newBucket)
					newBucket = newBucketCopy
					newTable[b] = newBucket
				}
			}

			newBucket[bucketLength] = o
			newBucketLengths[b]++
		}
	}
}

func (as *array2DHashSet) Len() int {
	return as.n
}

func (as *array2DHashSet) Get(o interface{}) interface{} {
	if o == nil {
		return nil
	}

	b := as.getBuckets(o)
	bucket := as.buckets[b]
	if bucket == nil { // no bucket
		return nil
	}

	for _, e := range bucket {
		if e == nil {
			return nil // empty slot; not there
		}
		if as.equalsFunction(e, o) {
			return e
		}
	}

	return nil
}

func (as *array2DHashSet) innerAdd(o interface{}) interface{} {
	b := as.getBuckets(o)

	bucket := as.buckets[b]

	// new bucket
	if bucket == nil {
		bucket = as.createBucket(as.initialBucketCapacity)
		bucket[0] = o

		as.buckets[b] = bucket
		as.n++
		return o
	}

	// look for it in bucket
	for i := 0; i < len(bucket); i++ {
		existing := bucket[i]
		if existing == nil { // empty slot; not there, add.
			bucket[i] = o
			as.n++
			return o
		}

		if as.equalsFunction(existing, o) { // found existing, quit
			return existing
		}
	}

	// full bucket, expand and add to end
	oldLength := len(bucket)
	bucketCopy := make([]interface{}, oldLength<<1)
	copy(bucketCopy[:oldLength], bucket)
	bucket = bucketCopy
	as.buckets[b] = bucket
	bucket[oldLength] = o
	as.n++
	return o
}

func (as *array2DHashSet) getBuckets(value interface{}) int {
	hash := as.hashcodeFunction(value)
	return hash & (len(as.buckets) - 1)
}

func (as *array2DHashSet) createBuckets(cap int) [][]interface{} {
	return make([][]interface{}, cap)
}

func (as *array2DHashSet) createBucket(cap int) []interface{} {
	return make([]interface{}, cap)
}

func newArray2DHashSetWithCap(
	hashcodeFunction func(interface{}) int,
	equalsFunction func(interface{}, interface{}) bool,
	initCap int,
	initBucketCap int,
) *array2DHashSet {
	if hashcodeFunction == nil {
		hashcodeFunction = standardHashFunction
	}

	if equalsFunction == nil {
		equalsFunction = standardEqualsFunction
	}

	ret := &array2DHashSet{
		hashcodeFunction: hashcodeFunction,
		equalsFunction:   equalsFunction,

		n:         0,
		threshold: int(math.Floor(_initalCapacity * _loadFactor)),

		currentPrime:          1,
		initialBucketCapacity: initBucketCap,
	}

	ret.buckets = ret.createBuckets(initCap)
	return ret
}

func newArray2DHashSet(
	hashcodeFunction func(interface{}) int,
	equalsFunction func(interface{}, interface{}) bool,
) *array2DHashSet {
	return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
}
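A package-internal sketch of the set with explicit hash and equality functions; plain ints stand in for the ATN configurations the runtime actually stores, and the function name is illustrative:

func setSketch() int {
	s := newArray2DHashSet(
		func(v interface{}) int { return v.(int) },    // hash
		func(a, b interface{}) bool { return a == b }, // equality
	)
	s.Add(1)
	s.Add(1) // duplicate; Add returns the existing element
	s.Add(2)
	return s.Len() // 2
}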
36
vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go
generated vendored
@ -1,36 +0,0 @@
// Package ec2query provides serialization of AWS EC2 requests and responses.
package ec2query

//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/ec2.json build_test.go

import (
	"net/url"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)

// BuildHandler is a named request handler for building ec2query protocol requests
var BuildHandler = request.NamedHandler{Name: "awssdk.ec2query.Build", Fn: Build}

// Build builds a request for the EC2 protocol.
func Build(r *request.Request) {
	body := url.Values{
		"Action":  {r.Operation.Name},
		"Version": {r.ClientInfo.APIVersion},
	}
	if err := queryutil.Parse(body, r.Params, true); err != nil {
		r.Error = awserr.New(request.ErrCodeSerialization,
			"failed encoding EC2 Query request", err)
	}

	if !r.IsPresigned() {
		r.HTTPRequest.Method = "POST"
		r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
		r.SetBufferBody([]byte(body.Encode()))
	} else { // This is a pre-signed request
		r.HTTPRequest.Method = "GET"
		r.HTTPRequest.URL.RawQuery = body.Encode()
	}
}
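A hedged sketch of the body shape Build produces for a non-presigned call, using only the standard library; the operation name and version are illustrative, not taken from a real request:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	body := url.Values{
		"Action":  {"DescribeInstances"},
		"Version": {"2016-11-15"},
	}
	// Sent as a form-encoded POST body by Build above:
	fmt.Println(body.Encode()) // Action=DescribeInstances&Version=2016-11-15
}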
78
vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go
generated vendored
@ -1,78 +0,0 @@
package ec2query

//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/ec2.json unmarshal_test.go

import (
	"encoding/xml"
	"strings"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// UnmarshalHandler is a named request handler for unmarshaling ec2query protocol requests
var UnmarshalHandler = request.NamedHandler{Name: "awssdk.ec2query.Unmarshal", Fn: Unmarshal}

// UnmarshalMetaHandler is a named request handler for unmarshaling ec2query protocol request metadata
var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalMeta", Fn: UnmarshalMeta}

// UnmarshalErrorHandler is a named request handler for unmarshaling ec2query protocol request errors
var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalError", Fn: UnmarshalError}

// Unmarshal unmarshals a response body for the EC2 protocol.
func Unmarshal(r *request.Request) {
	defer r.HTTPResponse.Body.Close()
	if r.DataFilled() {
		decoder := xml.NewDecoder(r.HTTPResponse.Body)
		err := xmlutil.UnmarshalXML(r.Data, decoder, "")
		if err != nil {
			r.Error = awserr.NewRequestFailure(
				awserr.New(request.ErrCodeSerialization,
					"failed decoding EC2 Query response", err),
				r.HTTPResponse.StatusCode,
				r.RequestID,
			)
			return
		}
	}
}

// UnmarshalMeta unmarshals response headers for the EC2 protocol.
func UnmarshalMeta(r *request.Request) {
	r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
	if r.RequestID == "" {
		// Alternative version of request id in the header
		r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
	}
}

type xmlErrorResponse struct {
	XMLName   xml.Name `xml:"Response"`
	Code      string   `xml:"Errors>Error>Code"`
	Message   string   `xml:"Errors>Error>Message"`
	RequestID string   `xml:"RequestID"`
}

// UnmarshalError unmarshals a response error for the EC2 protocol.
func UnmarshalError(r *request.Request) {
	defer r.HTTPResponse.Body.Close()

	var respErr xmlErrorResponse
	err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body)
	if err != nil {
		r.Error = awserr.NewRequestFailure(
			awserr.New(request.ErrCodeSerialization,
				"failed to unmarshal error message", err),
			r.HTTPResponse.StatusCode,
			r.RequestID,
		)
		return
	}

	r.Error = awserr.NewRequestFailure(
		awserr.New(strings.TrimSpace(respErr.Code), strings.TrimSpace(respErr.Message), nil),
		r.HTTPResponse.StatusCode,
		respErr.RequestID,
	)
}
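A self-contained sketch of the error envelope UnmarshalError expects, decoded with the same struct tags using only the standard library; the error code, message, and request id are made up:

package main

import (
	"encoding/xml"
	"fmt"
)

type xmlErrorResponse struct {
	XMLName   xml.Name `xml:"Response"`
	Code      string   `xml:"Errors>Error>Code"`
	Message   string   `xml:"Errors>Error>Message"`
	RequestID string   `xml:"RequestID"`
}

func main() {
	const sample = `<Response><Errors><Error>` +
		`<Code>InvalidInstanceID.NotFound</Code>` +
		`<Message>The instance ID does not exist</Message>` +
		`</Error></Errors><RequestID>abc-123</RequestID></Response>`
	var e xmlErrorResponse
	if err := xml.Unmarshal([]byte(sample), &e); err != nil {
		panic(err)
	}
	fmt.Println(e.Code, e.RequestID) // InvalidInstanceID.NotFound abc-123
}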
190656
vendor/github.com/aws/aws-sdk-go/service/ec2/api.go
generated vendored
File diff suppressed because it is too large
107
vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go
generated vendored
@ -1,107 +0,0 @@
package ec2

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/aws/aws-sdk-go/aws/request"
)

const (
	// ec2CopySnapshotPresignedUrlCustomization handler name
	ec2CopySnapshotPresignedUrlCustomization = "ec2CopySnapshotPresignedUrl"

	// customRetryerMinRetryDelay sets min retry delay
	customRetryerMinRetryDelay = 1 * time.Second

	// customRetryerMaxRetryDelay sets max retry delay
	customRetryerMaxRetryDelay = 8 * time.Second
)

func init() {
	initRequest = func(r *request.Request) {
		if r.Operation.Name == opCopySnapshot { // fill the PresignedURL parameter
			r.Handlers.Build.PushFrontNamed(request.NamedHandler{
				Name: ec2CopySnapshotPresignedUrlCustomization,
				Fn:   fillPresignedURL,
			})
		}

		// only set the retryer on the request if the config doesn't have a retryer
		if r.Config.Retryer == nil && (r.Operation.Name == opModifyNetworkInterfaceAttribute || r.Operation.Name == opAssignPrivateIpAddresses) {
			maxRetries := client.DefaultRetryerMaxNumRetries
			if m := r.Config.MaxRetries; m != nil && *m != aws.UseServiceDefaultRetries {
				maxRetries = *m
			}
			r.Retryer = client.DefaultRetryer{
				NumMaxRetries:    maxRetries,
				MinRetryDelay:    customRetryerMinRetryDelay,
				MinThrottleDelay: customRetryerMinRetryDelay,
				MaxRetryDelay:    customRetryerMaxRetryDelay,
				MaxThrottleDelay: customRetryerMaxRetryDelay,
			}
		}
	}
}

func fillPresignedURL(r *request.Request) {
	if !r.ParamsFilled() {
		return
	}

	origParams := r.Params.(*CopySnapshotInput)

	// Stop if PresignedURL is set
	if origParams.PresignedUrl != nil {
		return
	}

	// Always use the config region as the destination region for SDKs
	origParams.DestinationRegion = r.Config.Region

	newParams := awsutil.CopyOf(origParams).(*CopySnapshotInput)

	// Create a new request based on the existing request. We will use this to
	// presign the CopySnapshot request against the source region.
	cfg := r.Config.Copy(aws.NewConfig().
		WithEndpoint("").
		WithRegion(aws.StringValue(origParams.SourceRegion)))

	clientInfo := r.ClientInfo
	resolved, err := r.Config.EndpointResolver.EndpointFor(
		clientInfo.ServiceName, aws.StringValue(cfg.Region),
		func(opt *endpoints.Options) {
			opt.DisableSSL = aws.BoolValue(cfg.DisableSSL)
			opt.UseDualStack = aws.BoolValue(cfg.UseDualStack)
			opt.UseDualStackEndpoint = cfg.UseDualStackEndpoint
			opt.UseFIPSEndpoint = cfg.UseFIPSEndpoint
			opt.Logger = r.Config.Logger
			opt.LogDeprecated = r.Config.LogLevel.Matches(aws.LogDebugWithDeprecated)
		},
	)
	if err != nil {
		r.Error = err
		return
	}

	clientInfo.Endpoint = resolved.URL
	clientInfo.SigningRegion = resolved.SigningRegion

	// Copy handlers without the Presigned URL customization to avoid an infinite loop
	handlersWithoutPresignCustomization := r.Handlers.Copy()
	handlersWithoutPresignCustomization.Build.RemoveByName(ec2CopySnapshotPresignedUrlCustomization)

	// Presign a CopySnapshot request with modified params
	req := request.New(*cfg, clientInfo, handlersWithoutPresignCustomization, r.Retryer, r.Operation, newParams, r.Data)
	url, err := req.Presign(5 * time.Minute) // 5 minutes should be enough.
	if err != nil { // bubble error back up to original request
		r.Error = err
		return
	}

	// We have our URL, set it on params
	origParams.PresignedUrl = &url
}
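A hedged sketch of the retryer the init hook installs for the two throttle-prone operations, reproduced standalone; client.DefaultRetryer and its fields are the aws-sdk-go types used above, while the wrapper function itself is illustrative:

func throttleTolerantRetryer(maxRetries int) client.DefaultRetryer {
	return client.DefaultRetryer{
		NumMaxRetries:    maxRetries,
		MinRetryDelay:    customRetryerMinRetryDelay, // 1s floor
		MinThrottleDelay: customRetryerMinRetryDelay,
		MaxRetryDelay:    customRetryerMaxRetryDelay, // 8s cap
		MaxThrottleDelay: customRetryerMaxRetryDelay,
	}
}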
51
vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go
generated vendored
@ -1,51 +0,0 @@
|
||||
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
|
||||
|
||||
// Package ec2 provides the client and types for making API
|
||||
// requests to Amazon Elastic Compute Cloud.
|
||||
//
|
||||
// Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing
|
||||
// capacity in the Amazon Web Services Cloud. Using Amazon EC2 eliminates the
|
||||
// need to invest in hardware up front, so you can develop and deploy applications
|
||||
// faster. Amazon Virtual Private Cloud (Amazon VPC) enables you to provision
|
||||
// a logically isolated section of the Amazon Web Services Cloud where you can
|
||||
// launch Amazon Web Services resources in a virtual network that you've defined.
|
||||
// Amazon Elastic Block Store (Amazon EBS) provides block level storage volumes
|
||||
// for use with EC2 instances. EBS volumes are highly available and reliable
|
||||
// storage volumes that can be attached to any running instance and used like
|
||||
// a hard drive.
|
||||
//
|
||||
// To learn more, see the following resources:
|
||||
//
|
||||
// - Amazon EC2: Amazon EC2 product page (http://aws.amazon.com/ec2), Amazon
|
||||
// EC2 documentation (https://docs.aws.amazon.com/ec2/index.html)
|
||||
//
|
||||
// - Amazon EBS: Amazon EBS product page (http://aws.amazon.com/ebs), Amazon
|
||||
// EBS documentation (https://docs.aws.amazon.com/ebs/index.html)
|
||||
//
|
||||
// - Amazon VPC: Amazon VPC product page (http://aws.amazon.com/vpc), Amazon
|
||||
// VPC documentation (https://docs.aws.amazon.com/vpc/index.html)
|
||||
//
|
||||
// - VPN: VPN product page (http://aws.amazon.com/vpn), VPN documentation
|
||||
// (https://docs.aws.amazon.com/vpn/index.html)
|
||||
//
|
||||
// See https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15 for more information on this service.
|
||||
//
|
||||
// See ec2 package documentation for more information.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/service/ec2/
|
||||
//
|
||||
// # Using the Client
|
||||
//
|
||||
// To contact Amazon Elastic Compute Cloud with the SDK use the New function to create
|
||||
// a new service client. With that client you can make API requests to the service.
|
||||
// These clients are safe to use concurrently.
|
||||
//
|
||||
// See the SDK's documentation for more information on how to use the SDK.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/
|
||||
//
|
||||
// See aws.Config documentation for more information on configuring SDK clients.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
|
||||
//
|
||||
// See the Amazon Elastic Compute Cloud client EC2 for more
|
||||
// information on creating client for this service.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/service/ec2/#New
|
||||
package ec2
|
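A minimal sketch of the client pattern the package docs describe (the region is a placeholder; error handling is trimmed to essentials):

	package main

	import (
		"fmt"
		"log"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/ec2"
	)

	func main() {
		// Shared config and credentials are resolved as usual by the session.
		sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))

		// EC2 clients are safe for concurrent use.
		svc := ec2.New(sess)

		out, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("reservations:", len(out.Reservations))
	}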
3 vendor/github.com/aws/aws-sdk-go/service/ec2/errors.go generated vendored
@@ -1,3 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package ec2
104 vendor/github.com/aws/aws-sdk-go/service/ec2/service.go generated vendored
@@ -1,104 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package ec2

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/signer/v4"
	"github.com/aws/aws-sdk-go/private/protocol/ec2query"
)

// EC2 provides the API operation methods for making requests to
// Amazon Elastic Compute Cloud. See this package's package overview docs
// for details on the service.
//
// EC2 methods are safe to use concurrently. It is not safe to
// mutate any of the struct's properties though.
type EC2 struct {
	*client.Client
}

// Used for custom client initialization logic
var initClient func(*client.Client)

// Used for custom request initialization logic
var initRequest func(*request.Request)

// Service information constants
const (
	ServiceName = "ec2"       // Name of service.
	EndpointsID = ServiceName // ID to lookup a service endpoint with.
	ServiceID   = "EC2"       // ServiceID is a unique identifier of a specific service.
)

// New creates a new instance of the EC2 client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//
//	mySession := session.Must(session.NewSession())
//
//	// Create a EC2 client from just a session.
//	svc := ec2.New(mySession)
//
//	// Create a EC2 client with additional configuration
//	svc := ec2.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2 {
	c := p.ClientConfig(EndpointsID, cfgs...)
	if c.SigningNameDerived || len(c.SigningName) == 0 {
		c.SigningName = EndpointsID
		// No Fallback
	}
	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
}

// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *EC2 {
	svc := &EC2{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName:    ServiceName,
				ServiceID:      ServiceID,
				SigningName:    signingName,
				SigningRegion:  signingRegion,
				PartitionID:    partitionID,
				Endpoint:       endpoint,
				APIVersion:     "2016-11-15",
				ResolvedRegion: resolvedRegion,
			},
			handlers,
		),
	}

	// Handlers
	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler)

	// Run custom client initialization if present
	if initClient != nil {
		initClient(svc.Client)
	}

	return svc
}

// newRequest creates a new request for a EC2 operation and runs any
// custom request initialization.
func (c *EC2) newRequest(op *request.Operation, params, data interface{}) *request.Request {
	req := c.NewRequest(op, params, data)

	// Run custom request initialization if present
	if initRequest != nil {
		initRequest(req)
	}

	return req
}
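newClient wires every request through named handler lists (Build, Sign, Send, Unmarshal). As a hedged sketch of what those lists enable, a caller can hook the same pipeline on a constructed client (assumes the svc client and imports from the sketch above, plus "github.com/aws/aws-sdk-go/aws/request"; the logging hook is illustrative only):

	// Observe every outgoing EC2 request by pushing a custom handler
	// onto the client's Send handler list.
	svc.Handlers.Send.PushFront(func(r *request.Request) {
		log.Printf("sending %s %s", r.ClientInfo.ServiceName, r.Operation.Name)
	})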
1835 vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go generated vendored
(File diff suppressed because it is too large)
202 vendor/github.com/coreos/go-semver/LICENSE generated vendored Normal file
@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
5 vendor/github.com/coreos/go-semver/NOTICE generated vendored Normal file
@@ -0,0 +1,5 @@
CoreOS Project
Copyright 2018 CoreOS, Inc

This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
296 vendor/github.com/coreos/go-semver/semver/semver.go generated vendored Normal file
@@ -0,0 +1,296 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Semantic Versions http://semver.org
package semver

import (
	"bytes"
	"errors"
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

type Version struct {
	Major      int64
	Minor      int64
	Patch      int64
	PreRelease PreRelease
	Metadata   string
}

type PreRelease string

func splitOff(input *string, delim string) (val string) {
	parts := strings.SplitN(*input, delim, 2)

	if len(parts) == 2 {
		*input = parts[0]
		val = parts[1]
	}

	return val
}

func New(version string) *Version {
	return Must(NewVersion(version))
}

func NewVersion(version string) (*Version, error) {
	v := Version{}

	if err := v.Set(version); err != nil {
		return nil, err
	}

	return &v, nil
}

// Must is a helper for wrapping NewVersion and will panic if err is not nil.
func Must(v *Version, err error) *Version {
	if err != nil {
		panic(err)
	}
	return v
}

// Set parses and updates v from the given version string. Implements flag.Value
func (v *Version) Set(version string) error {
	metadata := splitOff(&version, "+")
	preRelease := PreRelease(splitOff(&version, "-"))
	dotParts := strings.SplitN(version, ".", 3)

	if len(dotParts) != 3 {
		return fmt.Errorf("%s is not in dotted-tri format", version)
	}

	if err := validateIdentifier(string(preRelease)); err != nil {
		return fmt.Errorf("failed to validate pre-release: %v", err)
	}

	if err := validateIdentifier(metadata); err != nil {
		return fmt.Errorf("failed to validate metadata: %v", err)
	}

	parsed := make([]int64, 3, 3)

	for i, v := range dotParts[:3] {
		val, err := strconv.ParseInt(v, 10, 64)
		parsed[i] = val
		if err != nil {
			return err
		}
	}

	v.Metadata = metadata
	v.PreRelease = preRelease
	v.Major = parsed[0]
	v.Minor = parsed[1]
	v.Patch = parsed[2]
	return nil
}

func (v Version) String() string {
	var buffer bytes.Buffer

	fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)

	if v.PreRelease != "" {
		fmt.Fprintf(&buffer, "-%s", v.PreRelease)
	}

	if v.Metadata != "" {
		fmt.Fprintf(&buffer, "+%s", v.Metadata)
	}

	return buffer.String()
}

func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var data string
	if err := unmarshal(&data); err != nil {
		return err
	}
	return v.Set(data)
}

func (v Version) MarshalJSON() ([]byte, error) {
	return []byte(`"` + v.String() + `"`), nil
}

func (v *Version) UnmarshalJSON(data []byte) error {
	l := len(data)
	if l == 0 || string(data) == `""` {
		return nil
	}
	if l < 2 || data[0] != '"' || data[l-1] != '"' {
		return errors.New("invalid semver string")
	}
	return v.Set(string(data[1 : l-1]))
}

// Compare tests if v is less than, equal to, or greater than versionB,
// returning -1, 0, or +1 respectively.
func (v Version) Compare(versionB Version) int {
	if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
		return cmp
	}
	return preReleaseCompare(v, versionB)
}

// Equal tests if v is equal to versionB.
func (v Version) Equal(versionB Version) bool {
	return v.Compare(versionB) == 0
}

// LessThan tests if v is less than versionB.
func (v Version) LessThan(versionB Version) bool {
	return v.Compare(versionB) < 0
}

// Slice converts the comparable parts of the semver into a slice of integers.
func (v Version) Slice() []int64 {
	return []int64{v.Major, v.Minor, v.Patch}
}

func (p PreRelease) Slice() []string {
	preRelease := string(p)
	return strings.Split(preRelease, ".")
}

func preReleaseCompare(versionA Version, versionB Version) int {
	a := versionA.PreRelease
	b := versionB.PreRelease

	/* Handle the case where if two versions are otherwise equal it is the
	 * one without a PreRelease that is greater */
	if len(a) == 0 && (len(b) > 0) {
		return 1
	} else if len(b) == 0 && (len(a) > 0) {
		return -1
	}

	// If there is a prerelease, check and compare each part.
	return recursivePreReleaseCompare(a.Slice(), b.Slice())
}

func recursiveCompare(versionA []int64, versionB []int64) int {
	if len(versionA) == 0 {
		return 0
	}

	a := versionA[0]
	b := versionB[0]

	if a > b {
		return 1
	} else if a < b {
		return -1
	}

	return recursiveCompare(versionA[1:], versionB[1:])
}

func recursivePreReleaseCompare(versionA []string, versionB []string) int {
	// A larger set of pre-release fields has a higher precedence than a smaller set,
	// if all of the preceding identifiers are equal.
	if len(versionA) == 0 {
		if len(versionB) > 0 {
			return -1
		}
		return 0
	} else if len(versionB) == 0 {
		// We're longer than versionB so return 1.
		return 1
	}

	a := versionA[0]
	b := versionB[0]

	aInt := false
	bInt := false

	aI, err := strconv.Atoi(versionA[0])
	if err == nil {
		aInt = true
	}

	bI, err := strconv.Atoi(versionB[0])
	if err == nil {
		bInt = true
	}

	// Numeric identifiers always have lower precedence than non-numeric identifiers.
	if aInt && !bInt {
		return -1
	} else if !aInt && bInt {
		return 1
	}

	// Handle Integer Comparison
	if aInt && bInt {
		if aI > bI {
			return 1
		} else if aI < bI {
			return -1
		}
	}

	// Handle String Comparison
	if a > b {
		return 1
	} else if a < b {
		return -1
	}

	return recursivePreReleaseCompare(versionA[1:], versionB[1:])
}

// BumpMajor increments the Major field by 1 and resets all other fields to their default values
func (v *Version) BumpMajor() {
	v.Major += 1
	v.Minor = 0
	v.Patch = 0
	v.PreRelease = PreRelease("")
	v.Metadata = ""
}

// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
func (v *Version) BumpMinor() {
	v.Minor += 1
	v.Patch = 0
	v.PreRelease = PreRelease("")
	v.Metadata = ""
}

// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
func (v *Version) BumpPatch() {
	v.Patch += 1
	v.PreRelease = PreRelease("")
	v.Metadata = ""
}

// validateIdentifier makes sure the provided identifier satisfies semver spec
func validateIdentifier(id string) error {
	if id != "" && !reIdentifier.MatchString(id) {
		return fmt.Errorf("%s is not a valid semver identifier", id)
	}
	return nil
}

// reIdentifier is a regular expression used to check that pre-release and metadata
// identifiers satisfy the spec requirements
var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)
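A usage sketch of the parsing and comparison API above (version strings are illustrative):

	package main

	import (
		"fmt"

		"github.com/coreos/go-semver/semver"
	)

	func main() {
		// New panics on invalid input; use NewVersion to get an error instead.
		a := semver.New("1.27.2-rc.1+build.7")
		b := semver.Must(semver.NewVersion("1.27.2"))

		// A pre-release sorts before its release; build metadata is ignored.
		fmt.Println(a.LessThan(*b)) // true

		b.BumpMinor()
		fmt.Println(b) // 1.28.0
	}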
38 vendor/github.com/coreos/go-semver/semver/sort.go generated vendored Normal file
@@ -0,0 +1,38 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package semver

import (
	"sort"
)

type Versions []*Version

func (s Versions) Len() int {
	return len(s)
}

func (s Versions) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s Versions) Less(i, j int) bool {
	return s[i].LessThan(*s[j])
}

// Sort sorts the given slice of Version
func Sort(versions []*Version) {
	sort.Sort(Versions(versions))
}
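A small sketch of Sort in action (assumes the semver import from the example above; note the numeric, not lexical, ordering):

	vs := []*semver.Version{
		semver.New("2.0.0"),
		semver.New("1.10.0"),
		semver.New("1.2.0"),
	}
	semver.Sort(vs)
	fmt.Println(vs[0], vs[1], vs[2]) // 1.2.0 1.10.0 2.0.0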
191 vendor/github.com/coreos/go-systemd/v22/LICENSE generated vendored Normal file
@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.

"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.

2. Grant of Copyright License.

Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.

3. Grant of Patent License.

Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.

4. Redistribution.

You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:

You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.

5. Submission of Contributions.

Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.

6. Trademarks.

This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.

7. Disclaimer of Warranty.

Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.

8. Limitation of Liability.

In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability.

While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work

To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
5 vendor/github.com/coreos/go-systemd/v22/NOTICE generated vendored Normal file
@@ -0,0 +1,5 @@
CoreOS Project
Copyright 2018 CoreOS, Inc

This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
84 vendor/github.com/coreos/go-systemd/v22/daemon/sdnotify.go generated vendored Normal file
@@ -0,0 +1,84 @@
// Copyright 2014 Docker, Inc.
// Copyright 2015-2018 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

// Package daemon provides a Go implementation of the sd_notify protocol.
// It can be used to inform systemd of service start-up completion, watchdog
// events, and other status changes.
//
// https://www.freedesktop.org/software/systemd/man/sd_notify.html#Description
package daemon

import (
	"net"
	"os"
)

const (
	// SdNotifyReady tells the service manager that service startup is finished
	// or the service finished loading its configuration.
	SdNotifyReady = "READY=1"

	// SdNotifyStopping tells the service manager that the service is beginning
	// its shutdown.
	SdNotifyStopping = "STOPPING=1"

	// SdNotifyReloading tells the service manager that this service is
	// reloading its configuration. Note that you must call SdNotifyReady when
	// it has completed reloading.
	SdNotifyReloading = "RELOADING=1"

	// SdNotifyWatchdog tells the service manager to update the watchdog
	// timestamp for the service.
	SdNotifyWatchdog = "WATCHDOG=1"
)

// SdNotify sends a message to the init daemon. It is common to ignore the error.
// If `unsetEnvironment` is true, the environment variable `NOTIFY_SOCKET`
// will be unconditionally unset.
//
// It returns one of the following:
// (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset)
// (false, err) - notification supported, but failure happened (e.g. error connecting to NOTIFY_SOCKET or while sending data)
// (true, nil) - notification supported, data has been sent
func SdNotify(unsetEnvironment bool, state string) (bool, error) {
	socketAddr := &net.UnixAddr{
		Name: os.Getenv("NOTIFY_SOCKET"),
		Net:  "unixgram",
	}

	// NOTIFY_SOCKET not set
	if socketAddr.Name == "" {
		return false, nil
	}

	if unsetEnvironment {
		if err := os.Unsetenv("NOTIFY_SOCKET"); err != nil {
			return false, err
		}
	}

	conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
	// Error connecting to NOTIFY_SOCKET
	if err != nil {
		return false, err
	}
	defer conn.Close()

	if _, err = conn.Write([]byte(state)); err != nil {
		return false, err
	}
	return true, nil
}
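A usage sketch of the readiness notification (assumes imports of "log" and "github.com/coreos/go-systemd/v22/daemon"):

	// Signal readiness to systemd once start-up is done.
	sent, err := daemon.SdNotify(false, daemon.SdNotifyReady)
	switch {
	case err != nil:
		log.Printf("sd_notify failed: %v", err)
	case !sent:
		log.Print("not running under systemd (NOTIFY_SOCKET unset)")
	}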
73 vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go generated vendored Normal file
@@ -0,0 +1,73 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package daemon

import (
	"fmt"
	"os"
	"strconv"
	"time"
)

// SdWatchdogEnabled returns watchdog information for a service.
// Processes should call daemon.SdNotify(false, daemon.SdNotifyWatchdog) every
// time / 2 (i.e. at half the returned interval).
// If `unsetEnvironment` is true, the environment variables `WATCHDOG_USEC` and
// `WATCHDOG_PID` will be unconditionally unset.
//
// It returns one of the following:
// (0, nil) - watchdog isn't enabled or we aren't the watched PID.
// (0, err) - an error happened (e.g. error converting time).
// (time, nil) - watchdog is enabled and we can send ping. time is the delay
// before an inactive service will be killed.
func SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) {
	wusec := os.Getenv("WATCHDOG_USEC")
	wpid := os.Getenv("WATCHDOG_PID")
	if unsetEnvironment {
		wusecErr := os.Unsetenv("WATCHDOG_USEC")
		wpidErr := os.Unsetenv("WATCHDOG_PID")
		if wusecErr != nil {
			return 0, wusecErr
		}
		if wpidErr != nil {
			return 0, wpidErr
		}
	}

	if wusec == "" {
		return 0, nil
	}
	s, err := strconv.Atoi(wusec)
	if err != nil {
		return 0, fmt.Errorf("error converting WATCHDOG_USEC: %s", err)
	}
	if s <= 0 {
		return 0, fmt.Errorf("error WATCHDOG_USEC must be a positive number")
	}
	interval := time.Duration(s) * time.Microsecond

	if wpid == "" {
		return interval, nil
	}
	p, err := strconv.Atoi(wpid)
	if err != nil {
		return 0, fmt.Errorf("error converting WATCHDOG_PID: %s", err)
	}
	if os.Getpid() != p {
		return 0, nil
	}

	return interval, nil
}
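A sketch of the ping loop the doc comment asks for, run from the service's main goroutine (assumes the daemon import above plus "time"; per the docs the SdNotify error is commonly ignored):

	// Keep the systemd watchdog fed at half the configured interval.
	interval, err := daemon.SdWatchdogEnabled(false)
	if err != nil || interval == 0 {
		return // watchdog disabled, or we aren't the watched PID
	}
	ticker := time.NewTicker(interval / 2)
	defer ticker.Stop()
	for range ticker.C {
		daemon.SdNotify(false, daemon.SdNotifyWatchdog)
	}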
46 vendor/github.com/coreos/go-systemd/v22/journal/journal.go generated vendored Normal file
@@ -0,0 +1,46 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package journal provides write bindings to the local systemd journal.
// It is implemented in pure Go and connects to the journal directly over its
// unix socket.
//
// To read from the journal, see the "sdjournal" package, which wraps the
// sd-journal C API.
//
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
package journal

import (
	"fmt"
)

// Priority of a journal message
type Priority int

const (
	PriEmerg Priority = iota
	PriAlert
	PriCrit
	PriErr
	PriWarning
	PriNotice
	PriInfo
	PriDebug
)

// Print prints a message to the local systemd journal using Send().
func Print(priority Priority, format string, a ...interface{}) error {
	return Send(fmt.Sprintf(format, a...), priority, nil)
}
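A quick sketch of the formatted-write helper (assumes "github.com/coreos/go-systemd/v22/journal"; the volume name is a placeholder):

	// Formatted write at INFO priority, guarded by journal availability.
	if journal.Enabled() {
		journal.Print(journal.PriInfo, "mounted volume %s", "pvc-1234") // placeholder value
	}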
215 vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go generated vendored Normal file
@@ -0,0 +1,215 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !windows
// +build !windows

// Package journal provides write bindings to the local systemd journal.
// It is implemented in pure Go and connects to the journal directly over its
// unix socket.
//
// To read from the journal, see the "sdjournal" package, which wraps the
// sd-journal C API.
//
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
package journal

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"os"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"unsafe"
)

var (
	// This can be overridden at build-time:
	// https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
	journalSocket = "/run/systemd/journal/socket"

	// unixConnPtr atomically holds the local unconnected Unix-domain socket.
	// Concrete safe pointer type: *net.UnixConn
	unixConnPtr unsafe.Pointer
	// onceConn ensures that unixConnPtr is initialized exactly once.
	onceConn sync.Once
)

// Enabled checks whether the local systemd journal is available for logging.
func Enabled() bool {
	if c := getOrInitConn(); c == nil {
		return false
	}

	conn, err := net.Dial("unixgram", journalSocket)
	if err != nil {
		return false
	}
	defer conn.Close()

	return true
}

// Send a message to the local systemd journal. vars is a map of journald
// fields to values. Fields must be composed of uppercase letters, numbers,
// and underscores, but must not start with an underscore. Within these
// restrictions, any arbitrary field name may be used. Some names have special
// significance: see the journalctl documentation
// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
// for more details. vars may be nil.
func Send(message string, priority Priority, vars map[string]string) error {
	conn := getOrInitConn()
	if conn == nil {
		return errors.New("could not initialize socket to journald")
	}

	socketAddr := &net.UnixAddr{
		Name: journalSocket,
		Net:  "unixgram",
	}

	data := new(bytes.Buffer)
	appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
	appendVariable(data, "MESSAGE", message)
	for k, v := range vars {
		appendVariable(data, k, v)
	}

	_, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
	if err == nil {
		return nil
	}
	if !isSocketSpaceError(err) {
		return err
	}

	// Large log entry, send it via tempfile and ancillary-fd.
	file, err := tempFd()
	if err != nil {
		return err
	}
	defer file.Close()
	_, err = io.Copy(file, data)
	if err != nil {
		return err
	}
	rights := syscall.UnixRights(int(file.Fd()))
	_, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
	if err != nil {
		return err
	}

	return nil
}

// getOrInitConn attempts to get the global `unixConnPtr` socket, initializing if necessary
func getOrInitConn() *net.UnixConn {
	conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
	if conn != nil {
		return conn
	}
	onceConn.Do(initConn)
	return (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
}

func appendVariable(w io.Writer, name, value string) {
	if err := validVarName(name); err != nil {
		fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
	}
	if strings.ContainsRune(value, '\n') {
		/* When the value contains a newline, we write:
		 * - the variable name, followed by a newline
		 * - the size (in 64bit little endian format)
		 * - the data, followed by a newline
		 */
		fmt.Fprintln(w, name)
		binary.Write(w, binary.LittleEndian, uint64(len(value)))
		fmt.Fprintln(w, value)
	} else {
		/* just write the variable and value all on one line */
		fmt.Fprintf(w, "%s=%s\n", name, value)
	}
}

// validVarName validates a variable name to make sure journald will accept it.
// The variable name must be in uppercase and consist only of characters,
// numbers and underscores, and may not begin with an underscore:
// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
func validVarName(name string) error {
	if name == "" {
		return errors.New("Empty variable name")
	} else if name[0] == '_' {
		return errors.New("Variable name begins with an underscore")
	}

	for _, c := range name {
		if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
			return errors.New("Variable name contains invalid characters")
		}
	}
	return nil
}

// isSocketSpaceError checks whether the error is signaling
// an "overlarge message" condition.
func isSocketSpaceError(err error) bool {
	opErr, ok := err.(*net.OpError)
	if !ok || opErr == nil {
		return false
	}

	sysErr, ok := opErr.Err.(*os.SyscallError)
	if !ok || sysErr == nil {
		return false
	}

	return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
}

// tempFd creates a temporary, unlinked file under `/dev/shm`.
func tempFd() (*os.File, error) {
	file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
	if err != nil {
		return nil, err
	}
	err = syscall.Unlink(file.Name())
	if err != nil {
		return nil, err
	}
	return file, nil
}

// initConn initializes the global `unixConnPtr` socket.
// It is automatically called when needed.
func initConn() {
	autobind, err := net.ResolveUnixAddr("unixgram", "")
	if err != nil {
		return
	}

	sock, err := net.ListenUnixgram("unixgram", autobind)
	if err != nil {
		return
	}

	atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
}
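A structured-entry sketch for Send, following the field-name rules in its doc comment (assumes the journal import and "log"; field values are placeholders):

	// Write a structured entry with custom journald fields.
	err := journal.Send("volume mount failed", journal.PriErr, map[string]string{
		"VOLUME_ID":         "pvc-1234", // placeholder
		"SYSLOG_IDENTIFIER": "csi",      // placeholder
	})
	if err != nil {
		log.Print(err)
	}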
35
vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
generated
vendored
Normal file
35
vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package journal provides write bindings to the local systemd journal.
// It is implemented in pure Go and connects to the journal directly over its
// unix socket.
//
// To read from the journal, see the "sdjournal" package, which wraps the
// sd-journal C API.
//
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
package journal

import (
	"errors"
)

func Enabled() bool {
	return false
}

func Send(message string, priority Priority, vars map[string]string) error {
	return errors.New("could not initialize socket to journald")
}
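
Since this Windows stub always reports the journal as unavailable, portable callers are expected to gate writes on Enabled and fall back to another sink; Send here only ever returns the error above. A hedged sketch of that guard (the log message is illustrative):

	package main

	import (
		"log"

		"github.com/coreos/go-systemd/v22/journal"
	)

	func main() {
		if journal.Enabled() {
			_ = journal.Send("starting up", journal.PriNotice, nil)
		} else {
			// On Windows, or when the journald socket is absent,
			// fall back to plain logging.
			log.Println("starting up")
		}
	}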
15 vendor/github.com/go-openapi/jsonpointer/.travis.yml generated vendored
@@ -1,15 +0,0 @@
after_success:
  - bash <(curl -s https://codecov.io/bash)
go:
  - 1.14.x
  - 1.15.x
install:
  - GO111MODULE=off go get -u gotest.tools/gotestsum
env:
  - GO111MODULE=on
language: go
notifications:
  slack:
    secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw=
script:
  - gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./...
13 vendor/github.com/go-openapi/jsonreference/.golangci.yml generated vendored
@@ -1,8 +1,6 @@
linters-settings:
  govet:
    check-shadowing: true
  golint:
    min-confidence: 0
  gocyclo:
    min-complexity: 30
  maligned:
@@ -12,6 +10,8 @@ linters-settings:
  goconst:
    min-len: 2
    min-occurrences: 4
  paralleltest:
    ignore-missing: true
linters:
  enable-all: true
  disable:
@@ -39,3 +39,12 @@ linters:
    - nestif
    - godot
    - errorlint
    - varcheck
    - interfacer
    - deadcode
    - golint
    - ifshort
    - structcheck
    - nosnakecase
    - varnamelen
    - exhaustruct
24 vendor/github.com/go-openapi/jsonreference/.travis.yml generated vendored
@@ -1,24 +0,0 @@
after_success:
  - bash <(curl -s https://codecov.io/bash)
go:
  - 1.14.x
  - 1.x
install:
  - go get gotest.tools/gotestsum
jobs:
  include:
    # include linting job, but only for latest go version and amd64 arch
    - go: 1.x
      arch: amd64
      install:
        go get github.com/golangci/golangci-lint/cmd/golangci-lint
      script:
        - golangci-lint run --new-from-rev master
env:
  - GO111MODULE=on
language: go
notifications:
  slack:
    secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ=
script:
  - gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./...
7 vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go generated vendored
@@ -7,8 +7,8 @@ import (
)

const (
	defaultHttpPort  = ":80"
	defaultHttpsPort = ":443"
	defaultHTTPPort  = ":80"
	defaultHTTPSPort = ":443"
)

// Regular expressions used by the normalizations
@@ -18,6 +18,7 @@ var rxDupSlashes = regexp.MustCompile(`/{2,}`)
// NormalizeURL will normalize the specified URL
// This was added to replace a previous call to the no longer maintained purell library:
// The call that was used looked like the following:
//
//	url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
//
// To explain all that was included in the call above, purell.FlagsSafe was really just the following:
@@ -48,7 +49,7 @@ func removeDefaultPort(u *url.URL) {
	if len(u.Host) > 0 {
		scheme := strings.ToLower(u.Scheme)
		u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
			if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
			if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) {
				return ""
			}
			return val
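
The effect of the renamed constants is unchanged: removeDefaultPort still strips ":80" from http hosts and ":443" from https hosts during normalization. A small illustrative sketch of that behaviour (stripDefaultPort is a stand-in, not the package's unexported helper):

	package main

	import (
		"fmt"
		"net/url"
		"strings"
	)

	func stripDefaultPort(u *url.URL) {
		scheme := strings.ToLower(u.Scheme)
		if (scheme == "http" && strings.HasSuffix(u.Host, ":80")) ||
			(scheme == "https" && strings.HasSuffix(u.Host, ":443")) {
			u.Host = u.Host[:strings.LastIndex(u.Host, ":")]
		}
	}

	func main() {
		u, _ := url.Parse("http://example.com:80/docs")
		stripDefaultPort(u)
		fmt.Println(u) // http://example.com/docs
	}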
37 vendor/github.com/gogo/protobuf/gogoproto/Makefile generated vendored Normal file
@@ -0,0 +1,37 @@
# Protocol Buffers for Go with Gadgets
#
# Copyright (c) 2013, The GoGo Authors. All rights reserved.
# http://github.com/gogo/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

regenerate:
	go install github.com/gogo/protobuf/protoc-gen-gogo
	protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto

restore:
	cp gogo.pb.golden gogo.pb.go

preserve:
	cp gogo.pb.go gogo.pb.golden
169 vendor/github.com/gogo/protobuf/gogoproto/doc.go generated vendored Normal file
@@ -0,0 +1,169 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

/*
Package gogoproto provides extensions for protocol buffers to achieve:

  - fast marshalling and unmarshalling.
  - peace of mind by optionally generating test and benchmark code.
  - more canonical Go structures.
  - less typing by optionally generating extra helper code.
  - goprotobuf compatibility

More Canonical Go Structures

A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs.
You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct.
Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions.

  - nullable, if false, a field is generated without a pointer (see warning below).
  - embed, if true, the field is generated as an embedded field.
  - customtype, it works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128
  - customname (beta), changes the generated field name. This is especially useful when generated methods conflict with field names.
  - casttype (beta), changes the generated field type. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums.
  - castkey (beta), changes the generated field type for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.
  - castvalue (beta), changes the generated field type for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.

Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset.

Let us look at:

	github.com/gogo/protobuf/test/example/example.proto

for a quicker overview.

The following message:

	package test;

	import "github.com/gogo/protobuf/gogoproto/gogo.proto";

	message A {
		optional string Description = 1 [(gogoproto.nullable) = false];
		optional int64 Number = 2 [(gogoproto.nullable) = false];
		optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
	}

Will generate a go struct which looks a lot like this:

	type A struct {
		Description string
		Number      int64
		Id          github_com_gogo_protobuf_test_custom.Uuid
	}

You will see there are no pointers, since all fields are non-nullable.
You will also see a custom type which marshals to a string.
Be warned it is your responsibility to test your custom types thoroughly.
You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods.

Next we will embed the message A in message B.

	message B {
		optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
		repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
	}

See below that A is embedded in B.

	type B struct {
		A
		G []github_com_gogo_protobuf_test_custom.Uint128
	}

Also see the repeated custom type.

	type Uint128 [2]uint64

Next we will create a custom name for one of our fields.

	message C {
		optional int64 size = 1 [(gogoproto.customname) = "MySize"];
	}

See below that the field's name is MySize and not Size.

	type C struct {
		MySize *int64
	}

This is useful when having a protocol buffer message with a field name which conflicts with a generated method.
As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error.
Using customname you can fix this error without changing the field name.
This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available.

Gogoprotobuf also has some more subtle changes, these could be changed back:

  - the generated package names for imports do not have the extra /filename.pb,
    but are actually the imports specified in the .proto file.

Gogoprotobuf also has lost some features which should be brought back with time:

  - Marshalling and unmarshalling with reflect and without the unsafe package,
    this requires work in pointer_reflect.go

Why does nullable break protocol buffer specifications:

The protocol buffer specification states, somewhere, that you should be able to tell whether a
field is set or unset. With the option nullable=false this feature is lost,
since your non-nullable fields will always be set. It can be seen as a layer on top of
protocol buffers, where before and after marshalling all non-nullable fields are set
and they cannot be unset.

Goprotobuf Compatibility:

Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers.
Gogoprotobuf generates the same code as goprotobuf if no extensions are used.
The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf:

  - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto.
  - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix
  - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method.
  - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face
  - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method.
  - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension
  - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields.
  - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway).

Less Typing and Peace of Mind is explained in their specific plugin folders godoc:

  - github.com/gogo/protobuf/plugin/<extension_name>

If you do not use any of these extensions the code that is generated
will be the same as if goprotobuf had generated it.

The most complete way to see examples is to look at

	github.com/gogo/protobuf/test/thetest.proto

Gogoprototest is a separate project,
because we want to keep gogoprotobuf independent of goprotobuf,
but we still want to test it thoroughly.
*/
package gogoproto
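
The options documented above are plain proto2 extensions on the descriptor options, so they can be set and read back through the gogo/protobuf runtime, which is how the code-generator plugins consume them. A hedged sketch (the MessageOptions value is constructed by hand here purely for illustration):

	package main

	import (
		"fmt"

		"github.com/gogo/protobuf/gogoproto"
		"github.com/gogo/protobuf/proto"
		descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
	)

	func main() {
		// Mark a message's options with marshaler=true, then read the
		// extension back the way a descriptor-inspecting plugin would.
		opts := &descriptor.MessageOptions{}
		if err := proto.SetExtension(opts, gogoproto.E_Marshaler, proto.Bool(true)); err != nil {
			panic(err)
		}
		v, err := proto.GetExtension(opts, gogoproto.E_Marshaler)
		if err != nil {
			panic(err)
		}
		fmt.Println(*v.(*bool)) // true
	}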
874 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go generated vendored Normal file
@@ -0,0 +1,874 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: gogo.proto

package gogoproto

import (
	fmt "fmt"
	proto "github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
	math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package

var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.EnumOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         62001,
	Name:          "gogoproto.goproto_enum_prefix",
	Tag:           "varint,62001,opt,name=goproto_enum_prefix",
	Filename:      "gogo.proto",
}

var E_GoprotoEnumStringer = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.EnumOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         62021,
	Name:          "gogoproto.goproto_enum_stringer",
	Tag:           "varint,62021,opt,name=goproto_enum_stringer",
	Filename:      "gogo.proto",
}

var E_EnumStringer = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.EnumOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         62022,
	Name:          "gogoproto.enum_stringer",
	Tag:           "varint,62022,opt,name=enum_stringer",
	Filename:      "gogo.proto",
}

var E_EnumCustomname = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.EnumOptions)(nil),
	ExtensionType: (*string)(nil),
	Field:         62023,
	Name:          "gogoproto.enum_customname",
	Tag:           "bytes,62023,opt,name=enum_customname",
	Filename:      "gogo.proto",
}

var E_Enumdecl = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.EnumOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         62024,
	Name:          "gogoproto.enumdecl",
	Tag:           "varint,62024,opt,name=enumdecl",
	Filename:      "gogo.proto",
}

var E_EnumvalueCustomname = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.EnumValueOptions)(nil),
	ExtensionType: (*string)(nil),
	Field:         66001,
	Name:          "gogoproto.enumvalue_customname",
	Tag:           "bytes,66001,opt,name=enumvalue_customname",
	Filename:      "gogo.proto",
}

var E_GoprotoGettersAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63001,
	Name:          "gogoproto.goproto_getters_all",
	Tag:           "varint,63001,opt,name=goproto_getters_all",
	Filename:      "gogo.proto",
}

var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63002,
	Name:          "gogoproto.goproto_enum_prefix_all",
	Tag:           "varint,63002,opt,name=goproto_enum_prefix_all",
	Filename:      "gogo.proto",
}

var E_GoprotoStringerAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63003,
	Name:          "gogoproto.goproto_stringer_all",
	Tag:           "varint,63003,opt,name=goproto_stringer_all",
	Filename:      "gogo.proto",
}

var E_VerboseEqualAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63004,
	Name:          "gogoproto.verbose_equal_all",
	Tag:           "varint,63004,opt,name=verbose_equal_all",
	Filename:      "gogo.proto",
}

var E_FaceAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63005,
	Name:          "gogoproto.face_all",
	Tag:           "varint,63005,opt,name=face_all",
	Filename:      "gogo.proto",
}

var E_GostringAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63006,
	Name:          "gogoproto.gostring_all",
	Tag:           "varint,63006,opt,name=gostring_all",
	Filename:      "gogo.proto",
}

var E_PopulateAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63007,
	Name:          "gogoproto.populate_all",
	Tag:           "varint,63007,opt,name=populate_all",
	Filename:      "gogo.proto",
}

var E_StringerAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63008,
	Name:          "gogoproto.stringer_all",
	Tag:           "varint,63008,opt,name=stringer_all",
	Filename:      "gogo.proto",
}

var E_OnlyoneAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63009,
	Name:          "gogoproto.onlyone_all",
	Tag:           "varint,63009,opt,name=onlyone_all",
	Filename:      "gogo.proto",
}

var E_EqualAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63013,
	Name:          "gogoproto.equal_all",
	Tag:           "varint,63013,opt,name=equal_all",
	Filename:      "gogo.proto",
}

var E_DescriptionAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63014,
	Name:          "gogoproto.description_all",
	Tag:           "varint,63014,opt,name=description_all",
	Filename:      "gogo.proto",
}

var E_TestgenAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63015,
	Name:          "gogoproto.testgen_all",
	Tag:           "varint,63015,opt,name=testgen_all",
	Filename:      "gogo.proto",
}

var E_BenchgenAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63016,
	Name:          "gogoproto.benchgen_all",
	Tag:           "varint,63016,opt,name=benchgen_all",
	Filename:      "gogo.proto",
}

var E_MarshalerAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63017,
	Name:          "gogoproto.marshaler_all",
	Tag:           "varint,63017,opt,name=marshaler_all",
	Filename:      "gogo.proto",
}

var E_UnmarshalerAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63018,
	Name:          "gogoproto.unmarshaler_all",
	Tag:           "varint,63018,opt,name=unmarshaler_all",
	Filename:      "gogo.proto",
}

var E_StableMarshalerAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63019,
	Name:          "gogoproto.stable_marshaler_all",
	Tag:           "varint,63019,opt,name=stable_marshaler_all",
	Filename:      "gogo.proto",
}

var E_SizerAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63020,
	Name:          "gogoproto.sizer_all",
	Tag:           "varint,63020,opt,name=sizer_all",
	Filename:      "gogo.proto",
}

var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63021,
	Name:          "gogoproto.goproto_enum_stringer_all",
	Tag:           "varint,63021,opt,name=goproto_enum_stringer_all",
	Filename:      "gogo.proto",
}

var E_EnumStringerAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63022,
	Name:          "gogoproto.enum_stringer_all",
	Tag:           "varint,63022,opt,name=enum_stringer_all",
	Filename:      "gogo.proto",
}

var E_UnsafeMarshalerAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63023,
	Name:          "gogoproto.unsafe_marshaler_all",
	Tag:           "varint,63023,opt,name=unsafe_marshaler_all",
	Filename:      "gogo.proto",
}

var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63024,
	Name:          "gogoproto.unsafe_unmarshaler_all",
	Tag:           "varint,63024,opt,name=unsafe_unmarshaler_all",
	Filename:      "gogo.proto",
}

var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63025,
	Name:          "gogoproto.goproto_extensions_map_all",
	Tag:           "varint,63025,opt,name=goproto_extensions_map_all",
	Filename:      "gogo.proto",
}

var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63026,
	Name:          "gogoproto.goproto_unrecognized_all",
	Tag:           "varint,63026,opt,name=goproto_unrecognized_all",
	Filename:      "gogo.proto",
}

var E_GogoprotoImport = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63027,
	Name:          "gogoproto.gogoproto_import",
	Tag:           "varint,63027,opt,name=gogoproto_import",
	Filename:      "gogo.proto",
}

var E_ProtosizerAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63028,
	Name:          "gogoproto.protosizer_all",
	Tag:           "varint,63028,opt,name=protosizer_all",
	Filename:      "gogo.proto",
}

var E_CompareAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63029,
	Name:          "gogoproto.compare_all",
	Tag:           "varint,63029,opt,name=compare_all",
	Filename:      "gogo.proto",
}

var E_TypedeclAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63030,
	Name:          "gogoproto.typedecl_all",
	Tag:           "varint,63030,opt,name=typedecl_all",
	Filename:      "gogo.proto",
}

var E_EnumdeclAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63031,
	Name:          "gogoproto.enumdecl_all",
	Tag:           "varint,63031,opt,name=enumdecl_all",
	Filename:      "gogo.proto",
}

var E_GoprotoRegistration = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63032,
	Name:          "gogoproto.goproto_registration",
	Tag:           "varint,63032,opt,name=goproto_registration",
	Filename:      "gogo.proto",
}

var E_MessagenameAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63033,
	Name:          "gogoproto.messagename_all",
	Tag:           "varint,63033,opt,name=messagename_all",
	Filename:      "gogo.proto",
}

var E_GoprotoSizecacheAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63034,
	Name:          "gogoproto.goproto_sizecache_all",
	Tag:           "varint,63034,opt,name=goproto_sizecache_all",
	Filename:      "gogo.proto",
}

var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63035,
	Name:          "gogoproto.goproto_unkeyed_all",
	Tag:           "varint,63035,opt,name=goproto_unkeyed_all",
	Filename:      "gogo.proto",
}

var E_GoprotoGetters = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64001,
	Name:          "gogoproto.goproto_getters",
	Tag:           "varint,64001,opt,name=goproto_getters",
	Filename:      "gogo.proto",
}

var E_GoprotoStringer = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64003,
	Name:          "gogoproto.goproto_stringer",
	Tag:           "varint,64003,opt,name=goproto_stringer",
	Filename:      "gogo.proto",
}

var E_VerboseEqual = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64004,
	Name:          "gogoproto.verbose_equal",
	Tag:           "varint,64004,opt,name=verbose_equal",
	Filename:      "gogo.proto",
}

var E_Face = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64005,
	Name:          "gogoproto.face",
	Tag:           "varint,64005,opt,name=face",
	Filename:      "gogo.proto",
}

var E_Gostring = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64006,
	Name:          "gogoproto.gostring",
	Tag:           "varint,64006,opt,name=gostring",
	Filename:      "gogo.proto",
}

var E_Populate = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64007,
	Name:          "gogoproto.populate",
	Tag:           "varint,64007,opt,name=populate",
	Filename:      "gogo.proto",
}

var E_Stringer = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         67008,
	Name:          "gogoproto.stringer",
	Tag:           "varint,67008,opt,name=stringer",
	Filename:      "gogo.proto",
}

var E_Onlyone = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64009,
	Name:          "gogoproto.onlyone",
	Tag:           "varint,64009,opt,name=onlyone",
	Filename:      "gogo.proto",
}

var E_Equal = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64013,
	Name:          "gogoproto.equal",
	Tag:           "varint,64013,opt,name=equal",
	Filename:      "gogo.proto",
}

var E_Description = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64014,
	Name:          "gogoproto.description",
	Tag:           "varint,64014,opt,name=description",
	Filename:      "gogo.proto",
}

var E_Testgen = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64015,
	Name:          "gogoproto.testgen",
	Tag:           "varint,64015,opt,name=testgen",
	Filename:      "gogo.proto",
}

var E_Benchgen = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64016,
	Name:          "gogoproto.benchgen",
	Tag:           "varint,64016,opt,name=benchgen",
	Filename:      "gogo.proto",
}

var E_Marshaler = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64017,
	Name:          "gogoproto.marshaler",
	Tag:           "varint,64017,opt,name=marshaler",
	Filename:      "gogo.proto",
}

var E_Unmarshaler = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64018,
	Name:          "gogoproto.unmarshaler",
	Tag:           "varint,64018,opt,name=unmarshaler",
	Filename:      "gogo.proto",
}

var E_StableMarshaler = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64019,
	Name:          "gogoproto.stable_marshaler",
	Tag:           "varint,64019,opt,name=stable_marshaler",
	Filename:      "gogo.proto",
}

var E_Sizer = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64020,
	Name:          "gogoproto.sizer",
	Tag:           "varint,64020,opt,name=sizer",
	Filename:      "gogo.proto",
}

var E_UnsafeMarshaler = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64023,
	Name:          "gogoproto.unsafe_marshaler",
	Tag:           "varint,64023,opt,name=unsafe_marshaler",
	Filename:      "gogo.proto",
}

var E_UnsafeUnmarshaler = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64024,
	Name:          "gogoproto.unsafe_unmarshaler",
	Tag:           "varint,64024,opt,name=unsafe_unmarshaler",
	Filename:      "gogo.proto",
}

var E_GoprotoExtensionsMap = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64025,
	Name:          "gogoproto.goproto_extensions_map",
	Tag:           "varint,64025,opt,name=goproto_extensions_map",
	Filename:      "gogo.proto",
}

var E_GoprotoUnrecognized = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64026,
	Name:          "gogoproto.goproto_unrecognized",
	Tag:           "varint,64026,opt,name=goproto_unrecognized",
	Filename:      "gogo.proto",
}

var E_Protosizer = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64028,
	Name:          "gogoproto.protosizer",
	Tag:           "varint,64028,opt,name=protosizer",
	Filename:      "gogo.proto",
}

var E_Compare = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64029,
	Name:          "gogoproto.compare",
	Tag:           "varint,64029,opt,name=compare",
	Filename:      "gogo.proto",
}

var E_Typedecl = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64030,
	Name:          "gogoproto.typedecl",
	Tag:           "varint,64030,opt,name=typedecl",
	Filename:      "gogo.proto",
}

var E_Messagename = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64033,
	Name:          "gogoproto.messagename",
	Tag:           "varint,64033,opt,name=messagename",
	Filename:      "gogo.proto",
}

var E_GoprotoSizecache = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64034,
	Name:          "gogoproto.goproto_sizecache",
	Tag:           "varint,64034,opt,name=goproto_sizecache",
	Filename:      "gogo.proto",
}

var E_GoprotoUnkeyed = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64035,
	Name:          "gogoproto.goproto_unkeyed",
	Tag:           "varint,64035,opt,name=goproto_unkeyed",
	Filename:      "gogo.proto",
}

var E_Nullable = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FieldOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         65001,
	Name:          "gogoproto.nullable",
	Tag:           "varint,65001,opt,name=nullable",
	Filename:      "gogo.proto",
}

var E_Embed = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FieldOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         65002,
	Name:          "gogoproto.embed",
	Tag:           "varint,65002,opt,name=embed",
	Filename:      "gogo.proto",
}

var E_Customtype = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FieldOptions)(nil),
	ExtensionType: (*string)(nil),
	Field:         65003,
	Name:          "gogoproto.customtype",
	Tag:           "bytes,65003,opt,name=customtype",
	Filename:      "gogo.proto",
}

var E_Customname = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FieldOptions)(nil),
	ExtensionType: (*string)(nil),
	Field:         65004,
	Name:          "gogoproto.customname",
	Tag:           "bytes,65004,opt,name=customname",
	Filename:      "gogo.proto",
}

var E_Jsontag = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FieldOptions)(nil),
	ExtensionType: (*string)(nil),
	Field:         65005,
	Name:          "gogoproto.jsontag",
	Tag:           "bytes,65005,opt,name=jsontag",
	Filename:      "gogo.proto",
}

var E_Moretags = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FieldOptions)(nil),
	ExtensionType: (*string)(nil),
	Field:         65006,
	Name:          "gogoproto.moretags",
	Tag:           "bytes,65006,opt,name=moretags",
	Filename:      "gogo.proto",
}

var E_Casttype = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FieldOptions)(nil),
	ExtensionType: (*string)(nil),
	Field:         65007,
	Name:          "gogoproto.casttype",
	Tag:           "bytes,65007,opt,name=casttype",
	Filename:      "gogo.proto",
}

var E_Castkey = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FieldOptions)(nil),
	ExtensionType: (*string)(nil),
	Field:         65008,
	Name:          "gogoproto.castkey",
	Tag:           "bytes,65008,opt,name=castkey",
	Filename:      "gogo.proto",
}

var E_Castvalue = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FieldOptions)(nil),
	ExtensionType: (*string)(nil),
	Field:         65009,
	Name:          "gogoproto.castvalue",
	Tag:           "bytes,65009,opt,name=castvalue",
	Filename:      "gogo.proto",
}

var E_Stdtime = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FieldOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         65010,
	Name:          "gogoproto.stdtime",
	Tag:           "varint,65010,opt,name=stdtime",
	Filename:      "gogo.proto",
}

var E_Stdduration = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FieldOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         65011,
	Name:          "gogoproto.stdduration",
	Tag:           "varint,65011,opt,name=stdduration",
	Filename:      "gogo.proto",
}

var E_Wktpointer = &proto.ExtensionDesc{
	ExtendedType:  (*descriptor.FieldOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         65012,
	Name:          "gogoproto.wktpointer",
	Tag:           "varint,65012,opt,name=wktpointer",
	Filename:      "gogo.proto",
}

func init() {
	proto.RegisterExtension(E_GoprotoEnumPrefix)
	proto.RegisterExtension(E_GoprotoEnumStringer)
	proto.RegisterExtension(E_EnumStringer)
	proto.RegisterExtension(E_EnumCustomname)
	proto.RegisterExtension(E_Enumdecl)
	proto.RegisterExtension(E_EnumvalueCustomname)
	proto.RegisterExtension(E_GoprotoGettersAll)
	proto.RegisterExtension(E_GoprotoEnumPrefixAll)
	proto.RegisterExtension(E_GoprotoStringerAll)
	proto.RegisterExtension(E_VerboseEqualAll)
	proto.RegisterExtension(E_FaceAll)
	proto.RegisterExtension(E_GostringAll)
	proto.RegisterExtension(E_PopulateAll)
	proto.RegisterExtension(E_StringerAll)
	proto.RegisterExtension(E_OnlyoneAll)
	proto.RegisterExtension(E_EqualAll)
	proto.RegisterExtension(E_DescriptionAll)
	proto.RegisterExtension(E_TestgenAll)
	proto.RegisterExtension(E_BenchgenAll)
	proto.RegisterExtension(E_MarshalerAll)
	proto.RegisterExtension(E_UnmarshalerAll)
	proto.RegisterExtension(E_StableMarshalerAll)
	proto.RegisterExtension(E_SizerAll)
	proto.RegisterExtension(E_GoprotoEnumStringerAll)
	proto.RegisterExtension(E_EnumStringerAll)
	proto.RegisterExtension(E_UnsafeMarshalerAll)
	proto.RegisterExtension(E_UnsafeUnmarshalerAll)
	proto.RegisterExtension(E_GoprotoExtensionsMapAll)
	proto.RegisterExtension(E_GoprotoUnrecognizedAll)
	proto.RegisterExtension(E_GogoprotoImport)
	proto.RegisterExtension(E_ProtosizerAll)
	proto.RegisterExtension(E_CompareAll)
	proto.RegisterExtension(E_TypedeclAll)
	proto.RegisterExtension(E_EnumdeclAll)
	proto.RegisterExtension(E_GoprotoRegistration)
	proto.RegisterExtension(E_MessagenameAll)
	proto.RegisterExtension(E_GoprotoSizecacheAll)
	proto.RegisterExtension(E_GoprotoUnkeyedAll)
	proto.RegisterExtension(E_GoprotoGetters)
	proto.RegisterExtension(E_GoprotoStringer)
	proto.RegisterExtension(E_VerboseEqual)
	proto.RegisterExtension(E_Face)
	proto.RegisterExtension(E_Gostring)
	proto.RegisterExtension(E_Populate)
	proto.RegisterExtension(E_Stringer)
	proto.RegisterExtension(E_Onlyone)
	proto.RegisterExtension(E_Equal)
	proto.RegisterExtension(E_Description)
	proto.RegisterExtension(E_Testgen)
	proto.RegisterExtension(E_Benchgen)
	proto.RegisterExtension(E_Marshaler)
	proto.RegisterExtension(E_Unmarshaler)
	proto.RegisterExtension(E_StableMarshaler)
	proto.RegisterExtension(E_Sizer)
	proto.RegisterExtension(E_UnsafeMarshaler)
	proto.RegisterExtension(E_UnsafeUnmarshaler)
	proto.RegisterExtension(E_GoprotoExtensionsMap)
	proto.RegisterExtension(E_GoprotoUnrecognized)
	proto.RegisterExtension(E_Protosizer)
	proto.RegisterExtension(E_Compare)
	proto.RegisterExtension(E_Typedecl)
	proto.RegisterExtension(E_Messagename)
	proto.RegisterExtension(E_GoprotoSizecache)
	proto.RegisterExtension(E_GoprotoUnkeyed)
	proto.RegisterExtension(E_Nullable)
	proto.RegisterExtension(E_Embed)
	proto.RegisterExtension(E_Customtype)
	proto.RegisterExtension(E_Customname)
	proto.RegisterExtension(E_Jsontag)
	proto.RegisterExtension(E_Moretags)
	proto.RegisterExtension(E_Casttype)
	proto.RegisterExtension(E_Castkey)
	proto.RegisterExtension(E_Castvalue)
	proto.RegisterExtension(E_Stdtime)
	proto.RegisterExtension(E_Stdduration)
	proto.RegisterExtension(E_Wktpointer)
}

func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) }

var fileDescriptor_592445b5231bc2b9 = []byte{
	// 1328 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45,
	0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9,
	0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18,
	0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84,
	0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f,
	0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7,
	0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6,
	0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9,
	0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6,
	0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59,
	0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc,
	0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99,
	0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19,
	0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b,
	0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79,
	0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8,
	0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d,
	0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4,
	0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78,
	0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0,
	0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1,
	0x7c, 0x19, 0x9d, 0x75, 0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6,
	0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae,
	0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c,
	0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0,
	0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b,
	0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04,
	0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28,
	0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36,
	0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50,
	0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d,
	0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa,
	0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5,
	0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b,
	0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24,
	0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05,
	0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2,
	0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b,
	0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92,
	0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56,
	0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e,
	0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19,
	0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70,
	0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0,
	0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c,
	0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a,
	0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0,
	0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4,
	0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95,
	0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9,
	0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9,
	0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f,
	0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9,
	0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5,
	0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8,
	0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb,
	0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae,
	0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31,
	0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d,
	0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30,
	0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94,
	0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f,
	0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36,
	0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e,
	0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b,
	0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e,
	0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb,
	0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5,
	0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17,
	0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45,
	0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32,
	0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4,
	0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8,
	0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f,
	0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49,
	0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f,
	0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb,
	0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c,
	0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90,
	0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e,
	0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd,
	0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb,
	0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00,
}
45 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden generated vendored Normal file
@@ -0,0 +1,45 @@
// Code generated by protoc-gen-go.
// source: gogo.proto
// DO NOT EDIT!

package gogoproto

import proto "github.com/gogo/protobuf/proto"
import json "encoding/json"
import math "math"
import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"

// Reference proto, json, and math imports to suppress error if they are not otherwise used.
var _ = proto.Marshal
var _ = &json.SyntaxError{}
var _ = math.Inf

var E_Nullable = &proto.ExtensionDesc{
	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         51235,
	Name:          "gogoproto.nullable",
	Tag:           "varint,51235,opt,name=nullable",
}

var E_Embed = &proto.ExtensionDesc{
	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         51236,
	Name:          "gogoproto.embed",
	Tag:           "varint,51236,opt,name=embed",
}

var E_Customtype = &proto.ExtensionDesc{
	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
	ExtensionType: (*string)(nil),
	Field:         51237,
	Name:          "gogoproto.customtype",
	Tag:           "bytes,51237,opt,name=customtype",
}

func init() {
	proto.RegisterExtension(E_Nullable)
	proto.RegisterExtension(E_Embed)
	proto.RegisterExtension(E_Customtype)
}
144 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto generated vendored Normal file
@@ -0,0 +1,144 @@
|
||||
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

syntax = "proto2";
package gogoproto;

import "google/protobuf/descriptor.proto";

option java_package = "com.google.protobuf";
option java_outer_classname = "GoGoProtos";
option go_package = "github.com/gogo/protobuf/gogoproto";

extend google.protobuf.EnumOptions {
	optional bool goproto_enum_prefix = 62001;
	optional bool goproto_enum_stringer = 62021;
	optional bool enum_stringer = 62022;
	optional string enum_customname = 62023;
	optional bool enumdecl = 62024;
}

extend google.protobuf.EnumValueOptions {
	optional string enumvalue_customname = 66001;
}

extend google.protobuf.FileOptions {
	optional bool goproto_getters_all = 63001;
	optional bool goproto_enum_prefix_all = 63002;
	optional bool goproto_stringer_all = 63003;
	optional bool verbose_equal_all = 63004;
	optional bool face_all = 63005;
	optional bool gostring_all = 63006;
	optional bool populate_all = 63007;
	optional bool stringer_all = 63008;
	optional bool onlyone_all = 63009;

	optional bool equal_all = 63013;
	optional bool description_all = 63014;
	optional bool testgen_all = 63015;
	optional bool benchgen_all = 63016;
	optional bool marshaler_all = 63017;
	optional bool unmarshaler_all = 63018;
	optional bool stable_marshaler_all = 63019;

	optional bool sizer_all = 63020;

	optional bool goproto_enum_stringer_all = 63021;
	optional bool enum_stringer_all = 63022;

	optional bool unsafe_marshaler_all = 63023;
	optional bool unsafe_unmarshaler_all = 63024;

	optional bool goproto_extensions_map_all = 63025;
	optional bool goproto_unrecognized_all = 63026;
	optional bool gogoproto_import = 63027;
	optional bool protosizer_all = 63028;
	optional bool compare_all = 63029;
	optional bool typedecl_all = 63030;
	optional bool enumdecl_all = 63031;

	optional bool goproto_registration = 63032;
	optional bool messagename_all = 63033;

	optional bool goproto_sizecache_all = 63034;
	optional bool goproto_unkeyed_all = 63035;
}

extend google.protobuf.MessageOptions {
	optional bool goproto_getters = 64001;
	optional bool goproto_stringer = 64003;
	optional bool verbose_equal = 64004;
	optional bool face = 64005;
	optional bool gostring = 64006;
	optional bool populate = 64007;
	optional bool stringer = 67008;
	optional bool onlyone = 64009;

	optional bool equal = 64013;
	optional bool description = 64014;
	optional bool testgen = 64015;
	optional bool benchgen = 64016;
	optional bool marshaler = 64017;
	optional bool unmarshaler = 64018;
	optional bool stable_marshaler = 64019;

	optional bool sizer = 64020;

	optional bool unsafe_marshaler = 64023;
	optional bool unsafe_unmarshaler = 64024;

	optional bool goproto_extensions_map = 64025;
	optional bool goproto_unrecognized = 64026;

	optional bool protosizer = 64028;
	optional bool compare = 64029;

	optional bool typedecl = 64030;

	optional bool messagename = 64033;

	optional bool goproto_sizecache = 64034;
	optional bool goproto_unkeyed = 64035;
}

extend google.protobuf.FieldOptions {
	optional bool nullable = 65001;
	optional bool embed = 65002;
	optional string customtype = 65003;
	optional string customname = 65004;
	optional string jsontag = 65005;
	optional string moretags = 65006;
	optional string casttype = 65007;
	optional string castkey = 65008;
	optional string castvalue = 65009;

	optional bool stdtime = 65010;
	optional bool stdduration = 65011;
	optional bool wktpointer = 65012;

}
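A note for readers of this vendored diff: each option above is a proto2 extension on the standard descriptor options, so a generator plugin reads it back with proto.GetExtension. A minimal sketch of that round trip follows, using only the packages added in this diff; the FieldDescriptorProto value is constructed by hand for illustration (in a real plugin it arrives in the CodeGeneratorRequest), and the helper functions referenced appear in helper.go further down.

package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// Hypothetical field descriptor; a .proto author would instead write
	// `optional int64 created_at = 1 [(gogoproto.nullable) = false];`.
	field := &descriptor.FieldDescriptorProto{
		Name:    proto.String("created_at"),
		Options: &descriptor.FieldOptions{},
	}
	// Attach the nullable=false option programmatically for the example.
	if err := proto.SetExtension(field.Options, gogoproto.E_Nullable, proto.Bool(false)); err != nil {
		panic(err)
	}
	// IsNullable (defined in helper.go below) defaults to true when the
	// option is absent, so this prints false only because we set it.
	fmt.Println(gogoproto.IsNullable(field)) // false
}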
415 vendor/github.com/gogo/protobuf/gogoproto/helper.go generated vendored Normal file
@@ -0,0 +1,415 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package gogoproto

import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
import proto "github.com/gogo/protobuf/proto"

func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Embed, false)
}

func IsNullable(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Nullable, true)
}

func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Stdtime, false)
}

func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Stdduration, false)
}

func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue"
}

func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue"
}

func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value"
}

func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value"
}

func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value"
}

func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value"
}

func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue"
}

func IsStdString(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue"
}

func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue"
}

func IsStdType(field *google_protobuf.FieldDescriptorProto) bool {
	return (IsStdTime(field) || IsStdDuration(field) ||
		IsStdDouble(field) || IsStdFloat(field) ||
		IsStdInt64(field) || IsStdUInt64(field) ||
		IsStdInt32(field) || IsStdUInt32(field) ||
		IsStdBool(field) ||
		IsStdString(field) || IsStdBytes(field))
}

func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false)
}

func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool {
	nullable := IsNullable(field)
	if field.IsMessage() || IsCustomType(field) {
		return nullable
	}
	if proto3 {
		return false
	}
	return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES
}

func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool {
	typ := GetCustomType(field)
	if len(typ) > 0 {
		return true
	}
	return false
}

func IsCastType(field *google_protobuf.FieldDescriptorProto) bool {
	typ := GetCastType(field)
	if len(typ) > 0 {
		return true
	}
	return false
}

func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool {
	typ := GetCastKey(field)
	if len(typ) > 0 {
		return true
	}
	return false
}

func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool {
	typ := GetCastValue(field)
	if len(typ) > 0 {
		return true
	}
	return false
}

func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
	return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true))
}

func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true))
}

func GetCustomType(field *google_protobuf.FieldDescriptorProto) string {
	if field == nil {
		return ""
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_Customtype)
		if err == nil && v.(*string) != nil {
			return *(v.(*string))
		}
	}
	return ""
}

func GetCastType(field *google_protobuf.FieldDescriptorProto) string {
	if field == nil {
		return ""
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_Casttype)
		if err == nil && v.(*string) != nil {
			return *(v.(*string))
		}
	}
	return ""
}

func GetCastKey(field *google_protobuf.FieldDescriptorProto) string {
	if field == nil {
		return ""
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_Castkey)
		if err == nil && v.(*string) != nil {
			return *(v.(*string))
		}
	}
	return ""
}

func GetCastValue(field *google_protobuf.FieldDescriptorProto) string {
	if field == nil {
		return ""
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_Castvalue)
		if err == nil && v.(*string) != nil {
			return *(v.(*string))
		}
	}
	return ""
}

func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool {
	name := GetCustomName(field)
	if len(name) > 0 {
		return true
	}
	return false
}

func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool {
	name := GetEnumCustomName(field)
	if len(name) > 0 {
		return true
	}
	return false
}

func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool {
	name := GetEnumValueCustomName(field)
	if len(name) > 0 {
		return true
	}
	return false
}

func GetCustomName(field *google_protobuf.FieldDescriptorProto) string {
	if field == nil {
		return ""
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_Customname)
		if err == nil && v.(*string) != nil {
			return *(v.(*string))
		}
	}
	return ""
}

func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string {
	if field == nil {
		return ""
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_EnumCustomname)
		if err == nil && v.(*string) != nil {
			return *(v.(*string))
		}
	}
	return ""
}

func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string {
	if field == nil {
		return ""
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname)
		if err == nil && v.(*string) != nil {
			return *(v.(*string))
		}
	}
	return ""
}

func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string {
	if field == nil {
		return nil
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_Jsontag)
		if err == nil && v.(*string) != nil {
			return (v.(*string))
		}
	}
	return nil
}

func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string {
	if field == nil {
		return nil
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_Moretags)
		if err == nil && v.(*string) != nil {
			return (v.(*string))
		}
	}
	return nil
}

type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool

func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
	return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true))
}

func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true))
}

func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true))
}

func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false))
}

func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false))
}

func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false))
}

func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false))
}

func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false))
}

func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false))
}

func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false))
}

func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false))
}

func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false))
}

func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false))
}

func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false))
}

func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false))
}

func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false))
}

func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false))
}

func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false))
}

func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
	return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true))
}

func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
	return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false))
}

func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false))
}

func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false))
}

func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true))
}

func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true))
}

func IsProto3(file *google_protobuf.FileDescriptorProto) bool {
	return file.GetSyntax() == "proto3"
}

func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool {
	return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true)
}

func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false))
}

func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool {
	return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false)
}

func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false))
}

func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true))
}

func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true))
}
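The pattern running through helper.go is a two-level default cascade: each per-message option falls back to the file-wide *_all option, which in turn falls back to a hard-coded default. A minimal sketch of that cascade follows; the descriptors are built by hand purely for illustration and do not correspond to any file in this diff.

package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	file := &descriptor.FileDescriptorProto{Options: &descriptor.FileOptions{}}
	msg := &descriptor.DescriptorProto{Name: proto.String("Example")}

	// No options set anywhere: HasEqual falls through both levels to its
	// hard-coded default, which is false.
	fmt.Println(gogoproto.HasEqual(file, msg)) // false

	// Enable it file-wide via equal_all; the message inherits the setting.
	if err := proto.SetExtension(file.Options, gogoproto.E_EqualAll, proto.Bool(true)); err != nil {
		panic(err)
	}
	fmt.Println(gogoproto.HasEqual(file, msg)) // true

	// A per-message equal=false overrides the file-wide default.
	msg.Options = &descriptor.MessageOptions{}
	if err := proto.SetExtension(msg.Options, gogoproto.E_Equal, proto.Bool(false)); err != nil {
		panic(err)
	}
	fmt.Println(gogoproto.HasEqual(file, msg)) // false
}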
36 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile generated vendored Normal file
@@ -0,0 +1,36 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

regenerate:
	go install github.com/gogo/protobuf/protoc-gen-gogo
	go install github.com/gogo/protobuf/protoc-gen-gostring
	protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto
	protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto
118 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go generated vendored Normal file
@@ -0,0 +1,118 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Package descriptor provides functions for obtaining protocol buffer
// descriptors for generated Go types.
//
// These functions cannot go in package proto because they depend on the
// generated protobuf descriptor messages, which themselves depend on proto.
package descriptor

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"github.com/gogo/protobuf/proto"
)

// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
func extractFile(gz []byte) (*FileDescriptorProto, error) {
	r, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		return nil, fmt.Errorf("failed to open gzip reader: %v", err)
	}
	defer r.Close()

	b, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
	}

	fd := new(FileDescriptorProto)
	if err := proto.Unmarshal(b, fd); err != nil {
		return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
	}

	return fd, nil
}

// Message is a proto.Message with a method to return its descriptor.
//
// Message types generated by the protocol compiler always satisfy
// the Message interface.
type Message interface {
	proto.Message
	Descriptor() ([]byte, []int)
}

// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it
// describing the given message.
func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) {
	gz, path := msg.Descriptor()
	fd, err := extractFile(gz)
	if err != nil {
		panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
	}

	md = fd.MessageType[path[0]]
	for _, i := range path[1:] {
		md = md.NestedType[i]
	}
	return fd, md
}

// Is this field a scalar numeric type?
func (field *FieldDescriptorProto) IsScalar() bool {
	if field.Type == nil {
		return false
	}
	switch *field.Type {
	case FieldDescriptorProto_TYPE_DOUBLE,
		FieldDescriptorProto_TYPE_FLOAT,
		FieldDescriptorProto_TYPE_INT64,
		FieldDescriptorProto_TYPE_UINT64,
		FieldDescriptorProto_TYPE_INT32,
		FieldDescriptorProto_TYPE_FIXED64,
		FieldDescriptorProto_TYPE_FIXED32,
		FieldDescriptorProto_TYPE_BOOL,
		FieldDescriptorProto_TYPE_UINT32,
		FieldDescriptorProto_TYPE_ENUM,
		FieldDescriptorProto_TYPE_SFIXED32,
		FieldDescriptorProto_TYPE_SFIXED64,
		FieldDescriptorProto_TYPE_SINT32,
		FieldDescriptorProto_TYPE_SINT64:
		return true
	default:
		return false
	}
}
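For orientation, ForMessage works on any gogo-generated type, since all of them implement Descriptor() ([]byte, []int). A minimal sketch follows; it applies ForMessage to FieldOptions from this same package so the example stays self-contained, and the printed file name is hedged because it depends on how descriptor.proto was compiled.

package main

import (
	"fmt"

	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// fd describes the .proto file that defines FieldOptions (descriptor.proto
	// itself); md is the DescriptorProto for the FieldOptions message.
	fd, md := descriptor.ForMessage(&descriptor.FieldOptions{})
	fmt.Println(fd.GetName()) // e.g. "descriptor.proto"
	fmt.Println(md.GetName()) // "FieldOptions"
}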
2865 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go generated vendored Normal file
File diff suppressed because it is too large
752 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go generated vendored Normal file
@@ -0,0 +1,752 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: descriptor.proto

package descriptor

import (
	fmt "fmt"
	github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
	proto "github.com/gogo/protobuf/proto"
	math "math"
	reflect "reflect"
	sort "sort"
	strconv "strconv"
	strings "strings"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

func (this *FileDescriptorSet) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 5)
	s = append(s, "&descriptor.FileDescriptorSet{")
	if this.File != nil {
		s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *FileDescriptorProto) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 16)
	s = append(s, "&descriptor.FileDescriptorProto{")
	if this.Name != nil {
		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
	}
	if this.Package != nil {
		s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n")
	}
	if this.Dependency != nil {
		s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n")
	}
	if this.PublicDependency != nil {
		s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n")
	}
	if this.WeakDependency != nil {
		s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n")
	}
	if this.MessageType != nil {
		s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n")
	}
	if this.EnumType != nil {
		s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
	}
	if this.Service != nil {
		s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n")
	}
	if this.Extension != nil {
		s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
	}
	if this.Options != nil {
		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
	}
	if this.SourceCodeInfo != nil {
		s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n")
	}
	if this.Syntax != nil {
		s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *DescriptorProto) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 14)
	s = append(s, "&descriptor.DescriptorProto{")
	if this.Name != nil {
		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
	}
	if this.Field != nil {
		s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n")
	}
	if this.Extension != nil {
		s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
	}
	if this.NestedType != nil {
		s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n")
	}
	if this.EnumType != nil {
		s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
	}
	if this.ExtensionRange != nil {
		s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n")
	}
	if this.OneofDecl != nil {
		s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n")
	}
	if this.Options != nil {
		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
	}
	if this.ReservedRange != nil {
		s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
	}
	if this.ReservedName != nil {
		s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *DescriptorProto_ExtensionRange) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 7)
	s = append(s, "&descriptor.DescriptorProto_ExtensionRange{")
	if this.Start != nil {
		s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
	}
	if this.End != nil {
		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
	}
	if this.Options != nil {
		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *DescriptorProto_ReservedRange) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 6)
	s = append(s, "&descriptor.DescriptorProto_ReservedRange{")
	if this.Start != nil {
		s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
	}
	if this.End != nil {
		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *ExtensionRangeOptions) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 5)
	s = append(s, "&descriptor.ExtensionRangeOptions{")
	if this.UninterpretedOption != nil {
		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
	}
	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *FieldDescriptorProto) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 14)
	s = append(s, "&descriptor.FieldDescriptorProto{")
	if this.Name != nil {
		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
	}
	if this.Number != nil {
		s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
	}
	if this.Label != nil {
		s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n")
	}
	if this.Type != nil {
		s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n")
	}
	if this.TypeName != nil {
		s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n")
	}
	if this.Extendee != nil {
		s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n")
	}
	if this.DefaultValue != nil {
		s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n")
	}
	if this.OneofIndex != nil {
		s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n")
	}
	if this.JsonName != nil {
		s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n")
	}
	if this.Options != nil {
		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *OneofDescriptorProto) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 6)
	s = append(s, "&descriptor.OneofDescriptorProto{")
	if this.Name != nil {
		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
	}
	if this.Options != nil {
		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *EnumDescriptorProto) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 9)
	s = append(s, "&descriptor.EnumDescriptorProto{")
	if this.Name != nil {
		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
	}
	if this.Value != nil {
		s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
	}
	if this.Options != nil {
		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
	}
	if this.ReservedRange != nil {
		s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
	}
	if this.ReservedName != nil {
		s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *EnumDescriptorProto_EnumReservedRange) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 6)
	s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{")
	if this.Start != nil {
		s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
	}
	if this.End != nil {
		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *EnumValueDescriptorProto) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 7)
	s = append(s, "&descriptor.EnumValueDescriptorProto{")
	if this.Name != nil {
		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
	}
	if this.Number != nil {
		s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
	}
	if this.Options != nil {
		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *ServiceDescriptorProto) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 7)
	s = append(s, "&descriptor.ServiceDescriptorProto{")
	if this.Name != nil {
		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
	}
	if this.Method != nil {
		s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n")
	}
	if this.Options != nil {
		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *MethodDescriptorProto) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 10)
	s = append(s, "&descriptor.MethodDescriptorProto{")
	if this.Name != nil {
		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
	}
	if this.InputType != nil {
		s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n")
	}
	if this.OutputType != nil {
		s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n")
	}
	if this.Options != nil {
		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
	}
	if this.ClientStreaming != nil {
		s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n")
	}
	if this.ServerStreaming != nil {
		s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *FileOptions) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 25)
	s = append(s, "&descriptor.FileOptions{")
	if this.JavaPackage != nil {
		s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n")
	}
	if this.JavaOuterClassname != nil {
		s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n")
	}
	if this.JavaMultipleFiles != nil {
		s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n")
	}
	if this.JavaGenerateEqualsAndHash != nil {
		s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n")
	}
	if this.JavaStringCheckUtf8 != nil {
		s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n")
	}
	if this.OptimizeFor != nil {
		s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n")
	}
	if this.GoPackage != nil {
		s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n")
	}
	if this.CcGenericServices != nil {
		s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n")
	}
	if this.JavaGenericServices != nil {
		s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n")
	}
	if this.PyGenericServices != nil {
		s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n")
	}
	if this.PhpGenericServices != nil {
		s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n")
	}
	if this.Deprecated != nil {
		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
	}
	if this.CcEnableArenas != nil {
		s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n")
	}
	if this.ObjcClassPrefix != nil {
		s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n")
	}
	if this.CsharpNamespace != nil {
		s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n")
	}
	if this.SwiftPrefix != nil {
		s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n")
	}
	if this.PhpClassPrefix != nil {
		s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n")
	}
	if this.PhpNamespace != nil {
		s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n")
	}
	if this.PhpMetadataNamespace != nil {
		s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n")
	}
	if this.RubyPackage != nil {
		s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n")
	}
	if this.UninterpretedOption != nil {
		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
	}
	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *MessageOptions) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 9)
	s = append(s, "&descriptor.MessageOptions{")
	if this.MessageSetWireFormat != nil {
		s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n")
	}
	if this.NoStandardDescriptorAccessor != nil {
		s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n")
	}
	if this.Deprecated != nil {
		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
	}
	if this.MapEntry != nil {
		s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n")
	}
	if this.UninterpretedOption != nil {
		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
	}
	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *FieldOptions) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 11)
	s = append(s, "&descriptor.FieldOptions{")
	if this.Ctype != nil {
		s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n")
	}
	if this.Packed != nil {
		s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n")
	}
	if this.Jstype != nil {
		s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n")
	}
	if this.Lazy != nil {
		s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n")
	}
	if this.Deprecated != nil {
		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
	}
	if this.Weak != nil {
		s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n")
	}
	if this.UninterpretedOption != nil {
		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
	}
	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *OneofOptions) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 5)
	s = append(s, "&descriptor.OneofOptions{")
	if this.UninterpretedOption != nil {
		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
	}
	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *EnumOptions) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 7)
	s = append(s, "&descriptor.EnumOptions{")
	if this.AllowAlias != nil {
		s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n")
	}
	if this.Deprecated != nil {
		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
	}
	if this.UninterpretedOption != nil {
		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
	}
	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *EnumValueOptions) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 6)
	s = append(s, "&descriptor.EnumValueOptions{")
	if this.Deprecated != nil {
		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
	}
	if this.UninterpretedOption != nil {
		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
	}
	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *ServiceOptions) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 6)
	s = append(s, "&descriptor.ServiceOptions{")
	if this.Deprecated != nil {
		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
	}
	if this.UninterpretedOption != nil {
		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
	}
	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *MethodOptions) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 7)
	s = append(s, "&descriptor.MethodOptions{")
	if this.Deprecated != nil {
		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
	}
	if this.IdempotencyLevel != nil {
		s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n")
	}
	if this.UninterpretedOption != nil {
		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
	}
	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *UninterpretedOption) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 11)
	s = append(s, "&descriptor.UninterpretedOption{")
	if this.Name != nil {
		s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
	}
	if this.IdentifierValue != nil {
		s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n")
	}
	if this.PositiveIntValue != nil {
		s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n")
	}
	if this.NegativeIntValue != nil {
		s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n")
	}
	if this.DoubleValue != nil {
		s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n")
	}
	if this.StringValue != nil {
		s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n")
	}
	if this.AggregateValue != nil {
		s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *UninterpretedOption_NamePart) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 6)
	s = append(s, "&descriptor.UninterpretedOption_NamePart{")
	if this.NamePart != nil {
		s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n")
	}
	if this.IsExtension != nil {
		s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *SourceCodeInfo) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 5)
	s = append(s, "&descriptor.SourceCodeInfo{")
	if this.Location != nil {
		s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *SourceCodeInfo_Location) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 9)
	s = append(s, "&descriptor.SourceCodeInfo_Location{")
	if this.Path != nil {
		s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
	}
	if this.Span != nil {
		s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n")
	}
	if this.LeadingComments != nil {
		s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n")
	}
	if this.TrailingComments != nil {
		s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n")
	}
	if this.LeadingDetachedComments != nil {
		s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *GeneratedCodeInfo) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 5)
	s = append(s, "&descriptor.GeneratedCodeInfo{")
	if this.Annotation != nil {
		s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *GeneratedCodeInfo_Annotation) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 8)
	s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{")
	if this.Path != nil {
		s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
	}
	if this.SourceFile != nil {
		s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n")
	}
	if this.Begin != nil {
		s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n")
	}
	if this.End != nil {
		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func valueToGoStringDescriptor(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string {
|
||||
e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
|
||||
if e == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
|
||||
keys := make([]int, 0, len(e))
|
||||
for k := range e {
|
||||
keys = append(keys, int(k))
|
||||
}
|
||||
sort.Ints(keys)
|
||||
ss := []string{}
|
||||
for _, k := range keys {
|
||||
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
|
||||
}
|
||||
s += strings.Join(ss, ",") + "})"
|
||||
return s
|
||||
}
|
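The generated GoString methods above emit Go source text that would reconstruct the message, which is handy when logging descriptor options. A minimal sketch of calling one of them (an illustrative use, not part of this file; proto.Bool is gogo's pointer helper):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	opts := &descriptor.MethodOptions{Deprecated: proto.Bool(true)}
	// Prints Go source in the style generated above, roughly:
	//   &descriptor.MethodOptions{Deprecated: func(v bool) *bool { return &v } ( true ),...}
	fmt.Println(opts.GoString())
}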
390
vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
generated
vendored
Normal file
@ -0,0 +1,390 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package descriptor

import (
	"strings"
)

func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) {
	if !msg.GetOptions().GetMapEntry() {
		return nil, nil
	}
	return msg.GetField()[0], msg.GetField()[1]
}

func dotToUnderscore(r rune) rune {
	if r == '.' {
		return '_'
	}
	return r
}

func (field *FieldDescriptorProto) WireType() (wire int) {
	switch *field.Type {
	case FieldDescriptorProto_TYPE_DOUBLE:
		return 1
	case FieldDescriptorProto_TYPE_FLOAT:
		return 5
	case FieldDescriptorProto_TYPE_INT64:
		return 0
	case FieldDescriptorProto_TYPE_UINT64:
		return 0
	case FieldDescriptorProto_TYPE_INT32:
		return 0
	case FieldDescriptorProto_TYPE_UINT32:
		return 0
	case FieldDescriptorProto_TYPE_FIXED64:
		return 1
	case FieldDescriptorProto_TYPE_FIXED32:
		return 5
	case FieldDescriptorProto_TYPE_BOOL:
		return 0
	case FieldDescriptorProto_TYPE_STRING:
		return 2
	case FieldDescriptorProto_TYPE_GROUP:
		return 2
	case FieldDescriptorProto_TYPE_MESSAGE:
		return 2
	case FieldDescriptorProto_TYPE_BYTES:
		return 2
	case FieldDescriptorProto_TYPE_ENUM:
		return 0
	case FieldDescriptorProto_TYPE_SFIXED32:
		return 5
	case FieldDescriptorProto_TYPE_SFIXED64:
		return 1
	case FieldDescriptorProto_TYPE_SINT32:
		return 0
	case FieldDescriptorProto_TYPE_SINT64:
		return 0
	}
	panic("unreachable")
}
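WireType feeds the GetKey/GetKey3 helpers below, which encode the protobuf tag (field_number << 3 | wire_type) as a base-128 varint. A self-contained sketch of that encoding (encodeKey is an illustrative stand-in, not part of this package):

package main

import "fmt"

// encodeKey mirrors the varint layout used by GetKey: the tag value
// (fieldNumber << 3) | wireType is written 7 bits at a time, low bits
// first, with the high bit of each byte marking continuation.
func encodeKey(fieldNumber int32, wireType int) []byte {
	x := uint64(uint32(fieldNumber)<<3 | uint32(wireType))
	buf := make([]byte, 0, 5)
	for x > 127 {
		buf = append(buf, 0x80|uint8(x&0x7F))
		x >>= 7
	}
	buf = append(buf, uint8(x))
	return buf
}

func main() {
	// Field 16 with wire type 2 (length-delimited) -> 82 01.
	fmt.Printf("% x\n", encodeKey(16, 2))
}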

func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) {
	packed := field.IsPacked()
	wireType := field.WireType()
	fieldNumber := field.GetNumber()
	if packed {
		wireType = 2
	}
	x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
	return x
}

func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) {
	packed := field.IsPacked3()
	wireType := field.WireType()
	fieldNumber := field.GetNumber()
	if packed {
		wireType = 2
	}
	x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
	return x
}

func (field *FieldDescriptorProto) GetKey() []byte {
	x := field.GetKeyUint64()
	i := 0
	keybuf := make([]byte, 0)
	for i = 0; x > 127; i++ {
		keybuf = append(keybuf, 0x80|uint8(x&0x7F))
		x >>= 7
	}
	keybuf = append(keybuf, uint8(x))
	return keybuf
}

func (field *FieldDescriptorProto) GetKey3() []byte {
	x := field.GetKey3Uint64()
	i := 0
	keybuf := make([]byte, 0)
	for i = 0; x > 127; i++ {
		keybuf = append(keybuf, 0x80|uint8(x&0x7F))
		x >>= 7
	}
	keybuf = append(keybuf, uint8(x))
	return keybuf
}

func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto {
	msg := desc.GetMessage(packageName, messageName)
	if msg == nil {
		return nil
	}
	for _, field := range msg.GetField() {
		if field.GetName() == fieldName {
			return field
		}
	}
	return nil
}

func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto {
	for _, msg := range file.GetMessageType() {
		if msg.GetName() == typeName {
			return msg
		}
		nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+"."))
		if nes != nil {
			return nes
		}
	}
	return nil
}

func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto {
	for _, nes := range msg.GetNestedType() {
		if nes.GetName() == typeName {
			return nes
		}
		res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+"."))
		if res != nil {
			return res
		}
	}
	return nil
}

func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto {
	for _, file := range desc.GetFile() {
		if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
			continue
		}
		for _, msg := range file.GetMessageType() {
			if msg.GetName() == typeName {
				return msg
			}
		}
		for _, msg := range file.GetMessageType() {
			for _, nes := range msg.GetNestedType() {
				if nes.GetName() == typeName {
					return nes
				}
				if msg.GetName()+"."+nes.GetName() == typeName {
					return nes
				}
			}
		}
	}
	return nil
}

func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool {
	for _, file := range desc.GetFile() {
		if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
			continue
		}
		for _, msg := range file.GetMessageType() {
			if msg.GetName() == typeName {
				return file.GetSyntax() == "proto3"
			}
		}
		for _, msg := range file.GetMessageType() {
			for _, nes := range msg.GetNestedType() {
				if nes.GetName() == typeName {
					return file.GetSyntax() == "proto3"
				}
				if msg.GetName()+"."+nes.GetName() == typeName {
					return file.GetSyntax() == "proto3"
				}
			}
		}
	}
	return false
}

func (msg *DescriptorProto) IsExtendable() bool {
	return len(msg.GetExtensionRange()) > 0
}

func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) {
	parent := desc.GetMessage(packageName, typeName)
	if parent == nil {
		return "", nil
	}
	if !parent.IsExtendable() {
		return "", nil
	}
	extendee := "." + packageName + "." + typeName
	for _, file := range desc.GetFile() {
		for _, ext := range file.GetExtension() {
			if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
				if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
					continue
				}
			} else {
				if ext.GetExtendee() != extendee {
					continue
				}
			}
			if ext.GetName() == fieldName {
				return file.GetPackage(), ext
			}
		}
	}
	return "", nil
}

func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) {
	parent := desc.GetMessage(packageName, typeName)
	if parent == nil {
		return "", nil
	}
	if !parent.IsExtendable() {
		return "", nil
	}
	extendee := "." + packageName + "." + typeName
	for _, file := range desc.GetFile() {
		for _, ext := range file.GetExtension() {
			if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
				if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
					continue
				}
			} else {
				if ext.GetExtendee() != extendee {
					continue
				}
			}
			if ext.GetNumber() == fieldNum {
				return file.GetPackage(), ext
			}
		}
	}
	return "", nil
}

func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) {
	parent := desc.GetMessage(packageName, typeName)
	if parent == nil {
		return "", ""
	}
	field := parent.GetFieldDescriptor(fieldName)
	if field == nil {
		var extPackageName string
		extPackageName, field = desc.FindExtension(packageName, typeName, fieldName)
		if field == nil {
			return "", ""
		}
		packageName = extPackageName
	}
	typeNames := strings.Split(field.GetTypeName(), ".")
	if len(typeNames) == 1 {
		msg := desc.GetMessage(packageName, typeName)
		if msg == nil {
			return "", ""
		}
		return packageName, msg.GetName()
	}
	if len(typeNames) > 2 {
		for i := 1; i < len(typeNames)-1; i++ {
			packageName = strings.Join(typeNames[1:len(typeNames)-i], ".")
			typeName = strings.Join(typeNames[len(typeNames)-i:], ".")
			msg := desc.GetMessage(packageName, typeName)
			if msg != nil {
				typeNames := strings.Split(msg.GetName(), ".")
				if len(typeNames) == 1 {
					return packageName, msg.GetName()
				}
				return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1]
			}
		}
	}
	return "", ""
}

func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto {
	for _, field := range msg.GetField() {
		if field.GetName() == fieldName {
			return field
		}
	}
	return nil
}

func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto {
	for _, file := range desc.GetFile() {
		if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
			continue
		}
		for _, enum := range file.GetEnumType() {
			if enum.GetName() == typeName {
				return enum
			}
		}
	}
	return nil
}

func (f *FieldDescriptorProto) IsEnum() bool {
	return *f.Type == FieldDescriptorProto_TYPE_ENUM
}

func (f *FieldDescriptorProto) IsMessage() bool {
	return *f.Type == FieldDescriptorProto_TYPE_MESSAGE
}

func (f *FieldDescriptorProto) IsBytes() bool {
	return *f.Type == FieldDescriptorProto_TYPE_BYTES
}

func (f *FieldDescriptorProto) IsRepeated() bool {
	return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED
}

func (f *FieldDescriptorProto) IsString() bool {
	return *f.Type == FieldDescriptorProto_TYPE_STRING
}

func (f *FieldDescriptorProto) IsBool() bool {
	return *f.Type == FieldDescriptorProto_TYPE_BOOL
}

func (f *FieldDescriptorProto) IsRequired() bool {
	return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED
}

func (f *FieldDescriptorProto) IsPacked() bool {
	return f.Options != nil && f.GetOptions().GetPacked()
}

func (f *FieldDescriptorProto) IsPacked3() bool {
	if f.IsRepeated() && f.IsScalar() {
		if f.Options == nil || f.GetOptions().Packed == nil {
			return true
		}
		return f.Options != nil && f.GetOptions().GetPacked()
	}
	return false
}

func (m *DescriptorProto) HasExtension() bool {
	return len(m.ExtensionRange) > 0
}
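The lookup helpers above accept dotted nested-type names. A minimal sketch exercising FileDescriptorSet.GetMessage with hand-built descriptors (illustrative only; proto.String is gogo's pointer helper):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	file := &descriptor.FileDescriptorProto{
		Package: proto.String("example"),
		MessageType: []*descriptor.DescriptorProto{{
			Name:       proto.String("Outer"),
			NestedType: []*descriptor.DescriptorProto{{Name: proto.String("Inner")}},
		}},
	}
	set := &descriptor.FileDescriptorSet{File: []*descriptor.FileDescriptorProto{file}}
	// GetMessage resolves "Outer.Inner" via the nested-type comparison above.
	fmt.Println(set.GetMessage("example", "Outer.Inner").GetName()) // Inner
}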
233
vendor/github.com/google/cel-go/LICENSE
generated
vendored
Normal file
@ -0,0 +1,233 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

===========================================================================
The common/types/pb/equal.go modification of proto.Equal logic
===========================================================================
Copyright (c) 2018 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
76
vendor/github.com/google/cel-go/cel/BUILD.bazel
generated
vendored
Normal file
@ -0,0 +1,76 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(
    licenses = ["notice"],  # Apache 2.0
)

go_library(
    name = "go_default_library",
    srcs = [
        "cel.go",
        "decls.go",
        "env.go",
        "io.go",
        "library.go",
        "macro.go",
        "options.go",
        "program.go",
    ],
    importpath = "github.com/google/cel-go/cel",
    visibility = ["//visibility:public"],
    deps = [
        "//checker:go_default_library",
        "//checker/decls:go_default_library",
        "//common:go_default_library",
        "//common/containers:go_default_library",
        "//common/overloads:go_default_library",
        "//common/types:go_default_library",
        "//common/types/pb:go_default_library",
        "//common/types/ref:go_default_library",
        "//common/types/traits:go_default_library",
        "//interpreter:go_default_library",
        "//interpreter/functions:go_default_library",
        "//parser:go_default_library",
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_google_protobuf//reflect/protodesc:go_default_library",
        "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
        "@org_golang_google_protobuf//reflect/protoregistry:go_default_library",
        "@org_golang_google_protobuf//types/descriptorpb:go_default_library",
        "@org_golang_google_protobuf//types/dynamicpb:go_default_library",
        "@org_golang_google_protobuf//types/known/anypb:go_default_library",
        "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
        "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "cel_example_test.go",
        "cel_test.go",
        "decls_test.go",
        "env_test.go",
        "io_test.go",
    ],
    data = [
        "//cel/testdata:gen_test_fds",
    ],
    embed = [
        ":go_default_library",
    ],
    deps = [
        "//common/operators:go_default_library",
        "//common/overloads:go_default_library",
        "//common/types:go_default_library",
        "//common/types/ref:go_default_library",
        "//common/types/traits:go_default_library",
        "//test:go_default_library",
        "//test/proto2pb:go_default_library",
        "//test/proto3pb:go_default_library",
        "@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_google_protobuf//types/known/structpb:go_default_library",
    ],
)
19
vendor/github.com/google/cel-go/cel/cel.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package cel defines the top-level interface for the Common Expression Language (CEL).
//
// CEL is a non-Turing complete expression language designed to parse, check, and evaluate
// expressions against user-defined environments.
package cel
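The package doc describes the parse/check/evaluate flow that env.go below implements. A minimal end-to-end sketch against this API (cel.Variable and cel.StringType are assumed to be available in this vendored cel-go release; error handling kept short):

package main

import (
	"fmt"
	"log"

	"github.com/google/cel-go/cel"
)

func main() {
	// Declare an environment with one string variable.
	env, err := cel.NewEnv(cel.Variable("name", cel.StringType))
	if err != nil {
		log.Fatal(err)
	}
	// Compile = Parse + Check.
	ast, iss := env.Compile(`"hello " + name`)
	if iss != nil && iss.Err() != nil {
		log.Fatal(iss.Err())
	}
	// Plan the program and evaluate it against an activation.
	prg, err := env.Program(ast)
	if err != nil {
		log.Fatal(err)
	}
	out, _, err := prg.Eval(map[string]interface{}{"name": "world"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Value()) // hello world
}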
1179
vendor/github.com/google/cel-go/cel/decls.go
generated
vendored
Normal file
File diff suppressed because it is too large
613
vendor/github.com/google/cel-go/cel/env.go
generated
vendored
Normal file
@ -0,0 +1,613 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cel

import (
	"errors"
	"fmt"
	"sync"

	"github.com/google/cel-go/checker"
	"github.com/google/cel-go/checker/decls"
	"github.com/google/cel-go/common"
	"github.com/google/cel-go/common/containers"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
	"github.com/google/cel-go/interpreter"
	"github.com/google/cel-go/parser"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// Source interface representing a user-provided expression.
type Source = common.Source

// Ast representing the checked or unchecked expression, its source, and related metadata such as
// source position information.
type Ast struct {
	expr    *exprpb.Expr
	info    *exprpb.SourceInfo
	source  Source
	refMap  map[int64]*exprpb.Reference
	typeMap map[int64]*exprpb.Type
}

// Expr returns the proto serializable instance of the parsed/checked expression.
func (ast *Ast) Expr() *exprpb.Expr {
	return ast.expr
}

// IsChecked returns whether the Ast value has been successfully type-checked.
func (ast *Ast) IsChecked() bool {
	return ast.typeMap != nil && len(ast.typeMap) > 0
}

// SourceInfo returns character offset and newline position information about expression elements.
func (ast *Ast) SourceInfo() *exprpb.SourceInfo {
	return ast.info
}

// ResultType returns the output type of the expression if the Ast has been type-checked, else
// returns decls.Dyn as the parse step cannot infer the type.
//
// Deprecated: use OutputType
func (ast *Ast) ResultType() *exprpb.Type {
	if !ast.IsChecked() {
		return decls.Dyn
	}
	return ast.typeMap[ast.expr.GetId()]
}

// OutputType returns the output type of the expression if the Ast has been type-checked, else
// returns cel.DynType as the parse step cannot infer types.
func (ast *Ast) OutputType() *Type {
	t, err := ExprTypeToType(ast.ResultType())
	if err != nil {
		return DynType
	}
	return t
}

// Source returns a view of the input used to create the Ast. This source may be complete or
// constructed from the SourceInfo.
func (ast *Ast) Source() Source {
	return ast.source
}

// FormatType converts a type message into a string representation.
func FormatType(t *exprpb.Type) string {
	return checker.FormatCheckedType(t)
}

// Env encapsulates the context necessary to perform parsing, type checking, or generation of
// evaluable programs for different expressions.
type Env struct {
	Container       *containers.Container
	functions       map[string]*functionDecl
	declarations    []*exprpb.Decl
	macros          []parser.Macro
	adapter         ref.TypeAdapter
	provider        ref.TypeProvider
	features        map[int]bool
	appliedFeatures map[int]bool

	// Internal parser representation
	prsr *parser.Parser

	// Internal checker representation
	chk     *checker.Env
	chkErr  error
	chkOnce sync.Once
	chkOpts []checker.Option

	// Program options tied to the environment
	progOpts []ProgramOption
}

// NewEnv creates a program environment configured with the standard library of CEL functions and
// macros. The Env value returned can parse and check any CEL program which builds upon the core
// features documented in the CEL specification.
//
// See the EnvOption helper functions for the options that can be used to configure the
// environment.
func NewEnv(opts ...EnvOption) (*Env, error) {
	// Extend the statically configured standard environment, disabling eager validation to ensure
	// the cost of setup for the environment is still just as cheap as it is in v0.11.x and earlier
	// releases. The user provided options can easily re-enable the eager validation as they are
	// processed after this default option.
	stdOpts := append([]EnvOption{EagerlyValidateDeclarations(false)}, opts...)
	env, err := getStdEnv()
	if err != nil {
		return nil, err
	}
	return env.Extend(stdOpts...)
}

// NewCustomEnv creates a custom program environment which is not automatically configured with the
// standard library of functions and macros documented in the CEL spec.
//
// The purpose for using a custom environment might be for subsetting the standard library produced
// by the cel.StdLib() function. Subsetting CEL is a core aspect of its design that allows users to
// limit the compute and memory impact of a CEL program by controlling the functions and macros
// that may appear in a given expression.
//
// See the EnvOption helper functions for the options that can be used to configure the
// environment.
func NewCustomEnv(opts ...EnvOption) (*Env, error) {
	registry, err := types.NewRegistry()
	if err != nil {
		return nil, err
	}
	return (&Env{
		declarations:    []*exprpb.Decl{},
		functions:       map[string]*functionDecl{},
		macros:          []parser.Macro{},
		Container:       containers.DefaultContainer,
		adapter:         registry,
		provider:        registry,
		features:        map[int]bool{},
		appliedFeatures: map[int]bool{},
		progOpts:        []ProgramOption{},
	}).configure(opts)
}
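Continuing the earlier sketch, a hedged fragment showing NewCustomEnv with the standard library explicitly opted back in via StdLib (referenced in the doc comment above):

custom, err := cel.NewCustomEnv(cel.StdLib())
if err != nil {
	log.Fatal(err)
}
// Without StdLib() even `"a" + "b"` would fail to check, since a custom
// environment starts with no functions or macros at all.
if _, iss := custom.Compile(`"a" + "b"`); iss != nil && iss.Err() != nil {
	log.Fatal(iss.Err())
}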

// Check performs type-checking on the input Ast and yields a checked Ast and/or set of Issues.
//
// Checking has failed if the returned Issues value and its Issues.Err() value are non-nil.
// Issues should be inspected if they are non-nil, but may not represent a fatal error.
//
// It is possible to have both non-nil Ast and Issues values returned from this call: however,
// the mere presence of an Ast does not imply that it is valid for use.
func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
	// Note, errors aren't currently possible on the Ast to ParsedExpr conversion.
	pe, _ := AstToParsedExpr(ast)

	// Construct the internal checker env, erroring if there is an issue adding the declarations.
	err := e.initChecker()
	if err != nil {
		errs := common.NewErrors(ast.Source())
		errs.ReportError(common.NoLocation, e.chkErr.Error())
		return nil, NewIssues(errs)
	}

	res, errs := checker.Check(pe, ast.Source(), e.chk)
	if len(errs.GetErrors()) > 0 {
		return nil, NewIssues(errs)
	}
	// Manually create the Ast to ensure that the Ast source information (which may be more
	// detailed than the information provided by Check), is returned to the caller.
	return &Ast{
		source:  ast.Source(),
		expr:    res.GetExpr(),
		info:    res.GetSourceInfo(),
		refMap:  res.GetReferenceMap(),
		typeMap: res.GetTypeMap()}, nil
}

// Compile combines the Parse and Check phases of CEL program compilation to produce an Ast and
// associated issues.
//
// If an error is encountered during parsing the Compile step will not continue with the Check
// phase. If non-error issues are encountered during Parse, they may be combined with any issues
// discovered during Check.
//
// Note, for parse-only uses of CEL use Parse.
func (e *Env) Compile(txt string) (*Ast, *Issues) {
	return e.CompileSource(common.NewTextSource(txt))
}

// CompileSource combines the Parse and Check phases of CEL program compilation to produce an Ast
// and associated issues.
//
// If an error is encountered during parsing the CompileSource step will not continue with the
// Check phase. If non-error issues are encountered during Parse, they may be combined with any
// issues discovered during Check.
//
// Note, for parse-only uses of CEL use Parse.
func (e *Env) CompileSource(src Source) (*Ast, *Issues) {
	ast, iss := e.ParseSource(src)
	if iss.Err() != nil {
		return nil, iss
	}
	checked, iss2 := e.Check(ast)
	if iss2.Err() != nil {
		return nil, iss2
	}
	return checked, iss2
}
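A short fragment of the Issues idiom the Compile doc comments describe (continuing the env from the earlier sketch):

ast, iss := env.Compile(`1 + 2 * 3`)
if iss != nil && iss.Err() != nil {
	log.Fatal(iss.Err()) // fatal parse or check error
}
fmt.Println(ast.OutputType()) // int, since the expression type-checked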

// Extend the current environment with additional options to produce a new Env.
//
// Note, the extended Env value should not share memory with the original. It is possible, however,
// that a CustomTypeAdapter or CustomTypeProvider options could provide values which are mutable.
// To ensure separation of state between extended environments either make sure the TypeAdapter and
// TypeProvider are immutable, or that their underlying implementations are based on the
// ref.TypeRegistry which provides a Copy method which will be invoked by this method.
func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
	if e.chkErr != nil {
		return nil, e.chkErr
	}

	// The type-checker is configured with Declarations. The declarations may either be provided
	// as options which have not yet been validated, or may come from a previous checker instance
	// whose types have already been validated.
	chkOptsCopy := make([]checker.Option, len(e.chkOpts))
	copy(chkOptsCopy, e.chkOpts)

	// Copy the declarations if needed.
	decsCopy := []*exprpb.Decl{}
	if e.chk != nil {
		// If the type-checker has already been instantiated, then the e.declarations have been
		// validated within the chk instance.
		chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(e.chk))
	} else {
		// If the type-checker has not been instantiated, ensure the unvalidated declarations are
		// provided to the extended Env instance.
		decsCopy = make([]*exprpb.Decl, len(e.declarations))
		copy(decsCopy, e.declarations)
	}

	// Copy macros and program options
	macsCopy := make([]parser.Macro, len(e.macros))
	progOptsCopy := make([]ProgramOption, len(e.progOpts))
	copy(macsCopy, e.macros)
	copy(progOptsCopy, e.progOpts)

	// Copy the adapter / provider if they appear to be mutable.
	adapter := e.adapter
	provider := e.provider
	adapterReg, isAdapterReg := e.adapter.(ref.TypeRegistry)
	providerReg, isProviderReg := e.provider.(ref.TypeRegistry)
	// In most cases the provider and adapter will be a ref.TypeRegistry;
	// however, in the rare cases where they are not, they are assumed to
	// be immutable. Since it is possible to set the TypeProvider separately
	// from the TypeAdapter, the possible configurations which could use a
	// TypeRegistry as the base implementation are captured below.
	if isAdapterReg && isProviderReg {
		reg := providerReg.Copy()
		provider = reg
		// If the adapter and provider are the same object, set the adapter
		// to the same ref.TypeRegistry as the provider.
		if adapterReg == providerReg {
			adapter = reg
		} else {
			// Otherwise, make a copy of the adapter.
			adapter = adapterReg.Copy()
		}
	} else if isProviderReg {
		provider = providerReg.Copy()
	} else if isAdapterReg {
		adapter = adapterReg.Copy()
	}

	featuresCopy := make(map[int]bool, len(e.features))
	for k, v := range e.features {
		featuresCopy[k] = v
	}
	appliedFeaturesCopy := make(map[int]bool, len(e.appliedFeatures))
	for k, v := range e.appliedFeatures {
		appliedFeaturesCopy[k] = v
	}
	funcsCopy := make(map[string]*functionDecl, len(e.functions))
	for k, v := range e.functions {
		funcsCopy[k] = v
	}

	// TODO: functions copy needs to happen here.
	ext := &Env{
		Container:       e.Container,
		declarations:    decsCopy,
		functions:       funcsCopy,
		macros:          macsCopy,
		progOpts:        progOptsCopy,
		adapter:         adapter,
		features:        featuresCopy,
		appliedFeatures: appliedFeaturesCopy,
		provider:        provider,
		chkOpts:         chkOptsCopy,
	}
	return ext.configure(opts)
}

// HasFeature checks whether the environment enables the given feature
// flag, as enumerated in options.go.
func (e *Env) HasFeature(flag int) bool {
	enabled, has := e.features[flag]
	return has && enabled
}
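Extend derives a child environment without mutating the parent, per the copy logic above; a hedged fragment continuing the sketch (cel.IntType assumed from this release):

// The child sees both the inherited "name" variable and the new "age".
child, err := env.Extend(cel.Variable("age", cel.IntType))
if err != nil {
	log.Fatal(err)
}
if _, iss := child.Compile(`name + " is " + string(age)`); iss != nil && iss.Err() != nil {
	log.Fatal(iss.Err())
}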

// Parse parses the input expression value `txt` to an Ast and/or a set of Issues.
//
// This form of Parse creates a Source value for the input `txt` and forwards to the
// ParseSource method.
func (e *Env) Parse(txt string) (*Ast, *Issues) {
	src := common.NewTextSource(txt)
	return e.ParseSource(src)
}

// ParseSource parses the input source to an Ast and/or set of Issues.
//
// Parsing has failed if the returned Issues value and its Issues.Err() value are non-nil.
// Issues should be inspected if they are non-nil, but may not represent a fatal error.
//
// It is possible to have both non-nil Ast and Issues values returned from this call; however,
// the mere presence of an Ast does not imply that it is valid for use.
func (e *Env) ParseSource(src Source) (*Ast, *Issues) {
	res, errs := e.prsr.Parse(src)
	if len(errs.GetErrors()) > 0 {
		return nil, &Issues{errs: errs}
	}
	// Manually create the Ast to ensure that the text source information is propagated on
	// subsequent calls to Check.
	return &Ast{
		source: src,
		expr:   res.GetExpr(),
		info:   res.GetSourceInfo()}, nil
}

// Program generates an evaluable instance of the Ast within the environment (Env).
func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) {
	optSet := e.progOpts
	if len(opts) != 0 {
		mergedOpts := []ProgramOption{}
		mergedOpts = append(mergedOpts, e.progOpts...)
		mergedOpts = append(mergedOpts, opts...)
		optSet = mergedOpts
	}
	return newProgram(e, ast, optSet)
}
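Program merges environment-level ProgramOptions with per-call ones; a hedged fragment enabling evaluation-state tracking (cel.EvalOptions and cel.OptTrackState are assumed from this release's options.go):

prg, err := env.Program(ast, cel.EvalOptions(cel.OptTrackState))
if err != nil {
	log.Fatal(err)
}
out, details, err := prg.Eval(map[string]interface{}{"name": "world"})
if err != nil {
	log.Fatal(err)
}
fmt.Println(out.Value())
_ = details // EvalDetails carries the tracked state used by ResidualAst below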

// TypeAdapter returns the `ref.TypeAdapter` configured for the environment.
func (e *Env) TypeAdapter() ref.TypeAdapter {
	return e.adapter
}

// TypeProvider returns the `ref.TypeProvider` configured for the environment.
func (e *Env) TypeProvider() ref.TypeProvider {
	return e.provider
}

// UnknownVars returns an interpreter.PartialActivation which marks all variables
// declared in the Env as unknown AttributePattern values.
//
// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation
// unless the PartialAttributes option is provided as a ProgramOption.
func (e *Env) UnknownVars() interpreter.PartialActivation {
	var unknownPatterns []*interpreter.AttributePattern
	for _, d := range e.declarations {
		switch d.GetDeclKind().(type) {
		case *exprpb.Decl_Ident:
			unknownPatterns = append(unknownPatterns,
				interpreter.NewAttributePattern(d.GetName()))
		}
	}
	part, _ := PartialVars(
		interpreter.EmptyActivation(),
		unknownPatterns...)
	return part
}

// ResidualAst takes an Ast and its EvalDetails to produce a new Ast which only contains the
// attribute references which are unknown.
//
// Residual expressions are beneficial in a few scenarios:
//
// - Optimizing constant expression evaluations away.
// - Indexing and pruning expressions based on known input arguments.
// - Surfacing additional requirements that are needed in order to complete an evaluation.
// - Sharing the evaluation of an expression across multiple machines/nodes.
//
// For example, if an expression targets a 'resource' and 'request' attribute and the possible
// values for the resource are known, a PartialActivation could mark the 'request' as an unknown
// interpreter.AttributePattern and the resulting ResidualAst would be reduced to only the parts
// of the expression that reference the 'request'.
//
// Note, the expression ids within the residual AST generated through this method have no
// correlation to the expression ids of the original AST.
//
// See the PartialVars helper for how to construct a PartialActivation.
//
// TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an
// Ast format and then Program again.
func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
	pruned := interpreter.PruneAst(a.Expr(), details.State())
	expr, err := AstToString(ParsedExprToAst(&exprpb.ParsedExpr{Expr: pruned}))
	if err != nil {
		return nil, err
	}
	parsed, iss := e.Parse(expr)
	if iss != nil && iss.Err() != nil {
		return nil, iss.Err()
	}
	if !a.IsChecked() {
		return parsed, nil
	}
	checked, iss := e.Check(parsed)
	if iss != nil && iss.Err() != nil {
		return nil, iss.Err()
	}
	return checked, nil
}
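Putting UnknownVars and ResidualAst together, a hedged sketch of partial evaluation (cel.OptPartialEval and cel.AstToString are assumed from this release; see PartialVars for the activation side):

// Evaluate with every declared variable marked unknown, then prune the
// AST down to whatever still depends on those unknowns.
prg, _ := env.Program(ast, cel.EvalOptions(cel.OptTrackState, cel.OptPartialEval))
_, details, _ := prg.Eval(env.UnknownVars())
residual, err := env.ResidualAst(ast, details)
if err == nil {
	src, _ := cel.AstToString(residual)
	fmt.Println(src) // the expression reduced to its unknown parts
}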
|
||||
// EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and
|
||||
// extension functions provided by estimator.
|
||||
func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator) (checker.CostEstimate, error) {
|
||||
checked, err := AstToCheckedExpr(ast)
|
||||
if err != nil {
|
||||
return checker.CostEstimate{}, fmt.Errorf("EsimateCost could not inspect Ast: %v", err)
|
||||
}
|
||||
return checker.Cost(checked, estimator), nil
|
||||
}
|
||||
|
||||
// configure applies a series of EnvOptions to the current environment.
|
||||
func (e *Env) configure(opts []EnvOption) (*Env, error) {
|
||||
// Customized the environment using the provided EnvOption values. If an error is
|
||||
// generated at any step this, will be returned as a nil Env with a non-nil error.
|
||||
var err error
|
||||
for _, opt := range opts {
|
||||
e, err = opt(e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// If the default UTC timezone fix has been enabled, make sure the library is configured
|
||||
if e.HasFeature(featureDefaultUTCTimeZone) {
|
||||
if _, found := e.appliedFeatures[featureDefaultUTCTimeZone]; !found {
|
||||
e, err = Lib(timeUTCLibrary{})(e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// record that the feature has been applied since it will generate declarations
|
||||
// and functions which will be propagated on Extend() calls and which should only
|
||||
// be registered once.
|
||||
e.appliedFeatures[featureDefaultUTCTimeZone] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize all of the functions configured within the environment.
|
||||
for _, fn := range e.functions {
|
||||
err = fn.init()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Configure the parser.
|
||||
prsrOpts := []parser.Option{parser.Macros(e.macros...)}
|
||||
if e.HasFeature(featureEnableMacroCallTracking) {
|
||||
prsrOpts = append(prsrOpts, parser.PopulateMacroCalls(true))
|
||||
}
|
||||
e.prsr, err = parser.NewParser(prsrOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Ensure that the checker init happens eagerly rather than lazily.
|
||||
if e.HasFeature(featureEagerlyValidateDeclarations) {
|
||||
err := e.initChecker()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return e, nil
|
||||
}
|

func (e *Env) initChecker() error {
	e.chkOnce.Do(func() {
		chkOpts := []checker.Option{}
		chkOpts = append(chkOpts, e.chkOpts...)
		chkOpts = append(chkOpts,
			checker.HomogeneousAggregateLiterals(
				e.HasFeature(featureDisableDynamicAggregateLiterals)),
			checker.CrossTypeNumericComparisons(
				e.HasFeature(featureCrossTypeNumericComparisons)))

		ce, err := checker.NewEnv(e.Container, e.provider, chkOpts...)
		if err != nil {
			e.chkErr = err
			return
		}
		// Add the statically configured declarations.
		err = ce.Add(e.declarations...)
		if err != nil {
			e.chkErr = err
			return
		}
		// Add the function declarations which are derived from the FunctionDecl instances.
		for _, fn := range e.functions {
			fnDecl, err := functionDeclToExprDecl(fn)
			if err != nil {
				e.chkErr = err
				return
			}
			err = ce.Add(fnDecl)
			if err != nil {
				e.chkErr = err
				return
			}
		}
		// Store the fully configured checker environment.
		e.chk = ce
	})
	return e.chkErr
}

// Issues defines methods for inspecting the error details of parse and check calls.
//
// Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct.
type Issues struct {
	errs *common.Errors
}

// NewIssues returns an Issues struct from a common.Errors object.
func NewIssues(errs *common.Errors) *Issues {
	return &Issues{
		errs: errs,
	}
}

// Err returns an error value if the issues list contains one or more errors.
func (i *Issues) Err() error {
	if i == nil {
		return nil
	}
	if len(i.Errors()) > 0 {
		return errors.New(i.String())
	}
	return nil
}

// Errors returns the collection of errors encountered in more granular detail.
func (i *Issues) Errors() []common.Error {
	if i == nil {
		return []common.Error{}
	}
	return i.errs.GetErrors()
}

// Append collects the issues from another Issues struct into a new Issues object.
func (i *Issues) Append(other *Issues) *Issues {
	if i == nil {
		return other
	}
	if other == nil {
		return i
	}
	return NewIssues(i.errs.Append(other.errs.GetErrors()))
}

// String converts the issues to a suitable display string.
func (i *Issues) String() string {
	if i == nil {
		return ""
	}
	return i.errs.ToDisplayString()
}
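
// Usage sketch: Err(), Errors(), and String() are nil-receiver safe, so the
// result of a parse or check call can be tested directly (expression is
// hypothetical):
//
//	ast, iss := env.Parse("1 + ''")
//	if iss.Err() != nil {
//	    return iss.Err() // aggregated, display-formatted error
//	}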

// getStdEnv lazily initializes the CEL standard environment.
func getStdEnv() (*Env, error) {
	stdEnvInit.Do(func() {
		stdEnv, stdEnvErr = NewCustomEnv(StdLib(), EagerlyValidateDeclarations(true))
	})
	return stdEnv, stdEnvErr
}

var (
	stdEnvInit sync.Once
	stdEnv     *Env
	stdEnvErr  error
)

280 vendor/github.com/google/cel-go/cel/io.go generated vendored Normal file
@@ -0,0 +1,280 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cel

import (
	"errors"
	"fmt"
	"reflect"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
	"github.com/google/cel-go/common/types/traits"
	"github.com/google/cel-go/parser"

	"google.golang.org/protobuf/proto"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
	anypb "google.golang.org/protobuf/types/known/anypb"
)

// CheckedExprToAst converts a checked expression proto message to an Ast.
func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast {
	return CheckedExprToAstWithSource(checkedExpr, nil)
}

// CheckedExprToAstWithSource converts a checked expression proto message to an Ast,
// using the provided Source as the textual contents.
//
// In general the source is not necessary unless the AST has been modified between the
// `Parse` and `Check` calls as an `Ast` created from the `Parse` step will carry the source
// through future calls.
//
// Prefer CheckedExprToAst if loading expressions from storage.
func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) *Ast {
	refMap := checkedExpr.GetReferenceMap()
	if refMap == nil {
		refMap = map[int64]*exprpb.Reference{}
	}
	typeMap := checkedExpr.GetTypeMap()
	if typeMap == nil {
		typeMap = map[int64]*exprpb.Type{}
	}
	si := checkedExpr.GetSourceInfo()
	if si == nil {
		si = &exprpb.SourceInfo{}
	}
	if src == nil {
		src = common.NewInfoSource(si)
	}
	return &Ast{
		expr:    checkedExpr.GetExpr(),
		info:    si,
		source:  src,
		refMap:  refMap,
		typeMap: typeMap,
	}
}

// AstToCheckedExpr converts an Ast to a protobuf CheckedExpr value.
//
// If Ast.IsChecked() returns false, this conversion method will return an error.
func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) {
	if !a.IsChecked() {
		return nil, fmt.Errorf("cannot convert unchecked ast")
	}
	return &exprpb.CheckedExpr{
		Expr:         a.Expr(),
		SourceInfo:   a.SourceInfo(),
		ReferenceMap: a.refMap,
		TypeMap:      a.typeMap,
	}, nil
}
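
// Round-trip sketch: a checked Ast can be serialized through its proto form
// and later restored without re-parsing or re-checking (storage left abstract):
//
//	expr, _ := AstToCheckedExpr(checkedAst)
//	data, _ := proto.Marshal(expr)
//	restored := &exprpb.CheckedExpr{}
//	_ = proto.Unmarshal(data, restored)
//	ast := CheckedExprToAst(restored)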

// ParsedExprToAst converts a parsed expression proto message to an Ast.
func ParsedExprToAst(parsedExpr *exprpb.ParsedExpr) *Ast {
	return ParsedExprToAstWithSource(parsedExpr, nil)
}

// ParsedExprToAstWithSource converts a parsed expression proto message to an Ast,
// using the provided Source as the textual contents.
//
// In general you only need this if you need to recheck a previously checked
// expression, or if you need to separately check a subset of an expression.
//
// Prefer ParsedExprToAst if loading expressions from storage.
func ParsedExprToAstWithSource(parsedExpr *exprpb.ParsedExpr, src Source) *Ast {
	si := parsedExpr.GetSourceInfo()
	if si == nil {
		si = &exprpb.SourceInfo{}
	}
	if src == nil {
		src = common.NewInfoSource(si)
	}
	return &Ast{
		expr:   parsedExpr.GetExpr(),
		info:   si,
		source: src,
	}
}

// AstToParsedExpr converts an Ast to a protobuf ParsedExpr value.
func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) {
	return &exprpb.ParsedExpr{
		Expr:       a.Expr(),
		SourceInfo: a.SourceInfo(),
	}, nil
}

// AstToString converts an Ast back to a string if possible.
//
// Note, the conversion may not be an exact replica of the original expression, but will produce
// a string that is semantically equivalent and whose textual representation is stable.
func AstToString(a *Ast) (string, error) {
	expr := a.Expr()
	info := a.SourceInfo()
	return parser.Unparse(expr, info)
}

// RefValueToValue converts a ref.Val to an api.expr.Value.
// The result Value is the serialized proto form. The ref.Val must not be an error or unknown value.
func RefValueToValue(res ref.Val) (*exprpb.Value, error) {
	switch res.Type() {
	case types.BoolType:
		return &exprpb.Value{
			Kind: &exprpb.Value_BoolValue{BoolValue: res.Value().(bool)}}, nil
	case types.BytesType:
		return &exprpb.Value{
			Kind: &exprpb.Value_BytesValue{BytesValue: res.Value().([]byte)}}, nil
	case types.DoubleType:
		return &exprpb.Value{
			Kind: &exprpb.Value_DoubleValue{DoubleValue: res.Value().(float64)}}, nil
	case types.IntType:
		return &exprpb.Value{
			Kind: &exprpb.Value_Int64Value{Int64Value: res.Value().(int64)}}, nil
	case types.ListType:
		l := res.(traits.Lister)
		sz := l.Size().(types.Int)
		elts := make([]*exprpb.Value, 0, int64(sz))
		for i := types.Int(0); i < sz; i++ {
			v, err := RefValueToValue(l.Get(i))
			if err != nil {
				return nil, err
			}
			elts = append(elts, v)
		}
		return &exprpb.Value{
			Kind: &exprpb.Value_ListValue{
				ListValue: &exprpb.ListValue{Values: elts}}}, nil
	case types.MapType:
		mapper := res.(traits.Mapper)
		sz := mapper.Size().(types.Int)
		entries := make([]*exprpb.MapValue_Entry, 0, int64(sz))
		for it := mapper.Iterator(); it.HasNext().(types.Bool); {
			k := it.Next()
			v := mapper.Get(k)
			kv, err := RefValueToValue(k)
			if err != nil {
				return nil, err
			}
			vv, err := RefValueToValue(v)
			if err != nil {
				return nil, err
			}
			entries = append(entries, &exprpb.MapValue_Entry{Key: kv, Value: vv})
		}
		return &exprpb.Value{
			Kind: &exprpb.Value_MapValue{
				MapValue: &exprpb.MapValue{Entries: entries}}}, nil
	case types.NullType:
		return &exprpb.Value{
			Kind: &exprpb.Value_NullValue{}}, nil
	case types.StringType:
		return &exprpb.Value{
			Kind: &exprpb.Value_StringValue{StringValue: res.Value().(string)}}, nil
	case types.TypeType:
		typeName := res.(ref.Type).TypeName()
		return &exprpb.Value{Kind: &exprpb.Value_TypeValue{TypeValue: typeName}}, nil
	case types.UintType:
		return &exprpb.Value{
			Kind: &exprpb.Value_Uint64Value{Uint64Value: res.Value().(uint64)}}, nil
	default:
		any, err := res.ConvertToNative(anyPbType)
		if err != nil {
			return nil, err
		}
		return &exprpb.Value{
			Kind: &exprpb.Value_ObjectValue{ObjectValue: any.(*anypb.Any)}}, nil
	}
}

var (
	typeNameToTypeValue = map[string]*types.TypeValue{
		"bool":      types.BoolType,
		"bytes":     types.BytesType,
		"double":    types.DoubleType,
		"null_type": types.NullType,
		"int":       types.IntType,
		"list":      types.ListType,
		"map":       types.MapType,
		"string":    types.StringType,
		"type":      types.TypeType,
		"uint":      types.UintType,
	}

	anyPbType = reflect.TypeOf(&anypb.Any{})
)

// ValueToRefValue converts an exprpb.Value to a ref.Val.
func ValueToRefValue(adapter ref.TypeAdapter, v *exprpb.Value) (ref.Val, error) {
	switch v.Kind.(type) {
	case *exprpb.Value_NullValue:
		return types.NullValue, nil
	case *exprpb.Value_BoolValue:
		return types.Bool(v.GetBoolValue()), nil
	case *exprpb.Value_Int64Value:
		return types.Int(v.GetInt64Value()), nil
	case *exprpb.Value_Uint64Value:
		return types.Uint(v.GetUint64Value()), nil
	case *exprpb.Value_DoubleValue:
		return types.Double(v.GetDoubleValue()), nil
	case *exprpb.Value_StringValue:
		return types.String(v.GetStringValue()), nil
	case *exprpb.Value_BytesValue:
		return types.Bytes(v.GetBytesValue()), nil
	case *exprpb.Value_ObjectValue:
		any := v.GetObjectValue()
		msg, err := anypb.UnmarshalNew(any, proto.UnmarshalOptions{DiscardUnknown: true})
		if err != nil {
			return nil, err
		}
		return adapter.NativeToValue(msg), nil
	case *exprpb.Value_MapValue:
		m := v.GetMapValue()
		entries := make(map[ref.Val]ref.Val)
		for _, entry := range m.Entries {
			key, err := ValueToRefValue(adapter, entry.Key)
			if err != nil {
				return nil, err
			}
			pb, err := ValueToRefValue(adapter, entry.Value)
			if err != nil {
				return nil, err
			}
			entries[key] = pb
		}
		return adapter.NativeToValue(entries), nil
	case *exprpb.Value_ListValue:
		l := v.GetListValue()
		elts := make([]ref.Val, len(l.Values))
		for i, e := range l.Values {
			rv, err := ValueToRefValue(adapter, e)
			if err != nil {
				return nil, err
			}
			elts[i] = rv
		}
		return adapter.NativeToValue(elts), nil
	case *exprpb.Value_TypeValue:
		typeName := v.GetTypeValue()
		tv, ok := typeNameToTypeValue[typeName]
		if ok {
			return tv, nil
		}
		return types.NewObjectTypeValue(typeName), nil
	}
	return nil, errors.New("unknown value")
}
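
// Conversion sketch: a non-error, non-unknown evaluation result can be shipped
// as a proto Value and rehydrated with a type adapter (in upstream cel-go the
// adapter is typically obtained via env.TypeAdapter(); assumed here):
//
//	pbVal, err := RefValueToValue(out)
//	...
//	rehydrated, err := ValueToRefValue(adapter, pbVal)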

343 vendor/github.com/google/cel-go/cel/library.go generated vendored Normal file
@@ -0,0 +1,343 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cel

import (
	"strconv"
	"strings"
	"time"

	"github.com/google/cel-go/checker"
	"github.com/google/cel-go/common/overloads"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
	"github.com/google/cel-go/interpreter/functions"
)

// Library provides a collection of EnvOption and ProgramOption values used to configure a CEL
// environment for a particular use case or with a related set of functionality.
//
// Note, the ProgramOption values provided by a library are expected to be static and not vary
// between calls to Env.Program(). If there is a need for such dynamic configuration, prefer to
// configure these options outside the Library and within the Env.Program() call directly.
type Library interface {
	// CompileOptions returns a collection of functional options for configuring the Parse / Check
	// environment.
	CompileOptions() []EnvOption

	// ProgramOptions returns a collection of functional options which should be included in every
	// Program generated from the Env.Program() call.
	ProgramOptions() []ProgramOption
}

// Lib creates an EnvOption out of a Library, allowing libraries to be provided as functional args,
// and to be linked to each other.
func Lib(l Library) EnvOption {
	return func(e *Env) (*Env, error) {
		var err error
		for _, opt := range l.CompileOptions() {
			e, err = opt(e)
			if err != nil {
				return nil, err
			}
		}
		e.progOpts = append(e.progOpts, l.ProgramOptions()...)
		return e, nil
	}
}
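
// Sketch of a user-defined Library (names hypothetical): compile-time
// declarations travel with the program options that implement them, and the
// pair is installed with Lib:
//
//	type regionLib struct{}
//
//	func (regionLib) CompileOptions() []EnvOption {
//	    return []EnvOption{Declarations(decls.NewVar("region", decls.String))}
//	}
//
//	func (regionLib) ProgramOptions() []ProgramOption { return nil }
//
//	env, err := NewCustomEnv(StdLib(), Lib(regionLib{}))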

// StdLib returns an EnvOption for the standard library of CEL functions and macros.
func StdLib() EnvOption {
	return Lib(stdLibrary{})
}

// stdLibrary implements the Library interface and provides functional options for the core CEL
// features documented in the specification.
type stdLibrary struct{}

// CompileOptions returns options for the standard CEL function declarations and macros.
func (stdLibrary) CompileOptions() []EnvOption {
	return []EnvOption{
		Declarations(checker.StandardDeclarations()...),
		Macros(StandardMacros...),
	}
}

// ProgramOptions returns function implementations for the standard CEL functions.
func (stdLibrary) ProgramOptions() []ProgramOption {
	return []ProgramOption{
		Functions(functions.StandardOverloads()...),
	}
}

type timeUTCLibrary struct{}

func (timeUTCLibrary) CompileOptions() []EnvOption {
	return timeOverloadDeclarations
}

func (timeUTCLibrary) ProgramOptions() []ProgramOption {
	return []ProgramOption{}
}

// Declarations and functions which enable using UTC on time.Time inputs when the timezone is
// unspecified in the CEL expression.
var (
	utcTZ = types.String("UTC")

	timeOverloadDeclarations = []EnvOption{
		Function(overloads.TimeGetHours,
			MemberOverload(overloads.DurationToHours, []*Type{DurationType}, IntType,
				UnaryBinding(func(dur ref.Val) ref.Val {
					d := dur.(types.Duration)
					return types.Int(d.Hours())
				}))),
		Function(overloads.TimeGetMinutes,
			MemberOverload(overloads.DurationToMinutes, []*Type{DurationType}, IntType,
				UnaryBinding(func(dur ref.Val) ref.Val {
					d := dur.(types.Duration)
					return types.Int(d.Minutes())
				}))),
		Function(overloads.TimeGetSeconds,
			MemberOverload(overloads.DurationToSeconds, []*Type{DurationType}, IntType,
				UnaryBinding(func(dur ref.Val) ref.Val {
					d := dur.(types.Duration)
					return types.Int(d.Seconds())
				}))),
		Function(overloads.TimeGetMilliseconds,
			MemberOverload(overloads.DurationToMilliseconds, []*Type{DurationType}, IntType,
				UnaryBinding(func(dur ref.Val) ref.Val {
					d := dur.(types.Duration)
					return types.Int(d.Milliseconds())
				}))),
		Function(overloads.TimeGetFullYear,
			MemberOverload(overloads.TimestampToYear, []*Type{TimestampType}, IntType,
				UnaryBinding(func(ts ref.Val) ref.Val {
					return timestampGetFullYear(ts, utcTZ)
				}),
			),
			MemberOverload(overloads.TimestampToYearWithTz, []*Type{TimestampType, StringType}, IntType,
				BinaryBinding(timestampGetFullYear),
			),
		),
		Function(overloads.TimeGetMonth,
			MemberOverload(overloads.TimestampToMonth, []*Type{TimestampType}, IntType,
				UnaryBinding(func(ts ref.Val) ref.Val {
					return timestampGetMonth(ts, utcTZ)
				}),
			),
			MemberOverload(overloads.TimestampToMonthWithTz, []*Type{TimestampType, StringType}, IntType,
				BinaryBinding(timestampGetMonth),
			),
		),
		Function(overloads.TimeGetDayOfYear,
			MemberOverload(overloads.TimestampToDayOfYear, []*Type{TimestampType}, IntType,
				UnaryBinding(func(ts ref.Val) ref.Val {
					return timestampGetDayOfYear(ts, utcTZ)
				}),
			),
			MemberOverload(overloads.TimestampToDayOfYearWithTz, []*Type{TimestampType, StringType}, IntType,
				BinaryBinding(func(ts, tz ref.Val) ref.Val {
					return timestampGetDayOfYear(ts, tz)
				}),
			),
		),
		Function(overloads.TimeGetDayOfMonth,
			MemberOverload(overloads.TimestampToDayOfMonthZeroBased, []*Type{TimestampType}, IntType,
				UnaryBinding(func(ts ref.Val) ref.Val {
					return timestampGetDayOfMonthZeroBased(ts, utcTZ)
				}),
			),
			MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz, []*Type{TimestampType, StringType}, IntType,
				BinaryBinding(timestampGetDayOfMonthZeroBased),
			),
		),
		Function(overloads.TimeGetDate,
			MemberOverload(overloads.TimestampToDayOfMonthOneBased, []*Type{TimestampType}, IntType,
				UnaryBinding(func(ts ref.Val) ref.Val {
					return timestampGetDayOfMonthOneBased(ts, utcTZ)
				}),
			),
			MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz, []*Type{TimestampType, StringType}, IntType,
				BinaryBinding(timestampGetDayOfMonthOneBased),
			),
		),
		Function(overloads.TimeGetDayOfWeek,
			MemberOverload(overloads.TimestampToDayOfWeek, []*Type{TimestampType}, IntType,
				UnaryBinding(func(ts ref.Val) ref.Val {
					return timestampGetDayOfWeek(ts, utcTZ)
				}),
			),
			MemberOverload(overloads.TimestampToDayOfWeekWithTz, []*Type{TimestampType, StringType}, IntType,
				BinaryBinding(timestampGetDayOfWeek),
			),
		),
		Function(overloads.TimeGetHours,
			MemberOverload(overloads.TimestampToHours, []*Type{TimestampType}, IntType,
				UnaryBinding(func(ts ref.Val) ref.Val {
					return timestampGetHours(ts, utcTZ)
				}),
			),
			MemberOverload(overloads.TimestampToHoursWithTz, []*Type{TimestampType, StringType}, IntType,
				BinaryBinding(timestampGetHours),
			),
		),
		Function(overloads.TimeGetMinutes,
			MemberOverload(overloads.TimestampToMinutes, []*Type{TimestampType}, IntType,
				UnaryBinding(func(ts ref.Val) ref.Val {
					return timestampGetMinutes(ts, utcTZ)
				}),
			),
			MemberOverload(overloads.TimestampToMinutesWithTz, []*Type{TimestampType, StringType}, IntType,
				BinaryBinding(timestampGetMinutes),
			),
		),
		Function(overloads.TimeGetSeconds,
			MemberOverload(overloads.TimestampToSeconds, []*Type{TimestampType}, IntType,
				UnaryBinding(func(ts ref.Val) ref.Val {
					return timestampGetSeconds(ts, utcTZ)
				}),
			),
			MemberOverload(overloads.TimestampToSecondsWithTz, []*Type{TimestampType, StringType}, IntType,
				BinaryBinding(timestampGetSeconds),
			),
		),
		Function(overloads.TimeGetMilliseconds,
			MemberOverload(overloads.TimestampToMilliseconds, []*Type{TimestampType}, IntType,
				UnaryBinding(func(ts ref.Val) ref.Val {
					return timestampGetMilliseconds(ts, utcTZ)
				}),
			),
			MemberOverload(overloads.TimestampToMillisecondsWithTz, []*Type{TimestampType, StringType}, IntType,
				BinaryBinding(timestampGetMilliseconds),
			),
		),
	}
)

func timestampGetFullYear(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
	}
	return types.Int(t.Year())
}

func timestampGetMonth(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
	}
	// CEL spec indicates that the month should be 0-based, but the Time value
	// for Month() is 1-based.
	return types.Int(t.Month() - 1)
}

func timestampGetDayOfYear(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
	}
	return types.Int(t.YearDay() - 1)
}

func timestampGetDayOfMonthZeroBased(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
	}
	return types.Int(t.Day() - 1)
}

func timestampGetDayOfMonthOneBased(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
	}
	return types.Int(t.Day())
}

func timestampGetDayOfWeek(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
	}
	return types.Int(t.Weekday())
}

func timestampGetHours(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
	}
	return types.Int(t.Hour())
}

func timestampGetMinutes(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
	}
	return types.Int(t.Minute())
}

func timestampGetSeconds(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
	}
	return types.Int(t.Second())
}

func timestampGetMilliseconds(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
	}
	return types.Int(t.Nanosecond() / 1000000)
}

func inTimeZone(ts, tz ref.Val) (time.Time, error) {
	t := ts.(types.Timestamp)
	val := string(tz.(types.String))
	ind := strings.Index(val, ":")
	if ind == -1 {
		loc, err := time.LoadLocation(val)
		if err != nil {
			return time.Time{}, err
		}
		return t.In(loc), nil
	}

	// If the input is not the name of a timezone (for example, 'US/Central'), it should be a
	// numerical offset from UTC in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical
	// input is parsed in terms of hours and minutes.
	hr, err := strconv.Atoi(string(val[0:ind]))
	if err != nil {
		return time.Time{}, err
	}
	min, err := strconv.Atoi(string(val[ind+1:]))
	if err != nil {
		return time.Time{}, err
	}
	var offset int
	if string(val[0]) == "-" {
		offset = hr*60 - min
	} else {
		offset = hr*60 + min
	}
	secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds())
	timezone := time.FixedZone("", secondsEastOfUTC)
	return t.In(timezone), nil
}
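
// Examples of timezone arguments accepted by inTimeZone (per the logic above):
//
//	inTimeZone(ts, types.String("US/Central")) // IANA name resolved via time.LoadLocation
//	inTimeZone(ts, types.String("-08:00"))     // fixed offset, 8 hours behind UTC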

139 vendor/github.com/google/cel-go/cel/macro.go generated vendored Normal file
@@ -0,0 +1,139 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cel

import (
	"github.com/google/cel-go/common"
	"github.com/google/cel-go/parser"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// Macro describes a function signature to match and the MacroExpander to apply.
//
// Note: when a Macro should apply to multiple overloads (based on arg count) of a given function,
// a Macro should be created per arg-count or as a var arg macro.
type Macro = parser.Macro

// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree, or an error
// if the input arguments are not suitable for the expansion requirements for the macro in question.
//
// The MacroExpander accepts as arguments a MacroExprHelper as well as the arguments used in the function call
// and produces as output an Expr ast node.
//
// Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be nil.
type MacroExpander = parser.MacroExpander

// MacroExprHelper exposes helper methods for creating new expressions within a CEL abstract syntax tree.
type MacroExprHelper = parser.ExprHelper

// NewGlobalMacro creates a Macro for a global function with the specified arg count.
func NewGlobalMacro(function string, argCount int, expander MacroExpander) Macro {
	return parser.NewGlobalMacro(function, argCount, expander)
}

// NewReceiverMacro creates a Macro for a receiver function matching the specified arg count.
func NewReceiverMacro(function string, argCount int, expander MacroExpander) Macro {
	return parser.NewReceiverMacro(function, argCount, expander)
}

// NewGlobalVarArgMacro creates a Macro for a global function with a variable arg count.
func NewGlobalVarArgMacro(function string, expander MacroExpander) Macro {
	return parser.NewGlobalVarArgMacro(function, expander)
}

// NewReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro {
	return parser.NewReceiverVarArgMacro(function, expander)
}
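
// Sketch of a custom global macro (hypothetical): a zero-argument "now()" call
// rewritten at parse time into an identifier reference, assuming the upstream
// ExprHelper Ident method:
//
//	nowMacro := NewGlobalMacro("now", 0,
//	    func(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
//	        return meh.Ident("current_timestamp"), nil
//	    })
//	// env, err := NewEnv(Macros(nowMacro), ...)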

// HasMacroExpander expands the input call arguments into a presence test, e.g. has(<operand>.field).
func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
	return parser.MakeHas(meh, target, args)
}

// ExistsMacroExpander expands the input call arguments into a comprehension that returns true if any of the
// elements in the range match the predicate expression:
//
//	<iterRange>.exists(<iterVar>, <predicate>)
func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
	return parser.MakeExists(meh, target, args)
}

// ExistsOneMacroExpander expands the input call arguments into a comprehension that returns true if exactly
// one of the elements in the range matches the predicate expression:
//
//	<iterRange>.exists_one(<iterVar>, <predicate>)
func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
	return parser.MakeExistsOne(meh, target, args)
}

// MapMacroExpander expands the input call arguments into a comprehension that transforms each element in the
// input to produce an output list.
//
// There are two call patterns supported by map:
//
//	<iterRange>.map(<iterVar>, <transform>)
//	<iterRange>.map(<iterVar>, <predicate>, <transform>)
//
// In the second form only iterVar values which return true when provided to the predicate expression
// are transformed.
func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
	return parser.MakeMap(meh, target, args)
}

// FilterMacroExpander expands the input call arguments into a comprehension which produces a list containing
// only the elements which match the provided predicate expression:
//
//	<iterRange>.filter(<iterVar>, <predicate>)
func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
	return parser.MakeFilter(meh, target, args)
}

var (
	// Aliases to each macro in the CEL standard environment.
	// Note: reassigning these macro variables may result in undefined behavior.

	// HasMacro expands "has(m.f)" which tests the presence of a field, avoiding the need to
	// specify the field as a string.
	HasMacro = parser.HasMacro

	// AllMacro expands "range.all(var, predicate)" into a comprehension which ensures that all
	// elements in the range satisfy the predicate.
	AllMacro = parser.AllMacro

	// ExistsMacro expands "range.exists(var, predicate)" into a comprehension which ensures that
	// some element in the range satisfies the predicate.
	ExistsMacro = parser.ExistsMacro

	// ExistsOneMacro expands "range.exists_one(var, predicate)", which is true if for exactly one
	// element in range the predicate holds.
	ExistsOneMacro = parser.ExistsOneMacro

	// MapMacro expands "range.map(var, function)" into a comprehension which applies the function
	// to each element in the range to produce a new list.
	MapMacro = parser.MapMacro

	// MapFilterMacro expands "range.map(var, predicate, function)" into a comprehension which
	// first filters the elements in the range by the predicate, then applies the transform function
	// to produce a new list.
	MapFilterMacro = parser.MapFilterMacro

	// FilterMacro expands "range.filter(var, predicate)" into a comprehension which filters
	// elements in the range, producing a new list from the elements that satisfy the predicate.
	FilterMacro = parser.FilterMacro

	// StandardMacros provides an alias to all the CEL macros defined in the standard environment.
	StandardMacros = []Macro{
		HasMacro, AllMacro, ExistsMacro, ExistsOneMacro, MapMacro, MapFilterMacro, FilterMacro,
	}

	// NoMacros provides an alias to an empty list of macros.
	NoMacros = []Macro{}
)

543 vendor/github.com/google/cel-go/cel/options.go generated vendored Normal file
@@ -0,0 +1,543 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cel

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/dynamicpb"

	"github.com/google/cel-go/checker/decls"
	"github.com/google/cel-go/common/containers"
	"github.com/google/cel-go/common/types/pb"
	"github.com/google/cel-go/common/types/ref"
	"github.com/google/cel-go/interpreter"
	"github.com/google/cel-go/interpreter/functions"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
	descpb "google.golang.org/protobuf/types/descriptorpb"
)

// These constants beginning with "Feature" enable optional behavior in
// the library. See the documentation for each constant to see its
// effects, compatibility restrictions, and standard conformance.
const (
	_ = iota

	// Disallow heterogeneous aggregate (list, map) literals.
	// Note, it is still possible to have heterogeneous aggregates when
	// provided as variables to the expression, as well as via conversion
	// of well-known dynamic types, or with unchecked expressions.
	// Affects checking. Provides a subset of standard behavior.
	featureDisableDynamicAggregateLiterals

	// Enable the tracking of function call expressions replaced by macros.
	featureEnableMacroCallTracking

	// Enable the use of cross-type numeric comparisons at the type-checker.
	featureCrossTypeNumericComparisons

	// Enable eager validation of declarations to ensure that Env values created
	// with `Extend` inherit a validated list of declarations from the parent Env.
	featureEagerlyValidateDeclarations

	// Enable the use of the default UTC timezone when a timezone is not specified
	// on a CEL timestamp operation. This fixes the scenario where the input time
	// is not already in UTC.
	featureDefaultUTCTimeZone
)

// EnvOption is a functional interface for configuring the environment.
type EnvOption func(e *Env) (*Env, error)

// ClearMacros clears all parser macros.
//
// Clearing macros will ensure CEL expressions can only contain linear evaluation paths, as
// comprehensions such as `all` and `exists` are enabled only via macros.
func ClearMacros() EnvOption {
	return func(e *Env) (*Env, error) {
		e.macros = NoMacros
		return e, nil
	}
}

// CustomTypeAdapter swaps the default ref.TypeAdapter implementation with a custom one.
//
// Note: This option must be specified before the Types and TypeDescs options when used together.
func CustomTypeAdapter(adapter ref.TypeAdapter) EnvOption {
	return func(e *Env) (*Env, error) {
		e.adapter = adapter
		return e, nil
	}
}

// CustomTypeProvider swaps the default ref.TypeProvider implementation with a custom one.
//
// Note: This option must be specified before the Types and TypeDescs options when used together.
func CustomTypeProvider(provider ref.TypeProvider) EnvOption {
	return func(e *Env) (*Env, error) {
		e.provider = provider
		return e, nil
	}
}

// Declarations option extends the declaration set configured in the environment.
//
// Note: Declarations will by default be appended to the pre-existing declaration set configured
// for the environment. The NewEnv call builds on top of the standard CEL declarations. For a
// purely custom set of declarations use NewCustomEnv.
func Declarations(decls ...*exprpb.Decl) EnvOption {
	return func(e *Env) (*Env, error) {
		e.declarations = append(e.declarations, decls...)
		return e, nil
	}
}

// EagerlyValidateDeclarations ensures that any collisions between configured declarations are caught
// at the time of the `NewEnv` call.
//
// Eagerly validating declarations is also useful for bootstrapping a base `cel.Env` value.
// Calls to base `Env.Extend()` will be significantly faster when declarations are eagerly validated
// as declarations will be collision-checked at most once and only incrementally by way of `Extend`.
//
// Disabled by default as not all environments are used for type-checking.
func EagerlyValidateDeclarations(enabled bool) EnvOption {
	return features(featureEagerlyValidateDeclarations, enabled)
}

// HomogeneousAggregateLiterals option ensures that list and map literal entry types must agree
// during type-checking.
//
// Note, it is still possible to have heterogeneous aggregates when provided as variables to the
// expression, as well as via conversion of well-known dynamic types, or with unchecked
// expressions.
func HomogeneousAggregateLiterals() EnvOption {
	return features(featureDisableDynamicAggregateLiterals, true)
}

// Macros option extends the macro set configured in the environment.
//
// Note: This option must be specified after ClearMacros if used together.
func Macros(macros ...Macro) EnvOption {
	return func(e *Env) (*Env, error) {
		e.macros = append(e.macros, macros...)
		return e, nil
	}
}

// Container sets the container for resolving variable names. Defaults to an empty container.
//
// If all references within an expression are relative to a protocol buffer package, then
// specifying a container of `google.type` would make it possible to write expressions such as
// `Expr{expression: 'a < b'}` instead of having to write `google.type.Expr{...}`.
func Container(name string) EnvOption {
	return func(e *Env) (*Env, error) {
		cont, err := e.Container.Extend(containers.Name(name))
		if err != nil {
			return nil, err
		}
		e.Container = cont
		return e, nil
	}
}

// Abbrevs configures a set of simple names as abbreviations for fully-qualified names.
//
// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name.
// Abbreviations can be useful when working with variables, functions, and especially types from
// multiple namespaces:
//
//	// CEL object construction
//	qual.pkg.version.ObjTypeName{
//	    field: alt.container.ver.FieldTypeName{value: ...}
//	}
//
// Only one of the qualified names above may be used as the CEL container, so at least one of these
// references must be a long qualified name within an otherwise short CEL program. Using the
// following abbreviations, the program becomes much simpler:
//
//	// CEL Go option
//	Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
//	// Simplified Object construction
//	ObjTypeName{field: FieldTypeName{value: ...}}
//
// There are a few rules for the qualified names and the simple abbreviations generated from them:
// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
// - The last element in the qualified name is the abbreviation.
// - Abbreviations must not collide with each other.
// - The abbreviation must not collide with unqualified names in use.
//
// Abbreviations are distinct from container-based references in the following important ways:
// - Abbreviations must expand to a fully-qualified name.
// - Expanded abbreviations do not participate in namespace resolution.
// - Abbreviation expansion is done instead of the container search for a matching identifier.
// - Containers follow C++ namespace resolution rules with searches from the most qualified name
//   to the least qualified name.
// - Container references within the CEL program may be relative, and are resolved to fully
//   qualified names at either type-check time or program plan time, whichever comes first.
//
// If there is ever a case where an identifier could be in both the container and as an
// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is
// preserved between compilations even as the container evolves.
func Abbrevs(qualifiedNames ...string) EnvOption {
	return func(e *Env) (*Env, error) {
		cont, err := e.Container.Extend(containers.Abbrevs(qualifiedNames...))
		if err != nil {
			return nil, err
		}
		e.Container = cont
		return e, nil
	}
}

// Types adds one or more type declarations to the environment, allowing for construction of
// type-literals whose definitions are included in the common expression built-in set.
//
// The input types may either be instances of `proto.Message` or `ref.Type`. Any other type
// provided to this option will result in an error.
//
// Well-known protobuf types within the `google.protobuf.*` package are included in the standard
// environment by default.
//
// Note: This option must be specified after the CustomTypeProvider option when used together.
func Types(addTypes ...interface{}) EnvOption {
	return func(e *Env) (*Env, error) {
		reg, isReg := e.provider.(ref.TypeRegistry)
		if !isReg {
			return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
		}
		for _, t := range addTypes {
			switch v := t.(type) {
			case proto.Message:
				fdMap := pb.CollectFileDescriptorSet(v)
				for _, fd := range fdMap {
					err := reg.RegisterDescriptor(fd)
					if err != nil {
						return nil, err
					}
				}
			case ref.Type:
				err := reg.RegisterType(v)
				if err != nil {
					return nil, err
				}
			default:
				return nil, fmt.Errorf("unsupported type: %T", t)
			}
		}
		return e, nil
	}
}

// TypeDescs adds type declarations from any protoreflect.FileDescriptor, protoregistry.Files,
// google.protobuf.FileDescriptorProto or google.protobuf.FileDescriptorSet provided.
//
// Note that messages instantiated from these descriptors will be *dynamicpb.Message values
// rather than the concrete message type.
//
// TypeDescs are hermetic to a single Env object, but may be copied to other Env values via
// extension or by re-using the same EnvOption with another NewEnv() call.
func TypeDescs(descs ...interface{}) EnvOption {
	return func(e *Env) (*Env, error) {
		reg, isReg := e.provider.(ref.TypeRegistry)
		if !isReg {
			return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
		}
		// Scan the input descriptors for FileDescriptorProto messages and accumulate them into a
		// synthetic FileDescriptorSet as the FileDescriptorProto messages may refer to each other
		// and will not resolve properly unless they are part of the same set.
		var fds *descpb.FileDescriptorSet
		for _, d := range descs {
			switch f := d.(type) {
			case *descpb.FileDescriptorProto:
				if fds == nil {
					fds = &descpb.FileDescriptorSet{
						File: []*descpb.FileDescriptorProto{},
					}
				}
				fds.File = append(fds.File, f)
			}
		}
		if fds != nil {
			if err := registerFileSet(reg, fds); err != nil {
				return nil, err
			}
		}
		for _, d := range descs {
			switch f := d.(type) {
			case *protoregistry.Files:
				if err := registerFiles(reg, f); err != nil {
					return nil, err
				}
			case protoreflect.FileDescriptor:
				if err := reg.RegisterDescriptor(f); err != nil {
					return nil, err
				}
			case *descpb.FileDescriptorSet:
				if err := registerFileSet(reg, f); err != nil {
					return nil, err
				}
			case *descpb.FileDescriptorProto:
				// Skip: already handled via the synthetic file descriptor set above.
			default:
				return nil, fmt.Errorf("unsupported type descriptor: %T", d)
			}
		}
		return e, nil
	}
}

func registerFileSet(reg ref.TypeRegistry, fileSet *descpb.FileDescriptorSet) error {
	files, err := protodesc.NewFiles(fileSet)
	if err != nil {
		return fmt.Errorf("protodesc.NewFiles(%v) failed: %v", fileSet, err)
	}
	return registerFiles(reg, files)
}

func registerFiles(reg ref.TypeRegistry, files *protoregistry.Files) error {
	var err error
	files.RangeFiles(func(fd protoreflect.FileDescriptor) bool {
		err = reg.RegisterDescriptor(fd)
		return err == nil
	})
	return err
}

// ProgramOption is a functional interface for configuring evaluation bindings and behaviors.
type ProgramOption func(p *prog) (*prog, error)

// CustomDecorator appends an InterpretableDecorator to the program.
//
// InterpretableDecorators can be used to inspect, alter, or replace the Program plan.
func CustomDecorator(dec interpreter.InterpretableDecorator) ProgramOption {
	return func(p *prog) (*prog, error) {
		p.decorators = append(p.decorators, dec)
		return p, nil
	}
}

// Functions adds function overloads that extend or override the set of CEL built-ins.
//
// Deprecated: use Function() instead to declare the function, its overload signatures,
// and the overload implementations.
func Functions(funcs ...*functions.Overload) ProgramOption {
	return func(p *prog) (*prog, error) {
		if err := p.dispatcher.Add(funcs...); err != nil {
			return nil, err
		}
		return p, nil
	}
}

// Globals sets the global variable values for a given program. These values may be shadowed by
// variables with the same name provided to the Eval() call. If Globals is used in a Library with
// a Lib EnvOption, vars may shadow variables provided by previously added libraries.
//
// The vars value may either be an `interpreter.Activation` instance or a `map[string]interface{}`.
func Globals(vars interface{}) ProgramOption {
	return func(p *prog) (*prog, error) {
		defaultVars, err := interpreter.NewActivation(vars)
		if err != nil {
			return nil, err
		}
		if p.defaultVars != nil {
			defaultVars = interpreter.NewHierarchicalActivation(p.defaultVars, defaultVars)
		}
		p.defaultVars = defaultVars
		return p, nil
	}
}
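
// Layering sketch: Globals supplies defaults which per-Eval vars may shadow
// (names hypothetical):
//
//	prg, _ := env.Program(ast, Globals(map[string]interface{}{"tier": "free"}))
//	out, _, _ := prg.Eval(map[string]interface{}{"tier": "pro"}) // "tier" resolves to "pro"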

// OptimizeRegex provides a way to replace the InterpretableCall for regex functions. This can be used
// to compile regex string constants at program creation time, report any errors, and then use the
// compiled regex for all regex function invocations.
func OptimizeRegex(regexOptimizations ...*interpreter.RegexOptimization) ProgramOption {
	return func(p *prog) (*prog, error) {
		p.regexOptimizations = append(p.regexOptimizations, regexOptimizations...)
		return p, nil
	}
}

// EvalOption indicates an evaluation option that may affect the evaluation behavior or information
// in the output result.
type EvalOption int

const (
	// OptTrackState will cause the runtime to return an immutable EvalState value in the Result.
	OptTrackState EvalOption = 1 << iota

	// OptExhaustiveEval causes the runtime to disable short-circuits and track state.
	OptExhaustiveEval EvalOption = 1<<iota | OptTrackState

	// OptOptimize precomputes functions and operators with constants as arguments at program
	// creation time. It also pre-compiles regex pattern constants passed to 'matches', reports any
	// compilation errors at program creation, and uses the compiled regex pattern for all 'matches'
	// function invocations. This flag is useful when the expression will be evaluated repeatedly
	// against a series of different inputs.
	OptOptimize EvalOption = 1 << iota

	// OptPartialEval enables the evaluation of a partial state where some of the input data may be
	// known to be missing, either as top-level variables, or somewhere within a variable's object
	// member graph.
	//
	// By itself, OptPartialEval does not change evaluation behavior unless the input to the
	// Program Eval() call is created via PartialVars().
	OptPartialEval EvalOption = 1 << iota

	// OptTrackCost enables runtime cost calculation during evaluation and returns the cost within
	// EvalDetails; the computed cost is available via its ActualCost() method.
	OptTrackCost EvalOption = 1 << iota
)

// EvalOptions sets one or more evaluation options which may affect the evaluation or Result.
func EvalOptions(opts ...EvalOption) ProgramOption {
	return func(p *prog) (*prog, error) {
		for _, opt := range opts {
			p.evalOpts |= opt
		}
		return p, nil
	}
}
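
// Combination sketch: EvalOption values are bit flags, so several may be set
// in a single EvalOptions call:
//
//	prg, err := env.Program(ast, EvalOptions(OptOptimize, OptTrackState))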

// InterruptCheckFrequency configures the number of iterations within a comprehension to evaluate
// before checking whether the function evaluation has been interrupted.
func InterruptCheckFrequency(checkFrequency uint) ProgramOption {
	return func(p *prog) (*prog, error) {
		p.interruptCheckFrequency = checkFrequency
		return p, nil
	}
}

// CostTracking enables cost tracking and registers an ActualCostEstimator that can optionally
// provide a runtime cost estimate for any function calls.
func CostTracking(costEstimator interpreter.ActualCostEstimator) ProgramOption {
	return func(p *prog) (*prog, error) {
		p.callCostEstimator = costEstimator
		p.evalOpts |= OptTrackCost
		return p, nil
	}
}

// CostLimit enables cost tracking and configures program evaluation to exit early with a
// "runtime cost limit exceeded" error if the runtime cost exceeds the costLimit.
// The CostLimit is a metric that corresponds to the number and estimated expense of operations
// performed while evaluating an expression. It is indicative of CPU usage, not memory usage.
func CostLimit(costLimit uint64) ProgramOption {
	return func(p *prog) (*prog, error) {
		p.costLimit = &costLimit
		p.evalOpts |= OptTrackCost
		return p, nil
	}
}
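
// Budgeting sketch: CostTracking and CostLimit may be combined; a nil
// estimator is assumed to fall back to the built-in cost model. Evaluation
// halts with a "runtime cost limit exceeded" error once the limit is passed:
//
//	prg, err := env.Program(ast, CostTracking(nil), CostLimit(1000))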

func fieldToCELType(field protoreflect.FieldDescriptor) (*exprpb.Type, error) {
	if field.Kind() == protoreflect.MessageKind || field.Kind() == protoreflect.GroupKind {
		msgName := (string)(field.Message().FullName())
		wellKnownType, found := pb.CheckedWellKnowns[msgName]
		if found {
			return wellKnownType, nil
		}
		return decls.NewObjectType(msgName), nil
	}
	if primitiveType, found := pb.CheckedPrimitives[field.Kind()]; found {
		return primitiveType, nil
	}
	if field.Kind() == protoreflect.EnumKind {
		return decls.Int, nil
	}
	return nil, fmt.Errorf("field %s type %s not implemented", field.FullName(), field.Kind().String())
}

func fieldToDecl(field protoreflect.FieldDescriptor) (*exprpb.Decl, error) {
	name := string(field.Name())
	if field.IsMap() {
		mapKey := field.MapKey()
		mapValue := field.MapValue()
		keyType, err := fieldToCELType(mapKey)
		if err != nil {
			return nil, err
		}
		valueType, err := fieldToCELType(mapValue)
		if err != nil {
			return nil, err
		}
		return decls.NewVar(name, decls.NewMapType(keyType, valueType)), nil
	}
	if field.IsList() {
		elemType, err := fieldToCELType(field)
		if err != nil {
			return nil, err
		}
		return decls.NewVar(name, decls.NewListType(elemType)), nil
	}
	celType, err := fieldToCELType(field)
	if err != nil {
		return nil, err
	}
	return decls.NewVar(name, celType), nil
}

// DeclareContextProto returns an option to extend the CEL environment with declarations from the
// given context proto. Each field of the proto defines a variable of the same name in the
// environment.
// https://github.com/google/cel-spec/blob/master/doc/langdef.md#evaluation-environment
func DeclareContextProto(descriptor protoreflect.MessageDescriptor) EnvOption {
	return func(e *Env) (*Env, error) {
		var decls []*exprpb.Decl
		fields := descriptor.Fields()
		for i := 0; i < fields.Len(); i++ {
			field := fields.Get(i)
			decl, err := fieldToDecl(field)
			if err != nil {
				return nil, err
			}
			decls = append(decls, decl)
		}
		var err error
		e, err = Declarations(decls...)(e)
		if err != nil {
			return nil, err
		}
		return Types(dynamicpb.NewMessage(descriptor))(e)
	}
}
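
// Usage sketch (mypb.RequestContext is a hypothetical generated message type):
// each field becomes a same-named variable and the message type is registered
// for use within expressions:
//
//	md := (&mypb.RequestContext{}).ProtoReflect().Descriptor()
//	env, err := NewEnv(DeclareContextProto(md))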
||||
|
||||
// EnableMacroCallTracking ensures that call expressions which are replaced by macros
// are tracked in the `SourceInfo` of parsed and checked expressions.
func EnableMacroCallTracking() EnvOption {
	return features(featureEnableMacroCallTracking, true)
}

// CrossTypeNumericComparisons makes it possible to compare across numeric types, e.g. double < int
func CrossTypeNumericComparisons(enabled bool) EnvOption {
	return features(featureCrossTypeNumericComparisons, enabled)
}

// DefaultUTCTimeZone ensures that time-based operations use the UTC timezone rather than the
// input time's local timezone.
func DefaultUTCTimeZone(enabled bool) EnvOption {
	return features(featureDefaultUTCTimeZone, enabled)
}
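
// Editor's note: illustrative only, not part of the vendored file. The feature options
// above are ordinary EnvOption values and compose when building an environment; with
// these settings `1.0 < 2` type-checks and timestamp accessors default to UTC.
func exampleFeatureOptions() (*Env, error) {
	return NewEnv(
		CrossTypeNumericComparisons(true),
		DefaultUTCTimeZone(true),
		EnableMacroCallTracking(),
	)
}
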
// features sets the given feature flags. See list of Feature constants above.
func features(flag int, enabled bool) EnvOption {
	return func(e *Env) (*Env, error) {
		e.features[flag] = enabled
		return e, nil
	}
}

567
vendor/github.com/google/cel-go/cel/program.go
generated
vendored
Normal file
@ -0,0 +1,567 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cel

import (
	"context"
	"fmt"
	"math"
	"sync"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
	"github.com/google/cel-go/interpreter"
)

// Program is an evaluable view of an Ast.
type Program interface {
	// Eval returns the result of an evaluation of the Ast and environment against the input vars.
	//
	// The vars value may either be an `interpreter.Activation` or a `map[string]interface{}`.
	//
	// If the `OptTrackState`, `OptTrackCost` or `OptExhaustiveEval` flags are used, the `details` response will
	// be non-nil. Given this caveat on `details`, the return state from evaluation will be:
	//
	// * `val`, `details`, `nil` - Successful evaluation of a non-error result.
	// * `val`, `details`, `err` - Successful evaluation to an error result.
	// * `nil`, `details`, `err` - Unsuccessful evaluation.
	//
	// An unsuccessful evaluation is typically the result of a series of incompatible `EnvOption`
	// or `ProgramOption` values used in the creation of the evaluation environment or executable
	// program.
	Eval(interface{}) (ref.Val, *EvalDetails, error)

	// ContextEval evaluates the program with a set of input variables and a context object in order
	// to support cancellation and timeouts. This method must be used in conjunction with the
	// InterruptCheckFrequency() option for cancellation interrupts to impact evaluation.
	//
	// The vars value may either be an `interpreter.Activation` or `map[string]interface{}`.
	//
	// The output contract for `ContextEval` is otherwise identical to the `Eval` method.
	ContextEval(context.Context, interface{}) (ref.Val, *EvalDetails, error)
}
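
// Editor's note: a sketch of handling the three documented Eval outcomes, not part of
// the vendored file; it assumes a Program `prg` built elsewhere.
func exampleEvalOutcomes(prg Program) {
	val, det, err := prg.Eval(map[string]interface{}{"x": 10})
	switch {
	case err != nil && val != nil:
		// Successful evaluation to an error result (e.g. a CEL division by zero).
	case err != nil:
		// Unsuccessful evaluation, typically a configuration or input problem.
	default:
		// Non-error result; det is non-nil only when a tracking option was used.
		_ = det
		fmt.Println(val.Value())
	}
}
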
// NoVars returns an empty Activation.
func NoVars() interpreter.Activation {
	return interpreter.EmptyActivation()
}

// PartialVars returns a PartialActivation which contains variables and a set of AttributePattern
// values that indicate variables or parts of variables whose value are not yet known.
//
// The `vars` value may either be an interpreter.Activation or any valid input to the
// interpreter.NewActivation call.
func PartialVars(vars interface{},
	unknowns ...*interpreter.AttributePattern) (interpreter.PartialActivation, error) {
	return interpreter.NewPartialActivation(vars, unknowns...)
}

// AttributePattern returns an AttributePattern that matches a top-level variable. The pattern is
// mutable, and its methods support the specification of one or more qualifier patterns.
//
// For example, the AttributePattern(`a`).QualString(`b`) represents a variable access `a` with a
// string field or index qualification `b`. This pattern will match Attributes `a`, and `a.b`,
// but not `a.c`.
//
// When using a CEL expression within a container, e.g. a package or namespace, the variable name
// in the pattern must match the qualified name produced during the variable namespace resolution.
// For example, when variable `a` is declared within an expression whose container is `ns.app`, the
// fully qualified variable name may be `ns.app.a`, `ns.a`, or `a` per the CEL namespace resolution
// rules. Pick the fully qualified variable name that makes sense within the container as the
// AttributePattern `varName` argument.
//
// See the interpreter.AttributePattern and interpreter.AttributeQualifierPattern for more info
// about how to create and manipulate AttributePattern values.
func AttributePattern(varName string) *interpreter.AttributePattern {
	return interpreter.NewAttributePattern(varName)
}
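
// Editor's note: a hedged sketch of partial evaluation, not part of the vendored file.
// It assumes a Program built with EvalOptions(OptPartialEval) and marks the hypothetical
// variable `attr` as unknown, so branches that touch `attr` stay unknown instead of
// failing the whole evaluation.
func examplePartialVars(prg Program) error {
	vars, err := PartialVars(
		map[string]interface{}{"x": 1},
		AttributePattern("attr"),
	)
	if err != nil {
		return err
	}
	_, _, err = prg.Eval(vars)
	return err
}
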
// EvalDetails holds additional information observed during the Eval() call.
type EvalDetails struct {
	state       interpreter.EvalState
	costTracker *interpreter.CostTracker
}

// State of the evaluation, non-nil if the OptTrackState or OptExhaustiveEval is specified
// within EvalOptions.
func (ed *EvalDetails) State() interpreter.EvalState {
	return ed.state
}

// ActualCost returns the tracked cost through the course of execution when `CostTracking` is enabled.
// Otherwise, returns nil if the cost was not enabled.
func (ed *EvalDetails) ActualCost() *uint64 {
	if ed.costTracker == nil {
		return nil
	}
	cost := ed.costTracker.ActualCost()
	return &cost
}

// prog is the internal implementation of the Program interface.
type prog struct {
	*Env
	evalOpts                EvalOption
	defaultVars             interpreter.Activation
	dispatcher              interpreter.Dispatcher
	interpreter             interpreter.Interpreter
	interruptCheckFrequency uint

	// Intermediate state used to configure the InterpretableDecorator set provided
	// to the initInterpretable call.
	decorators         []interpreter.InterpretableDecorator
	regexOptimizations []*interpreter.RegexOptimization

	// Interpretable configured from an Ast and aggregate decorator set based on program options.
	interpretable     interpreter.Interpretable
	callCostEstimator interpreter.ActualCostEstimator
	costLimit         *uint64
}

func (p *prog) clone() *prog {
	return &prog{
		Env:                     p.Env,
		evalOpts:                p.evalOpts,
		defaultVars:             p.defaultVars,
		dispatcher:              p.dispatcher,
		interpreter:             p.interpreter,
		interruptCheckFrequency: p.interruptCheckFrequency,
	}
}

// newProgram creates a program instance with an environment, an ast, and an optional list of
// ProgramOption values.
//
// If the program cannot be configured, the prog will be nil, with a non-nil error response.
func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
	// Build the dispatcher, interpreter, and default program value.
	disp := interpreter.NewDispatcher()

	// Ensure the default attribute factory is set after the adapter and provider are
	// configured.
	p := &prog{
		Env:        e,
		decorators: []interpreter.InterpretableDecorator{},
		dispatcher: disp,
	}

	// Configure the program via the ProgramOption values.
	var err error
	for _, opt := range opts {
		p, err = opt(p)
		if err != nil {
			return nil, err
		}
	}

	// Add the function bindings created via Function() options.
	for _, fn := range e.functions {
		bindings, err := fn.bindings()
		if err != nil {
			return nil, err
		}
		err = disp.Add(bindings...)
		if err != nil {
			return nil, err
		}
	}

	// Set the attribute factory after the options have been set.
	var attrFactory interpreter.AttributeFactory
	if p.evalOpts&OptPartialEval == OptPartialEval {
		attrFactory = interpreter.NewPartialAttributeFactory(e.Container, e.adapter, e.provider)
	} else {
		attrFactory = interpreter.NewAttributeFactory(e.Container, e.adapter, e.provider)
	}
	interp := interpreter.NewInterpreter(disp, e.Container, e.provider, e.adapter, attrFactory)
	p.interpreter = interp

	// Translate the EvalOption flags into InterpretableDecorator instances.
	decorators := make([]interpreter.InterpretableDecorator, len(p.decorators))
	copy(decorators, p.decorators)

	// Enable interrupt checking if there's a non-zero check frequency.
	if p.interruptCheckFrequency > 0 {
		decorators = append(decorators, interpreter.InterruptableEval())
	}
	// Enable constant folding first.
	if p.evalOpts&OptOptimize == OptOptimize {
		decorators = append(decorators, interpreter.Optimize())
		p.regexOptimizations = append(p.regexOptimizations, interpreter.MatchesRegexOptimization)
	}
	// Enable regex compilation of constants immediately after folding constants.
	if len(p.regexOptimizations) > 0 {
		decorators = append(decorators, interpreter.CompileRegexConstants(p.regexOptimizations...))
	}

	// Enable exhaustive eval, state tracking and cost tracking last since they require a factory.
	if p.evalOpts&(OptExhaustiveEval|OptTrackState|OptTrackCost) != 0 {
		factory := func(state interpreter.EvalState, costTracker *interpreter.CostTracker) (Program, error) {
			costTracker.Estimator = p.callCostEstimator
			costTracker.Limit = p.costLimit
			// Limit capacity to guarantee a reallocation when calling 'append(decs, ...)' below. This
			// prevents the underlying memory from being shared between factory function calls causing
			// undesired mutations.
			decs := decorators[:len(decorators):len(decorators)]
			var observers []interpreter.EvalObserver

			if p.evalOpts&(OptExhaustiveEval|OptTrackState) != 0 {
				// EvalStateObserver is required for OptExhaustiveEval.
				observers = append(observers, interpreter.EvalStateObserver(state))
			}
			if p.evalOpts&OptTrackCost == OptTrackCost {
				observers = append(observers, interpreter.CostObserver(costTracker))
			}

			// Enable exhaustive eval over a basic observer since it offers a superset of features.
			if p.evalOpts&OptExhaustiveEval == OptExhaustiveEval {
				decs = append(decs, interpreter.ExhaustiveEval(), interpreter.Observe(observers...))
			} else if len(observers) > 0 {
				decs = append(decs, interpreter.Observe(observers...))
			}

			return p.clone().initInterpretable(ast, decs)
		}
		return newProgGen(factory)
	}
	return p.initInterpretable(ast, decorators)
}

func (p *prog) initInterpretable(ast *Ast, decs []interpreter.InterpretableDecorator) (*prog, error) {
	// Unchecked programs do not contain type and reference information and may be slower to execute.
	if !ast.IsChecked() {
		interpretable, err :=
			p.interpreter.NewUncheckedInterpretable(ast.Expr(), decs...)
		if err != nil {
			return nil, err
		}
		p.interpretable = interpretable
		return p, nil
	}

	// When the AST has been checked it contains metadata that can be used to speed up program execution.
	var checked *exprpb.CheckedExpr
	checked, err := AstToCheckedExpr(ast)
	if err != nil {
		return nil, err
	}
	interpretable, err := p.interpreter.NewInterpretable(checked, decs...)
	if err != nil {
		return nil, err
	}
	p.interpretable = interpretable
	return p, nil
}

// Eval implements the Program interface method.
func (p *prog) Eval(input interface{}) (v ref.Val, det *EvalDetails, err error) {
	// Configure error recovery for unexpected panics during evaluation. Note, the use of named
	// return values makes it possible to modify the error response during the recovery
	// function.
	defer func() {
		if r := recover(); r != nil {
			switch t := r.(type) {
			case interpreter.EvalCancelledError:
				err = t
			default:
				err = fmt.Errorf("internal error: %v", r)
			}
		}
	}()
	// Build a hierarchical activation if there are default vars set.
	var vars interpreter.Activation
	switch v := input.(type) {
	case interpreter.Activation:
		vars = v
	case map[string]interface{}:
		vars = activationPool.Setup(v)
		defer activationPool.Put(vars)
	default:
		return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]interface{}, got: (%T)%v", input, input)
	}
	if p.defaultVars != nil {
		vars = interpreter.NewHierarchicalActivation(p.defaultVars, vars)
	}
	v = p.interpretable.Eval(vars)
	// The output of an internal Eval may have a value (`v`) that is a types.Err. This step
	// translates the CEL value to a Go error response. This interface does not quite match the
	// RPC signature which allows for multiple errors to be returned, but should be sufficient.
	if types.IsError(v) {
		err = v.(*types.Err)
	}
	return
}

// ContextEval implements the Program interface.
func (p *prog) ContextEval(ctx context.Context, input interface{}) (ref.Val, *EvalDetails, error) {
	if ctx == nil {
		return nil, nil, fmt.Errorf("context can not be nil")
	}
	// Configure the input, making sure to wrap Activation inputs in the special ctxActivation which
	// exposes the #interrupted variable and manages rate-limited checks of the ctx.Done() state.
	var vars interpreter.Activation
	switch v := input.(type) {
	case interpreter.Activation:
		vars = ctxActivationPool.Setup(v, ctx.Done(), p.interruptCheckFrequency)
		defer ctxActivationPool.Put(vars)
	case map[string]interface{}:
		rawVars := activationPool.Setup(v)
		defer activationPool.Put(rawVars)
		vars = ctxActivationPool.Setup(rawVars, ctx.Done(), p.interruptCheckFrequency)
		defer ctxActivationPool.Put(vars)
	default:
		return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]interface{}, got: (%T)%v", input, input)
	}
	return p.Eval(vars)
}
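
// Editor's note: a sketch of cooperative cancellation, not part of the vendored file.
// Per the contract above, the program must be built with InterruptCheckFrequency for
// ctx.Done() to be observed mid-evaluation; `env` and `ast` are assumed inputs.
func exampleContextEval(env *Env, ast *Ast) error {
	prg, err := env.Program(ast, InterruptCheckFrequency(100))
	if err != nil {
		return err
	}
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // an already-cancelled context trips the next frequency-matching check
	_, _, err = prg.ContextEval(ctx, map[string]interface{}{})
	return err // expect an evaluation-cancelled error for long-running expressions
}
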
// Cost implements the Coster interface method.
func (p *prog) Cost() (min, max int64) {
	return estimateCost(p.interpretable)
}

// progFactory is a helper alias for marking a program creation factory function.
type progFactory func(interpreter.EvalState, *interpreter.CostTracker) (Program, error)

// progGen holds a reference to a progFactory instance and implements the Program interface.
type progGen struct {
	factory progFactory
}

// newProgGen tests the factory object by calling it once and returns a factory-based Program if
// the test is successful.
func newProgGen(factory progFactory) (Program, error) {
	// Test the factory to make sure that configuration errors are spotted at config time.
	_, err := factory(interpreter.NewEvalState(), &interpreter.CostTracker{})
	if err != nil {
		return nil, err
	}
	return &progGen{factory: factory}, nil
}

// Eval implements the Program interface method.
func (gen *progGen) Eval(input interface{}) (ref.Val, *EvalDetails, error) {
	// The factory based Eval() differs from the standard evaluation model in that it generates a
	// new EvalState instance for each call to ensure that unique evaluations yield unique stateful
	// results.
	state := interpreter.NewEvalState()
	costTracker := &interpreter.CostTracker{}
	det := &EvalDetails{state: state, costTracker: costTracker}

	// Generate a new instance of the interpretable using the factory configured during the call to
	// newProgram(). It is incredibly unlikely that the factory call will generate an error given
	// the factory test performed within the Program() call.
	p, err := gen.factory(state, costTracker)
	if err != nil {
		return nil, det, err
	}

	// Evaluate the input, returning the result and the 'state' within EvalDetails.
	v, _, err := p.Eval(input)
	if err != nil {
		return v, det, err
	}
	return v, det, nil
}

// ContextEval implements the Program interface method.
func (gen *progGen) ContextEval(ctx context.Context, input interface{}) (ref.Val, *EvalDetails, error) {
	if ctx == nil {
		return nil, nil, fmt.Errorf("context can not be nil")
	}
	// The factory based Eval() differs from the standard evaluation model in that it generates a
	// new EvalState instance for each call to ensure that unique evaluations yield unique stateful
	// results.
	state := interpreter.NewEvalState()
	costTracker := &interpreter.CostTracker{}
	det := &EvalDetails{state: state, costTracker: costTracker}

	// Generate a new instance of the interpretable using the factory configured during the call to
	// newProgram(). It is incredibly unlikely that the factory call will generate an error given
	// the factory test performed within the Program() call.
	p, err := gen.factory(state, costTracker)
	if err != nil {
		return nil, det, err
	}

	// Evaluate the input, returning the result and the 'state' within EvalDetails.
	v, _, err := p.ContextEval(ctx, input)
	if err != nil {
		return v, det, err
	}
	return v, det, nil
}

// Cost implements the Coster interface method.
func (gen *progGen) Cost() (min, max int64) {
	// Use an empty state value since no evaluation is performed.
	p, err := gen.factory(emptyEvalState, nil)
	if err != nil {
		return 0, math.MaxInt64
	}
	return estimateCost(p)
}

// EstimateCost returns the heuristic cost interval for the program.
func EstimateCost(p Program) (min, max int64) {
	return estimateCost(p)
}

func estimateCost(i interface{}) (min, max int64) {
	c, ok := i.(interpreter.Coster)
	if !ok {
		return 0, math.MaxInt64
	}
	return c.Cost()
}
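
// Editor's note: illustrative only, not part of the vendored file. EstimateCost yields
// a static [min, max] interval before any run, complementing the runtime figure from
// EvalDetails.ActualCost; programs that do not implement interpreter.Coster report
// [0, math.MaxInt64].
func exampleEstimateCost(prg Program) {
	min, max := EstimateCost(prg)
	fmt.Printf("heuristic cost interval: [%d, %d]\n", min, max)
}
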
type ctxEvalActivation struct {
	parent                  interpreter.Activation
	interrupt               <-chan struct{}
	interruptCheckCount     uint
	interruptCheckFrequency uint
}

// ResolveName implements the Activation interface method, but adds a special #interrupted variable
// which is capable of testing whether a 'done' signal is provided from a context.Context channel.
func (a *ctxEvalActivation) ResolveName(name string) (interface{}, bool) {
	if name == "#interrupted" {
		a.interruptCheckCount++
		if a.interruptCheckCount%a.interruptCheckFrequency == 0 {
			select {
			case <-a.interrupt:
				return true, true
			default:
				return nil, false
			}
		}
		return nil, false
	}
	return a.parent.ResolveName(name)
}

func (a *ctxEvalActivation) Parent() interpreter.Activation {
	return a.parent
}

func newCtxEvalActivationPool() *ctxEvalActivationPool {
	return &ctxEvalActivationPool{
		Pool: sync.Pool{
			New: func() interface{} {
				return &ctxEvalActivation{}
			},
		},
	}
}

type ctxEvalActivationPool struct {
	sync.Pool
}

// Setup initializes a pooled Activation with the ability to check for context.Context cancellation.
func (p *ctxEvalActivationPool) Setup(vars interpreter.Activation, done <-chan struct{}, interruptCheckRate uint) *ctxEvalActivation {
	a := p.Pool.Get().(*ctxEvalActivation)
	a.parent = vars
	a.interrupt = done
	a.interruptCheckCount = 0
	a.interruptCheckFrequency = interruptCheckRate
	return a
}

type evalActivation struct {
	vars     map[string]interface{}
	lazyVars map[string]interface{}
}

// ResolveName looks up the value of the input variable name, if found.
//
// Lazy bindings may be supplied within the map-based input in either of the following forms:
// - func() interface{}
// - func() ref.Val
//
// The lazy binding will only be invoked once per evaluation.
//
// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
// the ref.TypeAdapter configured in the environment.
func (a *evalActivation) ResolveName(name string) (interface{}, bool) {
	v, found := a.vars[name]
	if !found {
		return nil, false
	}
	switch obj := v.(type) {
	case func() ref.Val:
		if resolved, found := a.lazyVars[name]; found {
			return resolved, true
		}
		lazy := obj()
		a.lazyVars[name] = lazy
		return lazy, true
	case func() interface{}:
		if resolved, found := a.lazyVars[name]; found {
			return resolved, true
		}
		lazy := obj()
		a.lazyVars[name] = lazy
		return lazy, true
	default:
		return obj, true
	}
}
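
// Editor's note: a hedged sketch of the lazy-binding forms accepted above, not part of
// the vendored file. The function value runs at most once per evaluation because the
// resolved result is memoized in lazyVars.
func exampleLazyBindings(prg Program) (ref.Val, error) {
	vars := map[string]interface{}{
		"cheap": 42,
		"expensive": func() interface{} {
			// Imagine a costly RPC or disk read here; it executes only if the
			// expression actually references `expensive`.
			return "computed"
		},
	}
	out, _, err := prg.Eval(vars)
	return out, err
}
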
// Parent implements the interpreter.Activation interface
func (a *evalActivation) Parent() interpreter.Activation {
	return nil
}

func newEvalActivationPool() *evalActivationPool {
	return &evalActivationPool{
		Pool: sync.Pool{
			New: func() interface{} {
				return &evalActivation{lazyVars: make(map[string]interface{})}
			},
		},
	}
}

type evalActivationPool struct {
	sync.Pool
}

// Setup initializes a pooled Activation object with the map input.
func (p *evalActivationPool) Setup(vars map[string]interface{}) *evalActivation {
	a := p.Pool.Get().(*evalActivation)
	a.vars = vars
	return a
}

func (p *evalActivationPool) Put(value interface{}) {
	a := value.(*evalActivation)
	for k := range a.lazyVars {
		delete(a.lazyVars, k)
	}
	p.Pool.Put(a)
}

var (
	emptyEvalState = interpreter.NewEvalState()

	// activationPool is an internally managed pool of Activation values that wrap map[string]interface{} inputs
	activationPool = newEvalActivationPool()

	// ctxActivationPool is an internally managed pool of Activation values that expose a special #interrupted variable
	ctxActivationPool = newCtxEvalActivationPool()
)