rebase: bump the golang-dependencies group with 1 update

Bumps the golang-dependencies group with 1 update: [golang.org/x/crypto](https://github.com/golang/crypto).


Updates `golang.org/x/crypto` from 0.16.0 to 0.17.0
- [Commits](https://github.com/golang/crypto/compare/v0.16.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Authored by dependabot[bot] on 2023-12-18 20:31:00 +00:00, committed by mergify[bot].
Parent: 1ad79314f9
Commit: e5d9b68d36
398 changed files with 33924 additions and 10753 deletions
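
The commit itself only touches dependency metadata and vendored sources. As a side note, one way to confirm that a rebuilt binary actually picked up the bumped golang.org/x/crypto is to read the module information the Go toolchain embeds into it; the sketch below is illustrative and not part of this change.

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// Module list embedded by the Go toolchain at build time; in a binary
	// built from this repository after the bump, golang.org/x/crypto should
	// be reported as v0.17.0.
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("binary built without module information")
		return
	}
	for _, dep := range info.Deps {
		if dep.Path == "golang.org/x/crypto" {
			fmt.Println(dep.Path, dep.Version)
		}
	}
}
```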

go.mod (48 changed lines)

@@ -25,7 +25,7 @@ require (
 	github.com/pkg/xattr v0.4.9
 	github.com/prometheus/client_golang v1.17.0
 	github.com/stretchr/testify v1.8.4
-	golang.org/x/crypto v0.16.0
+	golang.org/x/crypto v0.17.0
 	golang.org/x/net v0.19.0
 	golang.org/x/sys v0.15.0
 	google.golang.org/grpc v1.59.0
@@ -38,10 +38,10 @@ require (
 	k8s.io/client-go v12.0.0+incompatible
 	k8s.io/cloud-provider v0.28.3
 	k8s.io/klog/v2 v2.110.1
-	k8s.io/kubernetes v1.28.4
+	k8s.io/kubernetes v1.29.0
 	k8s.io/mount-utils v0.28.3
 	k8s.io/pod-security-admission v0.0.0
-	k8s.io/utils v0.0.0-20230406110748-d93618cff8a2
+	k8s.io/utils v0.0.0-20230726121419-3b25d923346b
 	sigs.k8s.io/controller-runtime v0.16.3
 )
@@ -65,12 +65,12 @@ require (
 	github.com/coreos/go-semver v0.3.1 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
-	github.com/docker/distribution v2.8.2+incompatible // indirect
+	github.com/distribution/reference v0.5.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 	github.com/evanphx/json-patch/v5 v5.6.0 // indirect
 	github.com/felixge/httpsnoop v1.0.3 // indirect
-	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/fsnotify/fsnotify v1.7.0 // indirect
 	github.com/gemalto/flume v0.13.0 // indirect
 	github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect
 	github.com/go-jose/go-jose/v3 v3.0.1 // indirect
@@ -82,13 +82,12 @@
 	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/google/cel-go v0.16.1 // indirect
+	github.com/google/cel-go v0.17.7 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
-	github.com/gorilla/websocket v1.5.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
 	github.com/hashicorp/go-hclog v1.2.2 // indirect
@@ -119,7 +118,7 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/selinux v1.10.0 // indirect
+	github.com/opencontainers/selinux v1.11.0 // indirect
 	github.com/openshift/api v0.0.0-20230320192226-1fc631efd341 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -131,19 +130,18 @@
 	github.com/spf13/cobra v1.7.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stoewer/go-strcase v1.2.0 // indirect
-	go.etcd.io/etcd/api/v3 v3.5.9 // indirect
-	go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
-	go.etcd.io/etcd/client/v3 v3.5.9 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
-	go.opentelemetry.io/otel v1.10.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect
-	go.opentelemetry.io/otel/metric v0.31.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.10.0 // indirect
-	go.opentelemetry.io/otel/trace v1.10.0 // indirect
-	go.opentelemetry.io/proto/otlp v0.19.0 // indirect
+	go.etcd.io/etcd/api/v3 v3.5.10 // indirect
+	go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect
+	go.etcd.io/etcd/client/v3 v3.5.10 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect
+	go.opentelemetry.io/otel v1.19.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect
+	go.opentelemetry.io/otel/metric v1.19.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.19.0 // indirect
+	go.opentelemetry.io/otel/trace v1.19.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.25.0 // indirect
 	golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 // indirect
@@ -168,12 +166,12 @@
 	k8s.io/component-helpers v0.28.3 // indirect
 	k8s.io/controller-manager v0.28.3 // indirect
 	k8s.io/kms v0.28.3 // indirect
-	k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
+	k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
 	k8s.io/kubectl v0.0.0 // indirect
 	k8s.io/kubelet v0.0.0 // indirect
-	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect
+	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
 	sigs.k8s.io/yaml v1.3.0 // indirect
 )
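
To sift a require block like this programmatically (for example, to list which entries are direct versus `// indirect`), the golang.org/x/mod/modfile package can parse go.mod; this is an illustrative sketch, not something used by this commit.

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/mod/modfile"
)

func main() {
	data, err := os.ReadFile("go.mod")
	if err != nil {
		panic(err)
	}
	// Parse the module file and report each requirement, mirroring the
	// direct vs "// indirect" split visible in the diff above.
	f, err := modfile.Parse("go.mod", data, nil)
	if err != nil {
		panic(err)
	}
	for _, r := range f.Require {
		kind := "direct"
		if r.Indirect {
			kind = "indirect"
		}
		fmt.Printf("%-8s %s %s\n", kind, r.Mod.Path, r.Mod.Version)
	}
}
```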

go.sum (86 changed lines)

@ -751,8 +751,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@ -794,8 +794,9 @@ github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/
github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/gemalto/flume v0.13.0 h1:EEeQvAxyFys3BH8IxEU7ZpM6Kr1sYn20HuZq6dgyMR8= github.com/gemalto/flume v0.13.0 h1:EEeQvAxyFys3BH8IxEU7ZpM6Kr1sYn20HuZq6dgyMR8=
github.com/gemalto/flume v0.13.0/go.mod h1:3iOEZiK/HD8SnFTqHCQoOHQKaHlBY0b6z55P8SLaOzk= github.com/gemalto/flume v0.13.0/go.mod h1:3iOEZiK/HD8SnFTqHCQoOHQKaHlBY0b6z55P8SLaOzk=
github.com/gemalto/kmip-go v0.0.10 h1:jAAZejUdRrspKigLoA62MTmIj0T7DDDOzdxHi1cDjoU= github.com/gemalto/kmip-go v0.0.10 h1:jAAZejUdRrspKigLoA62MTmIj0T7DDDOzdxHi1cDjoU=
@ -923,8 +924,9 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.16.1 h1:3hZfSNiAU3KOiNtxuFXVp5WFy4hf/Ly3Sa4/7F8SXNo=
github.com/google/cel-go v0.16.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/cel-go v0.16.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ=
github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/fscrypt v0.3.4 h1:XGSVMIsQFooj82aRRfYn3JpgU/4fOTnzXPnjhxC8uH8= github.com/google/fscrypt v0.3.4 h1:XGSVMIsQFooj82aRRfYn3JpgU/4fOTnzXPnjhxC8uH8=
github.com/google/fscrypt v0.3.4/go.mod h1:BRpw7vaeDitXGRvXa281i/ivQszAdBIiUYDWHjVTkcs= github.com/google/fscrypt v0.3.4/go.mod h1:BRpw7vaeDitXGRvXa281i/ivQszAdBIiUYDWHjVTkcs=
@ -1004,7 +1006,6 @@ github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
@ -1016,8 +1017,9 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
@ -1251,8 +1253,8 @@ github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/openshift/api v0.0.0-20210105115604-44119421ec6b/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg= github.com/openshift/api v0.0.0-20210105115604-44119421ec6b/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg=
github.com/openshift/api v0.0.0-20230320192226-1fc631efd341 h1:PhLdiIlVqgN4frwrG8lNlbQdJ4eJcGdjX/vhlN6xupk= github.com/openshift/api v0.0.0-20230320192226-1fc631efd341 h1:PhLdiIlVqgN4frwrG8lNlbQdJ4eJcGdjX/vhlN6xupk=
github.com/openshift/api v0.0.0-20230320192226-1fc631efd341/go.mod h1:ctXNyWanKEjGj8sss1KjjHQ3ENKFm33FFnS5BKaIPh4= github.com/openshift/api v0.0.0-20230320192226-1fc631efd341/go.mod h1:ctXNyWanKEjGj8sss1KjjHQ3ENKFm33FFnS5BKaIPh4=
@ -1424,22 +1426,25 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t
github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE= go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k=
go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI=
go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/v2 v2.305.9 h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo= go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0=
go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U=
go.etcd.io/etcd/client/v2 v2.305.9/go.mod h1:0NBdNx9wbxtEQLwAQtrDHwx58m02vXpDcgSYI2seohQ= go.etcd.io/etcd/client/v2 v2.305.9/go.mod h1:0NBdNx9wbxtEQLwAQtrDHwx58m02vXpDcgSYI2seohQ=
go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E= go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4=
go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA= go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA=
go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ= go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao=
go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc=
go.etcd.io/etcd/pkg/v3 v3.5.9/go.mod h1:BZl0SAShQFk0IpLWR78T/+pyt8AruMHhTNNX73hkNVY= go.etcd.io/etcd/pkg/v3 v3.5.9/go.mod h1:BZl0SAShQFk0IpLWR78T/+pyt8AruMHhTNNX73hkNVY=
go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=
go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg= go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg=
go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=
go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g=
go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg=
go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@ -1450,36 +1455,44 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48=
go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU=
go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM= go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM=
go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4=
go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0=
go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I=
go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A=
go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI=
go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY=
go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE=
go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A=
go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk=
go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4= go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4=
go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E=
go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM=
go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg=
go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
@ -1522,8 +1535,8 @@ golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -1736,7 +1749,6 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -2321,14 +2333,15 @@ k8s.io/kms v0.28.3 h1:jYwwAe96XELNjYWv1G4kNzizcFoZ50OOElvPansbw70=
k8s.io/kms v0.28.3/go.mod h1:kSMjU2tg7vjqqoWVVCcmPmNZ/CofPsoTbSxAipCvZuE= k8s.io/kms v0.28.3/go.mod h1:kSMjU2tg7vjqqoWVVCcmPmNZ/CofPsoTbSxAipCvZuE=
k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/kubectl v0.28.3 h1:H1Peu1O3EbN9zHkJCcvhiJ4NUj6lb88sGPO5wrWIM6k= k8s.io/kubectl v0.28.3 h1:H1Peu1O3EbN9zHkJCcvhiJ4NUj6lb88sGPO5wrWIM6k=
k8s.io/kubectl v0.28.3/go.mod h1:RDAudrth/2wQ3Sg46fbKKl4/g+XImzvbsSRZdP2RiyE= k8s.io/kubectl v0.28.3/go.mod h1:RDAudrth/2wQ3Sg46fbKKl4/g+XImzvbsSRZdP2RiyE=
k8s.io/kubelet v0.28.3 h1:bp/uIf1R5F61BlFvFtzc4PDEiK7TtFcw3wFJlc0V0LM= k8s.io/kubelet v0.28.3 h1:bp/uIf1R5F61BlFvFtzc4PDEiK7TtFcw3wFJlc0V0LM=
k8s.io/kubelet v0.28.3/go.mod h1:E3NHYbp/v45Ao6AD0EOZnqO3L0R6Haks6Nm0+bnFwtU= k8s.io/kubelet v0.28.3/go.mod h1:E3NHYbp/v45Ao6AD0EOZnqO3L0R6Haks6Nm0+bnFwtU=
k8s.io/kubernetes v1.28.4 h1:aRNxs5jb8FVTtlnxeA4FSDBVKuFwA8Gw40/U2zReBYA= k8s.io/kubernetes v1.29.0 h1:DOLN7g8+nnAYBi8JHoW0+/MCrZKDPIqAxzLCXDXd0cg=
k8s.io/kubernetes v1.28.4/go.mod h1:BTzDCKYAlu6LL9ITbfjwgwIrJ30hlTgbv0eXDoA/WoA= k8s.io/kubernetes v1.29.0/go.mod h1:9kztbUQf9stVDcIYXx+BX3nuGCsAQDsuClkGMpPs3pA=
k8s.io/mount-utils v0.28.3 h1:1p6Dk2QhoK0IYOee2MOec/90a7fC0yUqlWPfQy/4JFE= k8s.io/mount-utils v0.28.3 h1:1p6Dk2QhoK0IYOee2MOec/90a7fC0yUqlWPfQy/4JFE=
k8s.io/mount-utils v0.28.3/go.mod h1:ceMAZ+Nzlk8zOwN205YXXGJRGmf1o0/XIwsKnG44p0I= k8s.io/mount-utils v0.28.3/go.mod h1:ceMAZ+Nzlk8zOwN205YXXGJRGmf1o0/XIwsKnG44p0I=
k8s.io/pod-security-admission v0.28.3 h1:CtVVG36YwniCH4d18wAoFW6n0Qm5Z1uUVfDIiO4kY0I= k8s.io/pod-security-admission v0.28.3 h1:CtVVG36YwniCH4d18wAoFW6n0Qm5Z1uUVfDIiO4kY0I=
@ -2336,8 +2349,9 @@ k8s.io/pod-security-admission v0.28.3/go.mod h1:qm+gZ8FdnxBgVVTZfSjlK/oeBosmvECB
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
@ -2376,15 +2390,17 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y=
sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I= sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I=
sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4=
sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
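
Each go.sum entry above pairs a module version with an "h1:" hash of its file tree (plus a separate hash of its go.mod). As a rough sketch of how such a hash can be recomputed, golang.org/x/mod/sumdb/dirhash can hash an extracted module; the path below is a placeholder, and the result should match the corresponding h1: line for golang.org/x/crypto v0.17.0.

```go
package main

import (
	"fmt"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// Placeholder path: wherever `go mod download` extracted the module.
	dir := "/home/user/go/pkg/mod/golang.org/x/crypto@v0.17.0"

	// Hash1 is the algorithm behind go.sum's "h1:" entries; the prefix must
	// be module@version so file names hash the same way the go tool does.
	h, err := dirhash.HashDir(dir, "golang.org/x/crypto@v0.17.0", dirhash.Hash1)
	if err != nil {
		panic(err)
	}
	fmt.Println(h)
}
```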


@ -0,0 +1 @@
*.go text eol=lf

vendor/github.com/distribution/reference/.gitignore (generated, vendored, new file; 2 lines)

@ -0,0 +1,2 @@
# Cover profiles
*.out

vendor/github.com/distribution/reference/.golangci.yml (generated, vendored, new file; 18 lines)

@ -0,0 +1,18 @@
linters:
  enable:
    - bodyclose
    - dupword # Checks for duplicate words in the source code
    - gofmt
    - goimports
    - ineffassign
    - misspell
    - revive
    - staticcheck
    - unconvert
    - unused
    - vet
  disable:
    - errcheck

run:
  deadline: 2m
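
For a sense of what this configuration enables, here is a small stand-alone Go snippet (illustrative only, not part of the vendored code) containing the kind of issues the listed linters report.

```go
// Package lintdemo is an illustrative snippet showing issues that the
// linters enabled above would flag.
package lintdemo

import "fmt"

// The the word "the" is duplicated here on purpose; the dupword linter reports it.
func Demo() {
	n := 3
	fmt.Println(int(n)) // unconvert flags this redundant int(n) conversion.
}
```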


@ -0,0 +1,5 @@
# Code of Conduct
We follow the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
Please contact the [CNCF Code of Conduct Committee](mailto:conduct@cncf.io) in order to report violations of the Code of Conduct.


@ -0,0 +1,114 @@
# Contributing to the reference library
## Community help
If you need help, please ask in the [#distribution](https://cloud-native.slack.com/archives/C01GVR8SY4R) channel on CNCF community slack.
[Click here for an invite to the CNCF community slack](https://slack.cncf.io/)
## Reporting security issues
The maintainers take security seriously. If you discover a security
issue, please bring it to their attention right away!
Please **DO NOT** file a public issue, instead send your report privately to
[cncf-distribution-security@lists.cncf.io](mailto:cncf-distribution-security@lists.cncf.io).
## Reporting an issue properly
By following these simple rules you will get better and faster feedback on your issue.
- search the bugtracker for an already reported issue
### If you found an issue that describes your problem:
- please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
- please refrain from adding "same thing here" or "+1" comments
- you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
- comment if you have some new, technical and relevant information to add to the case
- __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.
### If you have not found an existing issue that describes your problem:
1. create a new issue, with a succinct title that describes your issue:
- bad title: "It doesn't work with my docker"
- good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
2. copy the output of (or similar for other container tools):
- `docker version`
- `docker info`
- `docker exec <registry-container> registry --version`
3. copy the command line you used to launch your Registry
4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
5. reproduce your problem and get your docker daemon logs showing the error
6. if relevant, copy your registry logs that show the error
7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry
## Contributing Code
Contributions should be made via pull requests. Pull requests will be reviewed
by one or more maintainers or reviewers and merged when acceptable.
You should follow the basic GitHub workflow:
1. Use your own [fork](https://help.github.com/en/articles/about-forks)
2. Create your [change](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes)
3. Test your code
4. [Commit](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages) your work, always [sign your commits](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages)
5. Push your change to your fork and create a [Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork)
Refer to [containerd's contribution guide](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes)
for tips on creating a successful contribution.
## Sign your work
The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@email.com>
Use your real name (sorry, no pseudonyms or anonymous contributions.)
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

vendor/github.com/distribution/reference/GOVERNANCE.md (generated, vendored, new file; 144 lines)

@ -0,0 +1,144 @@
# distribution/reference Project Governance
Distribution [Code of Conduct](./CODE-OF-CONDUCT.md) can be found here.
For specific guidance on practical contribution steps please
see our [CONTRIBUTING.md](./CONTRIBUTING.md) guide.
## Maintainership
There are different types of maintainers, with different responsibilities, but
all maintainers have 3 things in common:
1) They share responsibility in the project's success.
2) They have made a long-term, recurring time investment to improve the project.
3) They spend that time doing whatever needs to be done, not necessarily what
is the most interesting or fun.
Maintainers are often under-appreciated, because their work is harder to appreciate.
It's easy to appreciate a really cool and technically advanced feature. It's harder
to appreciate the absence of bugs, the slow but steady improvement in stability,
or the reliability of a release process. But those things distinguish a good
project from a great one.
## Reviewers
A reviewer is a core role within the project.
They share in reviewing issues and pull requests and their LGTM counts towards the
required LGTM count to merge a code change into the project.
Reviewers are part of the organization but do not have write access.
Becoming a reviewer is a core aspect in the journey to becoming a maintainer.
## Adding maintainers
Maintainers are first and foremost contributors that have shown they are
committed to the long term success of a project. Contributors wanting to become
maintainers are expected to be deeply involved in contributing code, pull
request review, and triage of issues in the project for more than three months.
Just contributing does not make you a maintainer, it is about building trust
with the current maintainers of the project and being a person that they can
depend on and trust to make decisions in the best interest of the project.
Periodically, the existing maintainers curate a list of contributors that have
shown regular activity on the project over the prior months. From this list,
maintainer candidates are selected and proposed in a pull request or a
maintainers communication channel.
After a candidate has been announced to the maintainers, the existing
maintainers are given five business days to discuss the candidate, raise
objections and cast their vote. Votes may take place on the communication
channel or via pull request comment. Candidates must be approved by at least 66%
of the current maintainers by adding their vote on the mailing list. The
reviewer role has the same process but only requires 33% of current maintainers.
Only maintainers of the repository that the candidate is proposed for are
allowed to vote.
If a candidate is approved, a maintainer will contact the candidate to invite
the candidate to open a pull request that adds the contributor to the
MAINTAINERS file. The voting process may take place inside a pull request if a
maintainer has already discussed the candidacy with the candidate and a
maintainer is willing to be a sponsor by opening the pull request. The candidate
becomes a maintainer once the pull request is merged.
## Stepping down policy
Life priorities, interests, and passions can change. If you're a maintainer but
feel you must remove yourself from the list, inform other maintainers that you
intend to step down, and if possible, help find someone to pick up your work.
At the very least, ensure your work can be continued where you left off.
After you've informed other maintainers, create a pull request to remove
yourself from the MAINTAINERS file.
## Removal of inactive maintainers
Similar to the procedure for adding new maintainers, existing maintainers can
be removed from the list if they do not show significant activity on the
project. Periodically, the maintainers review the list of maintainers and their
activity over the last three months.
If a maintainer has shown insufficient activity over this period, a neutral
person will contact the maintainer to ask if they want to continue being
a maintainer. If the maintainer decides to step down as a maintainer, they
open a pull request to be removed from the MAINTAINERS file.
If the maintainer wants to remain a maintainer, but is unable to perform the
required duties they can be removed with a vote of at least 66% of the current
maintainers. In this case, maintainers should first propose the change to
maintainers via the maintainers communication channel, then open a pull request
for voting. The voting period is five business days. The voting pull request
should not come as a surprise to any maintainer and any discussion related to
performance must not be discussed on the pull request.
## How are decisions made?
Docker distribution is an open-source project with an open design philosophy.
This means that the repository is the source of truth for EVERY aspect of the
project, including its philosophy, design, road map, and APIs. *If it's part of
the project, it's in the repo. If it's in the repo, it's part of the project.*
As a result, all decisions can be expressed as changes to the repository. An
implementation change is a change to the source code. An API change is a change
to the API specification. A philosophy change is a change to the philosophy
manifesto, and so on.
All decisions affecting distribution, big and small, follow the same 3 steps:
* Step 1: Open a pull request. Anyone can do this.
* Step 2: Discuss the pull request. Anyone can do this.
* Step 3: Merge or refuse the pull request. Who does this depends on the nature
of the pull request and which areas of the project it affects.
## Helping contributors with the DCO
The [DCO or `Sign your work`](./CONTRIBUTING.md#sign-your-work)
requirement is not intended as a roadblock or speed bump.
Some contributors are not as familiar with `git`, or have used a web
based editor, and thus asking them to `git commit --amend -s` is not the best
way forward.
In this case, maintainers can update the commits based on clause (c) of the DCO.
The most trivial way for a contributor to allow the maintainer to do this, is to
add a DCO signature in a pull request's comment, or a maintainer can simply
note that the change is sufficiently trivial that it does not substantially
change the existing contribution - i.e., a spelling change.
When you add someone's DCO, please also add your own to keep a log.
## I'm a maintainer. Should I make pull requests too?
Yes. Nobody should ever push to master directly. All changes should be
made through a pull request.
## Conflict Resolution
If you have a technical dispute that you feel has reached an impasse with a
subset of the community, any contributor may open an issue, specifically
calling for a resolution vote of the current core maintainers to resolve the
dispute. The same voting quorums required (2/3) for adding and removing
maintainers will apply to conflict resolution.

vendor/github.com/distribution/reference/MAINTAINERS (generated, vendored, new file; 26 lines)

@ -0,0 +1,26 @@
# Distribution project maintainers & reviewers
#
# See GOVERNANCE.md for maintainer versus reviewer roles
#
# MAINTAINERS (cncf-distribution-maintainers@lists.cncf.io)
# GitHub ID, Name, Email address
"chrispat","Chris Patterson","chrispat@github.com"
"clarkbw","Bryan Clark","clarkbw@github.com"
"corhere","Cory Snider","csnider@mirantis.com"
"deleteriousEffect","Hayley Swimelar","hswimelar@gitlab.com"
"heww","He Weiwei","hweiwei@vmware.com"
"joaodrp","João Pereira","jpereira@gitlab.com"
"justincormack","Justin Cormack","justin.cormack@docker.com"
"squizzi","Kyle Squizzato","ksquizzato@mirantis.com"
"milosgajdos","Milos Gajdos","milosthegajdos@gmail.com"
"sargun","Sargun Dhillon","sargun@sargun.me"
"wy65701436","Wang Yan","wangyan@vmware.com"
"stevelasker","Steve Lasker","steve.lasker@microsoft.com"
#
# REVIEWERS
# GitHub ID, Name, Email address
"dmcgowan","Derek McGowan","derek@mcgstyle.net"
"stevvooe","Stephen Day","stevvooe@gmail.com"
"thajeztah","Sebastiaan van Stijn","github@gone.nl"
"DavidSpek", "David van der Spek", "vanderspek.david@gmail.com"
"Jamstah", "James Hewitt", "james.hewitt@gmail.com"

vendor/github.com/distribution/reference/Makefile (generated, vendored, new file; 25 lines)

@ -0,0 +1,25 @@
# Project packages.
PACKAGES=$(shell go list ./...)

# Flags passed to `go test`
BUILDFLAGS ?=
TESTFLAGS ?=

.PHONY: all build test coverage
.DEFAULT: all

all: build

build: ## no binaries to build, so just check compilation succeeds
	go build ${BUILDFLAGS} ./...

test: ## run tests
	go test ${TESTFLAGS} ./...

coverage: ## generate coverprofiles from the unit tests
	rm -f coverage.txt
	go test ${TESTFLAGS} -cover -coverprofile=cover.out ./...

.PHONY: help
help:
	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_\/%-]+:.*?##/ { printf "  \033[36m%-27s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

vendor/github.com/distribution/reference/README.md (generated, vendored, new file; 30 lines)

@ -0,0 +1,30 @@
# Distribution reference
Go library to handle references to container images.
<img src="/distribution-logo.svg" width="200px" />
[![Build Status](https://github.com/distribution/reference/actions/workflows/test.yml/badge.svg?branch=main&event=push)](https://github.com/distribution/reference/actions?query=workflow%3ACI)
[![GoDoc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/distribution/reference)
[![License: Apache-2.0](https://img.shields.io/badge/License-Apache--2.0-blue.svg)](LICENSE)
[![codecov](https://codecov.io/gh/distribution/reference/branch/main/graph/badge.svg)](https://codecov.io/gh/distribution/reference)
[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference.svg?type=shield)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference?ref=badge_shield)
This repository contains a library for handling references to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details.
## Contribution
Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
issues, fixes, and patches to this project.
## Communication
For async communication and long running discussions please use issues and pull requests on the github repo.
This will be the best place to discuss design and implementation.
For sync communication we have a #distribution channel in the [CNCF Slack](https://slack.cncf.io/)
that everyone is welcome to join and chat about development.
## Licenses
The distribution codebase is released under the [Apache 2.0 license](LICENSE).
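As a quick illustration of what the library handles (not part of the vendored files), a minimal sketch using ParseNormalizedNamed and FamiliarString; the "ubuntu" image name is just an example:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// "ubuntu" is a familiar name; normalization expands it to the
	// canonical repository on Docker Hub.
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())                  // docker.io/library/ubuntu
	fmt.Println(reference.FamiliarString(named)) // ubuntu
}
```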

7
vendor/github.com/distribution/reference/SECURITY.md generated vendored Normal file
View File

@ -0,0 +1,7 @@
# Security Policy
## Reporting a Vulnerability
The maintainers take security seriously. If you discover a security issue, please bring it to their attention right away!
Please DO NOT file a public issue; instead, send your report privately to cncf-distribution-security@lists.cncf.io.

File diff suppressed because one or more lines are too long; image preview omitted (After: Size: 8.6 KiB)

View File

@ -32,7 +32,7 @@ func FamiliarString(ref Reference) string {
} }
// FamiliarMatch reports whether ref matches the specified pattern. // FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns. // See [path.Match] for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) { func FamiliarMatch(pattern string, ref Reference) (bool, error) {
matched, err := path.Match(pattern, FamiliarString(ref)) matched, err := path.Match(pattern, FamiliarString(ref))
if namedRef, isNamed := ref.(Named); isNamed && !matched { if namedRef, isNamed := ref.(Named); isNamed && !matched {

View File

@ -1,19 +1,42 @@
package reference package reference
import ( import (
"errors"
"fmt" "fmt"
"strings" "strings"
"github.com/docker/distribution/digestset"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
) )
var ( const (
// legacyDefaultDomain is the legacy domain for Docker Hub (which was
// originally named "the Docker Index"). This domain is still used for
// authentication and image search, which were part of the "v1" Docker
// registry specification.
//
// This domain will continue to be supported, but there are plans to consolidate
// legacy domains to new "canonical" domains. Once those domains are decided
// on, we must update the normalization functions, but preserve compatibility
// with existing installs, clients, and user configuration.
legacyDefaultDomain = "index.docker.io" legacyDefaultDomain = "index.docker.io"
defaultDomain = "docker.io"
officialRepoName = "library" // defaultDomain is the default domain used for images on Docker Hub.
defaultTag = "latest" // It is used to normalize "familiar" names to canonical names, for example,
// to convert "ubuntu" to "docker.io/library/ubuntu:latest".
//
// Note that the actual domain of Docker Hub's registry is registry-1.docker.io.
// This domain will continue to be supported, but there are plans to consolidate
// legacy domains to new "canonical" domains. Once those domains are decided
// on, we must update the normalization functions, but preserve compatibility
// with existing installs, clients, and user configuration.
defaultDomain = "docker.io"
// officialRepoPrefix is the namespace used for official images on Docker Hub.
// It is used to normalize "familiar" names to canonical names, for example,
// to convert "ubuntu" to "docker.io/library/ubuntu:latest".
officialRepoPrefix = "library/"
// defaultTag is the default tag if no tag is provided.
defaultTag = "latest"
) )
// normalizedNamed represents a name which has been // normalizedNamed represents a name which has been
@ -35,14 +58,14 @@ func ParseNormalizedNamed(s string) (Named, error) {
return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
} }
domain, remainder := splitDockerDomain(s) domain, remainder := splitDockerDomain(s)
var remoteName string var remote string
if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
remoteName = remainder[:tagSep] remote = remainder[:tagSep]
} else { } else {
remoteName = remainder remote = remainder
} }
if strings.ToLower(remoteName) != remoteName { if strings.ToLower(remote) != remote {
return nil, errors.New("invalid reference format: repository name must be lowercase") return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remote)
} }
ref, err := Parse(domain + "/" + remainder) ref, err := Parse(domain + "/" + remainder)
@ -56,41 +79,53 @@ func ParseNormalizedNamed(s string) (Named, error) {
return named, nil return named, nil
} }
// ParseDockerRef normalizes the image reference following the docker convention. This is added // namedTaggedDigested is a reference that has both a tag and a digest.
// mainly for backward compatibility. type namedTaggedDigested interface {
// The reference returned can only be either tagged or digested. For reference contains both tag NamedTagged
// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ Digested
// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as }
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
// ParseDockerRef normalizes the image reference following the docker convention,
// which allows for references to contain both a tag and a digest. It returns a
// reference that is either tagged or digested. For references containing both
// a tag and a digest, it returns a digested reference. For example, the following
// reference:
//
// docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
//
// Is returned as a digested reference (with the ":latest" tag removed):
//
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
//
// References that are already "tagged" or "digested" are returned unmodified:
//
// // Already a digested reference
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
//
// // Already a named reference
// docker.io/library/busybox:latest
func ParseDockerRef(ref string) (Named, error) { func ParseDockerRef(ref string) (Named, error) {
named, err := ParseNormalizedNamed(ref) named, err := ParseNormalizedNamed(ref)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if _, ok := named.(NamedTagged); ok { if canonical, ok := named.(namedTaggedDigested); ok {
if canonical, ok := named.(Canonical); ok { // The reference is both tagged and digested; only return digested.
// The reference is both tagged and digested, only newNamed, err := WithName(canonical.Name())
// return digested. if err != nil {
newNamed, err := WithName(canonical.Name()) return nil, err
if err != nil {
return nil, err
}
newCanonical, err := WithDigest(newNamed, canonical.Digest())
if err != nil {
return nil, err
}
return newCanonical, nil
} }
return WithDigest(newNamed, canonical.Digest())
} }
return TagNameOnly(named), nil return TagNameOnly(named), nil
} }
// splitDockerDomain splits a repository name to domain and remotename string. // splitDockerDomain splits a repository name to domain and remote-name.
// If no valid domain is found, the default domain is used. Repository name // If no valid domain is found, the default domain is used. Repository name
// needs to be already validated before. // needs to be already validated before.
func splitDockerDomain(name string) (domain, remainder string) { func splitDockerDomain(name string) (domain, remainder string) {
i := strings.IndexRune(name, '/') i := strings.IndexRune(name, '/')
if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != localhost && strings.ToLower(name[:i]) == name[:i]) {
domain, remainder = defaultDomain, name domain, remainder = defaultDomain, name
} else { } else {
domain, remainder = name[:i], name[i+1:] domain, remainder = name[:i], name[i+1:]
@ -99,13 +134,13 @@ func splitDockerDomain(name string) (domain, remainder string) {
domain = defaultDomain domain = defaultDomain
} }
if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
remainder = officialRepoName + "/" + remainder remainder = officialRepoPrefix + remainder
} }
return return
} }
// familiarizeName returns a shortened version of the name familiar // familiarizeName returns a shortened version of the name familiar
// to to the Docker UI. Familiar names have the default domain // to the Docker UI. Familiar names have the default domain
// "docker.io" and "library/" repository prefix removed. // "docker.io" and "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar // For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". // name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
@ -119,8 +154,15 @@ func familiarizeName(named namedRepository) repository {
if repo.domain == defaultDomain { if repo.domain == defaultDomain {
repo.domain = "" repo.domain = ""
// Handle official repositories which have the pattern "library/<official repo name>" // Handle official repositories which have the pattern "library/<official repo name>"
if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { if strings.HasPrefix(repo.path, officialRepoPrefix) {
repo.path = split[1] // TODO(thaJeztah): this check may be too strict, as it assumes the
// "library/" namespace does not have nested namespaces. While this
// is true (currently), technically it would be possible for Docker
// Hub to use those (e.g. "library/distros/ubuntu:latest").
// See https://github.com/distribution/distribution/pull/3769#issuecomment-1302031785.
if remainder := strings.TrimPrefix(repo.path, officialRepoPrefix); !strings.ContainsRune(remainder, '/') {
repo.path = remainder
}
} }
} }
return repo return repo
@ -180,20 +222,3 @@ func ParseAnyReference(ref string) (Reference, error) {
return ParseNormalizedNamed(ref) return ParseNormalizedNamed(ref)
} }
// ParseAnyReferenceWithSet parses a reference string as a possible short
// identifier to be matched in a digest set, a full digest, or familiar name.
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
dgst, err := ds.Lookup(ref)
if err == nil {
return digestReference(dgst), nil
}
} else {
if dgst, err := digest.Parse(ref); err == nil {
return digestReference(dgst), nil
}
}
return ParseNormalizedNamed(ref)
}
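To make the normalization rules above concrete, a small sketch of calling ParseDockerRef from application code (not part of this diff), showing the tag being dropped when a digest is also present:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// Reference with both a tag and a digest: ParseDockerRef keeps only the digest.
	ref := "docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa"
	named, err := reference.ParseDockerRef(ref)
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())
	// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa

	// Reference without a tag or digest: the default ":latest" tag is added.
	named, err = reference.ParseDockerRef("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String()) // docker.io/library/ubuntu:latest
}
```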

View File

@ -4,11 +4,14 @@
// Grammar // Grammar
// //
// reference := name [ ":" tag ] [ "@" digest ] // reference := name [ ":" tag ] [ "@" digest ]
// name := [domain '/'] path-component ['/' path-component]* // name := [domain '/'] remote-name
// domain := domain-component ['.' domain-component]* [':' port-number] // domain := host [':' port-number]
// host := domain-name | IPv4address | \[ IPv6address \] ; rfc3986 appendix-A
// domain-name := domain-component ['.' domain-component]*
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/ // port-number := /[0-9]+/
// path-component := alpha-numeric [separator alpha-numeric]* // path-component := alpha-numeric [separator alpha-numeric]*
// path (or "remote-name") := path-component ['/' path-component]*
// alpha-numeric := /[a-z0-9]+/ // alpha-numeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/ // separator := /[_.]|__|[-]*/
// //
@ -21,7 +24,6 @@
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value // digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
// //
// identifier := /[a-f0-9]{64}/ // identifier := /[a-f0-9]{64}/
// short-identifier := /[a-f0-9]{6,64}/
package reference package reference
import ( import (
@ -145,7 +147,7 @@ type namedRepository interface {
Path() string Path() string
} }
// Domain returns the domain part of the Named reference // Domain returns the domain part of the [Named] reference.
func Domain(named Named) string { func Domain(named Named) string {
if r, ok := named.(namedRepository); ok { if r, ok := named.(namedRepository); ok {
return r.Domain() return r.Domain()
@ -154,7 +156,7 @@ func Domain(named Named) string {
return domain return domain
} }
// Path returns the name without the domain part of the Named reference // Path returns the name without the domain part of the [Named] reference.
func Path(named Named) (name string) { func Path(named Named) (name string) {
if r, ok := named.(namedRepository); ok { if r, ok := named.(namedRepository); ok {
return r.Path() return r.Path()
@ -175,7 +177,8 @@ func splitDomain(name string) (string, string) {
// hostname and name string. If no valid hostname is // hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value // found, the hostname is empty and the full value
// is returned as name // is returned as name
// DEPRECATED: Use Domain or Path //
// Deprecated: Use [Domain] or [Path].
func SplitHostname(named Named) (string, string) { func SplitHostname(named Named) (string, string) {
if r, ok := named.(namedRepository); ok { if r, ok := named.(namedRepository); ok {
return r.Domain(), r.Path() return r.Domain(), r.Path()
@ -185,7 +188,6 @@ func SplitHostname(named Named) (string, string) {
// Parse parses s and returns a syntactically valid Reference. // Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference. // If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) { func Parse(s string) (Reference, error) {
matches := ReferenceRegexp.FindStringSubmatch(s) matches := ReferenceRegexp.FindStringSubmatch(s)
if matches == nil { if matches == nil {
@ -237,7 +239,6 @@ func Parse(s string) (Reference, error) {
// the Named interface. The reference must have a name and be in the canonical // the Named interface. The reference must have a name and be in the canonical
// form, otherwise an error is returned. // form, otherwise an error is returned.
// If an error was encountered it is returned, along with a nil Reference. // If an error was encountered it is returned, along with a nil Reference.
// NOTE: ParseNamed will not handle short digests.
func ParseNamed(s string) (Named, error) { func ParseNamed(s string) (Named, error) {
named, err := ParseNormalizedNamed(s) named, err := ParseNormalizedNamed(s)
if err != nil { if err != nil {
@ -320,11 +321,13 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
// TrimNamed removes any tag or digest from the named reference. // TrimNamed removes any tag or digest from the named reference.
func TrimNamed(ref Named) Named { func TrimNamed(ref Named) Named {
domain, path := SplitHostname(ref) repo := repository{}
return repository{ if r, ok := ref.(namedRepository); ok {
domain: domain, repo.domain, repo.path = r.Domain(), r.Path()
path: path, } else {
repo.domain, repo.path = splitDomain(ref.Name())
} }
return repo
} }
func getBestReferenceType(ref reference) Reference { func getBestReferenceType(ref reference) Reference {
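A brief sketch (not part of the vendored code) of the Domain and Path accessors that the diff documents as the replacement for the deprecated SplitHostname; the "ghcr.io/example/app:v1" reference is illustrative:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("ghcr.io/example/app:v1")
	if err != nil {
		panic(err)
	}
	// Domain and Path replace the deprecated SplitHostname helper.
	fmt.Println(reference.Domain(named)) // ghcr.io
	fmt.Println(reference.Path(named))   // example/app

	// The tag is only available when the reference actually carries one.
	if tagged, ok := named.(reference.Tagged); ok {
		fmt.Println(tagged.Tag()) // v1
	}
}
```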

163
vendor/github.com/distribution/reference/regexp.go generated vendored Normal file
View File

@ -0,0 +1,163 @@
package reference
import (
"regexp"
"strings"
)
// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:<encoded>").
var DigestRegexp = regexp.MustCompile(digestPat)
// DomainRegexp matches hostname or IP-addresses, optionally including a port
// number. It defines the structure of potential domain components that may be
// part of image names. This is purposely a subset of what is allowed by DNS to
// ensure backwards compatibility with Docker image names. It may be a subset of
// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between
// square brackets (excluding zone identifiers as defined by [RFC 6874] or special
// addresses such as IPv4-Mapped).
//
// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874.
var DomainRegexp = regexp.MustCompile(domainAndPort)
// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
var IdentifierRegexp = regexp.MustCompile(identifier)
// NameRegexp is the format for the name component of references, including
// an optional domain and port, but without tag or digest suffix.
var NameRegexp = regexp.MustCompile(namePat)
// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
var ReferenceRegexp = regexp.MustCompile(referencePat)
// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go].
//
// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28
var TagRegexp = regexp.MustCompile(tag)
const (
// alphanumeric defines the alphanumeric atom, typically a
// component of names. This only allows lower case characters and digits.
alphanumeric = `[a-z0-9]+`
// separator defines the separators allowed to be embedded in name
// components. This allows one period, one or two underscore and multiple
// dashes. Repeated dashes and underscores are intentionally treated
// differently. In order to support valid hostnames as name components,
// supporting repeated dash was added. Additionally double underscore is
// now allowed as a separator to loosen the restriction for previously
// supported names.
separator = `(?:[._]|__|[-]+)`
// localhost is treated as a special value for domain-name. Any other
// domain-name without a "." or a ":port" are considered a path component.
localhost = `localhost`
// domainNameComponent restricts the registry domain component of a
// repository name to start with a component as defined by DomainRegexp.
domainNameComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`
// optionalPort matches an optional port-number including the port separator
// (e.g. ":80").
optionalPort = `(?::[0-9]+)?`
// tag matches valid tag names. From docker/docker:graph/tags.go.
tag = `[\w][\w.-]{0,127}`
// digestPat matches well-formed digests, including algorithm (e.g. "sha256:<encoded>").
//
// TODO(thaJeztah): this should follow the same rules as https://pkg.go.dev/github.com/opencontainers/go-digest@v1.0.0#DigestRegexp
// so that go-digest defines the canonical format. Note that the go-digest is
// more relaxed:
// - it allows multiple algorithms (e.g. "sha256+b64:<encoded>") to allow
// future expansion of supported algorithms.
// - it allows the "<encoded>" value to use urlsafe base64 encoding as defined
// in [rfc4648, section 5].
//
// [rfc4648, section 5]: https://www.rfc-editor.org/rfc/rfc4648#section-5.
digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`
// identifier is the format for a content addressable identifier using sha256.
// These identifiers are like digests without the algorithm, since sha256 is used.
identifier = `([a-f0-9]{64})`
// ipv6address are enclosed between square brackets and may be represented
// in many ways, see rfc5952. Only IPv6 in compressed or uncompressed format
// are allowed, IPv6 zone identifiers (rfc6874) or Special addresses such as
// IPv4-Mapped are deliberately excluded.
ipv6address = `\[(?:[a-fA-F0-9:]+)\]`
)
var (
// domainName defines the structure of potential domain components
// that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image
// names. This includes IPv4 addresses on decimal format.
domainName = domainNameComponent + anyTimes(`\.`+domainNameComponent)
// host defines the structure of potential domains based on the URI
// Host subcomponent on rfc3986. It may be a subset of DNS domain name,
// or an IPv4 address in decimal format, or an IPv6 address between square
// brackets (excluding zone identifiers as defined by rfc6874 or special
// addresses such as IPv4-Mapped).
host = `(?:` + domainName + `|` + ipv6address + `)`
// allowed by the URI Host subcomponent on rfc3986 to ensure backwards
// compatibility with Docker image names.
domainAndPort = host + optionalPort
// anchoredTagRegexp matches valid tag names, anchored at the start and
// end of the matched string.
anchoredTagRegexp = regexp.MustCompile(anchored(tag))
// anchoredDigestRegexp matches valid digests, anchored at the start and
// end of the matched string.
anchoredDigestRegexp = regexp.MustCompile(anchored(digestPat))
// pathComponent restricts path-components to start with an alphanumeric
// character, with following parts able to be separated by a separator
// (one period, one or two underscore and multiple dashes).
pathComponent = alphanumeric + anyTimes(separator+alphanumeric)
// remoteName matches the remote-name of a repository. It consists of one
// or more forward slash (/) delimited path-components:
//
// pathComponent[[/pathComponent] ...] // e.g., "library/ubuntu"
remoteName = pathComponent + anyTimes(`/`+pathComponent)
namePat = optional(domainAndPort+`/`) + remoteName
// anchoredNameRegexp is used to parse a name value, capturing the
// domain and trailing components.
anchoredNameRegexp = regexp.MustCompile(anchored(optional(capture(domainAndPort), `/`), capture(remoteName)))
referencePat = anchored(capture(namePat), optional(`:`, capture(tag)), optional(`@`, capture(digestPat)))
// anchoredIdentifierRegexp is used to check or match an
// identifier value, anchored at start and end of string.
anchoredIdentifierRegexp = regexp.MustCompile(anchored(identifier))
)
// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...string) string {
return `(?:` + strings.Join(res, "") + `)?`
}
// anyTimes wraps the expression in a non-capturing group that can occur
// any number of times.
func anyTimes(res ...string) string {
return `(?:` + strings.Join(res, "") + `)*`
}
// capture wraps the expression in a capturing group.
func capture(res ...string) string {
return `(` + strings.Join(res, "") + `)`
}
// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...string) string {
return `^` + strings.Join(res, "") + `$`
}
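To show how the exported regexps above are meant to be used, a minimal sketch (the registry host and repository name are made up) that splits a reference into its name, tag, and digest capture groups:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// ReferenceRegexp is anchored and captures name, tag, and digest in that order.
	m := reference.ReferenceRegexp.FindStringSubmatch(
		"registry.example.com:5000/team/app:1.2.3")
	if m == nil {
		fmt.Println("no match")
		return
	}
	fmt.Println(m[1]) // registry.example.com:5000/team/app
	fmt.Println(m[2]) // 1.2.3
	fmt.Println(m[3]) // "" (no digest present)
}
```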

75
vendor/github.com/distribution/reference/sort.go generated vendored Normal file
View File

@ -0,0 +1,75 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reference
import (
"sort"
)
// Sort sorts string references preferring higher information references.
//
// The precedence is as follows:
//
// 1. [Named] + [Tagged] + [Digested] (e.g., "docker.io/library/busybox:latest@sha256:<digest>")
// 2. [Named] + [Tagged] (e.g., "docker.io/library/busybox:latest")
// 3. [Named] + [Digested] (e.g., "docker.io/library/busybox@sha256:<digest>")
// 4. [Named] (e.g., "docker.io/library/busybox")
// 5. [Digested] (e.g., "docker.io@sha256:<digest>")
// 6. Parse error
func Sort(references []string) []string {
var prefs []Reference
var bad []string
for _, ref := range references {
pref, err := ParseAnyReference(ref)
if err != nil {
bad = append(bad, ref)
} else {
prefs = append(prefs, pref)
}
}
sort.Slice(prefs, func(a, b int) bool {
ar := refRank(prefs[a])
br := refRank(prefs[b])
if ar == br {
return prefs[a].String() < prefs[b].String()
}
return ar < br
})
sort.Strings(bad)
var refs []string
for _, pref := range prefs {
refs = append(refs, pref.String())
}
return append(refs, bad...)
}
func refRank(ref Reference) uint8 {
if _, ok := ref.(Named); ok {
if _, ok = ref.(Tagged); ok {
if _, ok = ref.(Digested); ok {
return 1
}
return 2
}
if _, ok = ref.(Digested); ok {
return 3
}
return 4
}
return 5
}
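A short usage sketch for Sort (the input strings are illustrative), showing the precedence described in the doc comment above:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	refs := reference.Sort([]string{
		"docker.io/library/busybox",
		"docker.io/library/busybox:latest",
		"not a valid reference!",
	})
	// Tagged references sort before plain named ones; unparseable strings go last.
	for _, r := range refs {
		fmt.Println(r)
	}
}
```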

View File

@ -1,247 +0,0 @@
package digestset
import (
"errors"
"sort"
"strings"
"sync"
digest "github.com/opencontainers/go-digest"
)
var (
// ErrDigestNotFound is used when a matching digest
// could not be found in a set.
ErrDigestNotFound = errors.New("digest not found")
// ErrDigestAmbiguous is used when multiple digests
// are found in a set. None of the matching digests
// should be considered valid matches.
ErrDigestAmbiguous = errors.New("ambiguous digest string")
)
// Set is used to hold a unique set of digests which
// may be easily referenced by a string
// representation of the digest as well as short representation.
// The uniqueness of the short representation is based on other
// digests in the set. If digests are omitted from this set,
// collisions in a larger set may not be detected, therefore it
// is important to always do short representation lookups on
// the complete set of digests. To mitigate collisions, an
// appropriately long short code should be used.
type Set struct {
mutex sync.RWMutex
entries digestEntries
}
// NewSet creates an empty set of digests
// which may have digests added.
func NewSet() *Set {
return &Set{
entries: digestEntries{},
}
}
// checkShortMatch checks whether two digests match as either whole
// values or short values. This function does not test equality,
// rather whether the second value could match against the first
// value.
func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
if len(hex) == len(shortHex) {
if hex != shortHex {
return false
}
if len(shortAlg) > 0 && string(alg) != shortAlg {
return false
}
} else if !strings.HasPrefix(hex, shortHex) {
return false
} else if len(shortAlg) > 0 && string(alg) != shortAlg {
return false
}
return true
}
// Lookup looks for a digest matching the given string representation.
// If no digests could be found ErrDigestNotFound will be returned
// with an empty digest value. If multiple matches are found
// ErrDigestAmbiguous will be returned with an empty digest value.
func (dst *Set) Lookup(d string) (digest.Digest, error) {
dst.mutex.RLock()
defer dst.mutex.RUnlock()
if len(dst.entries) == 0 {
return "", ErrDigestNotFound
}
var (
searchFunc func(int) bool
alg digest.Algorithm
hex string
)
dgst, err := digest.Parse(d)
if err == digest.ErrDigestInvalidFormat {
hex = d
searchFunc = func(i int) bool {
return dst.entries[i].val >= d
}
} else {
hex = dgst.Hex()
alg = dgst.Algorithm()
searchFunc = func(i int) bool {
if dst.entries[i].val == hex {
return dst.entries[i].alg >= alg
}
return dst.entries[i].val >= hex
}
}
idx := sort.Search(len(dst.entries), searchFunc)
if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
return "", ErrDigestNotFound
}
if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
return dst.entries[idx].digest, nil
}
if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
return "", ErrDigestAmbiguous
}
return dst.entries[idx].digest, nil
}
// Add adds the given digest to the set. An error will be returned
// if the given digest is invalid. If the digest already exists in the
// set, this operation will be a no-op.
func (dst *Set) Add(d digest.Digest) error {
if err := d.Validate(); err != nil {
return err
}
dst.mutex.Lock()
defer dst.mutex.Unlock()
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
searchFunc := func(i int) bool {
if dst.entries[i].val == entry.val {
return dst.entries[i].alg >= entry.alg
}
return dst.entries[i].val >= entry.val
}
idx := sort.Search(len(dst.entries), searchFunc)
if idx == len(dst.entries) {
dst.entries = append(dst.entries, entry)
return nil
} else if dst.entries[idx].digest == d {
return nil
}
entries := append(dst.entries, nil)
copy(entries[idx+1:], entries[idx:len(entries)-1])
entries[idx] = entry
dst.entries = entries
return nil
}
// Remove removes the given digest from the set. An err will be
// returned if the given digest is invalid. If the digest does
// not exist in the set, this operation will be a no-op.
func (dst *Set) Remove(d digest.Digest) error {
if err := d.Validate(); err != nil {
return err
}
dst.mutex.Lock()
defer dst.mutex.Unlock()
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
searchFunc := func(i int) bool {
if dst.entries[i].val == entry.val {
return dst.entries[i].alg >= entry.alg
}
return dst.entries[i].val >= entry.val
}
idx := sort.Search(len(dst.entries), searchFunc)
// Not found if idx is after or value at idx is not digest
if idx == len(dst.entries) || dst.entries[idx].digest != d {
return nil
}
entries := dst.entries
copy(entries[idx:], entries[idx+1:])
entries = entries[:len(entries)-1]
dst.entries = entries
return nil
}
// All returns all the digests in the set
func (dst *Set) All() []digest.Digest {
dst.mutex.RLock()
defer dst.mutex.RUnlock()
retValues := make([]digest.Digest, len(dst.entries))
for i := range dst.entries {
retValues[i] = dst.entries[i].digest
}
return retValues
}
// ShortCodeTable returns a map of Digest to unique short codes. The
// length represents the minimum value, the maximum length may be the
// entire value of digest if uniqueness cannot be achieved without the
// full value. This function will attempt to make short codes as short
// as possible to be unique.
func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
dst.mutex.RLock()
defer dst.mutex.RUnlock()
m := make(map[digest.Digest]string, len(dst.entries))
l := length
resetIdx := 0
for i := 0; i < len(dst.entries); i++ {
var short string
extended := true
for extended {
extended = false
if len(dst.entries[i].val) <= l {
short = dst.entries[i].digest.String()
} else {
short = dst.entries[i].val[:l]
for j := i + 1; j < len(dst.entries); j++ {
if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
if j > resetIdx {
resetIdx = j
}
extended = true
} else {
break
}
}
if extended {
l++
}
}
}
m[dst.entries[i].digest] = short
if i >= resetIdx {
l = length
}
}
return m
}
type digestEntry struct {
alg digest.Algorithm
val string
digest digest.Digest
}
type digestEntries []*digestEntry
func (d digestEntries) Len() int {
return len(d)
}
func (d digestEntries) Less(i, j int) bool {
if d[i].val != d[j].val {
return d[i].val < d[j].val
}
return d[i].alg < d[j].alg
}
func (d digestEntries) Swap(i, j int) {
d[i], d[j] = d[j], d[i]
}

View File

@ -1,143 +0,0 @@
package reference
import "regexp"
var (
// alphaNumericRegexp defines the alpha numeric atom, typically a
// component of names. This only allows lower case characters and digits.
alphaNumericRegexp = match(`[a-z0-9]+`)
// separatorRegexp defines the separators allowed to be embedded in name
// components. This allow one period, one or two underscore and multiple
// dashes.
separatorRegexp = match(`(?:[._]|__|[-]*)`)
// nameComponentRegexp restricts registry path component names to start
// with at least one letter or number, with following parts able to be
// separated by one period, one or two underscore and multiple dashes.
nameComponentRegexp = expression(
alphaNumericRegexp,
optional(repeated(separatorRegexp, alphaNumericRegexp)))
// domainComponentRegexp restricts the registry domain component of a
// repository name to start with a component as defined by DomainRegexp
// and followed by an optional port.
domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
// DomainRegexp defines the structure of potential domain components
// that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image
// names.
DomainRegexp = expression(
domainComponentRegexp,
optional(repeated(literal(`.`), domainComponentRegexp)),
optional(literal(`:`), match(`[0-9]+`)))
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
TagRegexp = match(`[\w][\w.-]{0,127}`)
// anchoredTagRegexp matches valid tag names, anchored at the start and
// end of the matched string.
anchoredTagRegexp = anchored(TagRegexp)
// DigestRegexp matches valid digests.
DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
// anchoredDigestRegexp matches valid digests, anchored at the start and
// end of the matched string.
anchoredDigestRegexp = anchored(DigestRegexp)
// NameRegexp is the format for the name component of references. The
// regexp has capturing groups for the domain and name part omitting
// the separating forward slash from either.
NameRegexp = expression(
optional(DomainRegexp, literal(`/`)),
nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp)))
// anchoredNameRegexp is used to parse a name value, capturing the
// domain and trailing components.
anchoredNameRegexp = anchored(
optional(capture(DomainRegexp), literal(`/`)),
capture(nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp))))
// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
ReferenceRegexp = anchored(capture(NameRegexp),
optional(literal(":"), capture(TagRegexp)),
optional(literal("@"), capture(DigestRegexp)))
// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
IdentifierRegexp = match(`([a-f0-9]{64})`)
// ShortIdentifierRegexp is the format used to represent a prefix
// of an identifier. A prefix may be used to match a sha256 identifier
// within a list of trusted identifiers.
ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
// anchoredIdentifierRegexp is used to check or match an
// identifier value, anchored at start and end of string.
anchoredIdentifierRegexp = anchored(IdentifierRegexp)
// anchoredShortIdentifierRegexp is used to check if a value
// is a possible identifier prefix, anchored at start and end
// of string.
anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
)
// match compiles the string to a regular expression.
var match = regexp.MustCompile
// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
re := match(regexp.QuoteMeta(s))
if _, complete := re.LiteralPrefix(); !complete {
panic("must be a literal")
}
return re
}
// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
var s string
for _, re := range res {
s += re.String()
}
return match(s)
}
// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
return match(group(expression(res...)).String() + `?`)
}
// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
return match(group(expression(res...)).String() + `+`)
}
// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
return match(`(?:` + expression(res...).String() + `)`)
}
// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
return match(`(` + expression(res...).String() + `)`)
}
// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
return match(`^` + expression(res...).String() + `$`)
}

13
vendor/github.com/fsnotify/fsnotify/.cirrus.yml generated vendored Normal file
View File

@ -0,0 +1,13 @@
freebsd_task:
name: 'FreeBSD'
freebsd_instance:
image_family: freebsd-13-2
install_script:
- pkg update -f
- pkg install -y go
test_script:
# run tests as user "cirrus" instead of root
- pw useradd cirrus -m
- chown -R cirrus:cirrus .
- FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
- sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...

View File

@ -4,3 +4,4 @@
# Output of go build ./cmd/fsnotify # Output of go build ./cmd/fsnotify
/fsnotify /fsnotify
/fsnotify.exe

View File

@ -1,16 +1,87 @@
# Changelog # Changelog
All notable changes to this project will be documented in this file. Unreleased
----------
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
Nothing yet. Nothing yet.
## [1.6.0] - 2022-10-13 1.7.0 - 2023-10-22
------------------
This version of fsnotify needs Go 1.17.
### Additions
- illumos: add FEN backend to support illumos and Solaris. ([#371])
- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful
in cases where you can't control the kernel buffer and receive a large number
of events in bursts. ([#550], [#572])
- all: add `AddWith()`, which is identical to `Add()` but allows passing
options. ([#521])
- windows: allow setting the ReadDirectoryChangesW() buffer size with
`fsnotify.WithBufferSize()`; the default of 64K is the highest value that
works on all platforms and is enough for most purposes, but in some cases a
higher buffer is needed. ([#521])
### Changes and fixes
- inotify: remove watcher if a watched path is renamed ([#518])
After a rename the reported name wasn't updated, or was even an empty string.
Inotify doesn't provide any good facilities to update it, so just remove the
watcher. This is already how it worked on kqueue and FEN.
On Windows this does work, and remains working.
- windows: don't listen for file attribute changes ([#520])
File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API,
with no way to see if they're a file write or attribute change, so would show
up as a fsnotify.Write event. This is never useful, and could result in many
spurious Write events.
- windows: return `ErrEventOverflow` if the buffer is full ([#525])
Before it would merely return "short read", making it hard to detect this
error.
- kqueue: make sure events for all files are delivered properly when removing a
watched directory ([#526])
Previously they would get sent with `""` (empty string) or `"."` as the path
name.
- kqueue: don't emit spurious Create events for symbolic links ([#524])
The link would get resolved but kqueue would "forget" it already saw the link
itself, resulting on a Create for every Write event for the directory.
- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516])
- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in
`backend_other.go`, making it easier to use on unsupported platforms such as
WASM, AIX, etc. ([#528])
- other: use the `backend_other.go` no-op if the `appengine` build tag is set;
Google AppEngine forbids usage of the unsafe package so the inotify backend
won't compile there.
[#371]: https://github.com/fsnotify/fsnotify/pull/371
[#516]: https://github.com/fsnotify/fsnotify/pull/516
[#518]: https://github.com/fsnotify/fsnotify/pull/518
[#520]: https://github.com/fsnotify/fsnotify/pull/520
[#521]: https://github.com/fsnotify/fsnotify/pull/521
[#524]: https://github.com/fsnotify/fsnotify/pull/524
[#525]: https://github.com/fsnotify/fsnotify/pull/525
[#526]: https://github.com/fsnotify/fsnotify/pull/526
[#528]: https://github.com/fsnotify/fsnotify/pull/528
[#537]: https://github.com/fsnotify/fsnotify/pull/537
[#550]: https://github.com/fsnotify/fsnotify/pull/550
[#572]: https://github.com/fsnotify/fsnotify/pull/572
1.6.0 - 2022-10-13
------------------
This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
but not documented). It also increases the minimum Linux version to 2.6.32. but not documented). It also increases the minimum Linux version to 2.6.32.
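As a hedged sketch of the 1.7.0 additions listed above (NewBufferedWatcher, AddWith, WithBufferSize); the watched path and the buffer sizes are assumptions for illustration, not values from the changelog:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// NewBufferedWatcher buffers the Events channel; a size of 0 behaves like
	// the unbuffered NewWatcher.
	w, err := fsnotify.NewBufferedWatcher(64)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// AddWith is Add plus options; WithBufferSize only affects the Windows backend.
	if err := w.AddWith("/tmp", fsnotify.WithBufferSize(128*1024)); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			log.Println("event:", ev)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```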

View File

@ -1,29 +1,31 @@
fsnotify is a Go library to provide cross-platform filesystem notifications on fsnotify is a Go library to provide cross-platform filesystem notifications on
Windows, Linux, macOS, and BSD systems. Windows, Linux, macOS, BSD, and illumos.
Go 1.16 or newer is required; the full documentation is at Go 1.17 or newer is required; the full documentation is at
https://pkg.go.dev/github.com/fsnotify/fsnotify https://pkg.go.dev/github.com/fsnotify/fsnotify
**It's best to read the documentation at pkg.go.dev, as it's pinned to the last
released version, whereas this README is for the last development version which
may include additions/changes.**
--- ---
Platform support: Platform support:
| Adapter | OS | Status | | Backend | OS | Status |
| --------------------- | ---------------| -------------------------------------------------------------| | :-------------------- | :--------- | :------------------------------------------------------------------------ |
| inotify | Linux 2.6.32+ | Supported | | inotify | Linux | Supported |
| kqueue | BSD, macOS | Supported | | kqueue | BSD, macOS | Supported |
| ReadDirectoryChangesW | Windows | Supported | | ReadDirectoryChangesW | Windows | Supported |
| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | | FEN | illumos | Supported |
| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | | fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | | AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment |
| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | | FSEvents | macOS | [Needs support in x/sys/unix][fsevents] |
| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | | USN Journals | Windows | [Needs support in x/sys/windows][usn] |
| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) |
Linux and macOS should include Android and iOS, but these are currently untested. Linux and illumos should include Android and Solaris, but these are currently
untested.
[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847
[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129
Usage Usage
----- -----
@ -83,20 +85,23 @@ run with:
% go run ./cmd/fsnotify % go run ./cmd/fsnotify
Further detailed documentation can be found in godoc:
https://pkg.go.dev/github.com/fsnotify/fsnotify
FAQ FAQ
--- ---
### Will a file still be watched when it's moved to another directory? ### Will a file still be watched when it's moved to another directory?
No, not unless you are watching the location it was moved to. No, not unless you are watching the location it was moved to.
### Are subdirectories watched too? ### Are subdirectories watched?
No, you must add watches for any directory you want to watch (a recursive No, you must add watches for any directory you want to watch (a recursive
watcher is on the roadmap: [#18]). watcher is on the roadmap: [#18]).
[#18]: https://github.com/fsnotify/fsnotify/issues/18 [#18]: https://github.com/fsnotify/fsnotify/issues/18
### Do I have to watch the Error and Event channels in a goroutine? ### Do I have to watch the Error and Event channels in a goroutine?
As of now, yes (you can read both channels in the same goroutine using `select`, Yes. You can read both channels in the same goroutine using `select` (you don't
you don't need a separate goroutine for both channels; see the example). need a separate goroutine for both channels; see the example).
### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys? ### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
fsnotify requires support from underlying OS to work. The current NFS and SMB fsnotify requires support from underlying OS to work. The current NFS and SMB
@ -107,6 +112,32 @@ This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
[#9]: https://github.com/fsnotify/fsnotify/issues/9 [#9]: https://github.com/fsnotify/fsnotify/issues/9
### Why do I get many Chmod events?
Some programs may generate a lot of attribute changes; for example Spotlight on
macOS, anti-virus programs, backup applications, and some others are known to do
this. As a rule, it's typically best to ignore Chmod events. They're often not
useful, and tend to cause problems.
Spotlight indexing on macOS can result in multiple events (see [#15]). A
temporary workaround is to add your folder(s) to the *Spotlight Privacy
settings* until we have a native FSEvents implementation (see [#11]).
[#11]: https://github.com/fsnotify/fsnotify/issues/11
[#15]: https://github.com/fsnotify/fsnotify/issues/15
### Watching a file doesn't work well
Watching individual files (rather than directories) is generally not recommended
as many programs (especially editors) update files atomically: it will write to
a temporary file which is then moved to the destination, overwriting the original
(or some variant thereof). The watcher on the original file is now lost, as that
no longer exists.
The upshot of this is that a power failure or crash won't leave a half-written
file.
Watch the parent directory and use `Event.Name` to filter out files you're not
interested in. There is an example of this in `cmd/fsnotify/file.go`.
Platform-specific notes Platform-specific notes
----------------------- -----------------------
### Linux ### Linux
@ -151,11 +182,3 @@ these platforms.
The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
control the maximum number of open files. control the maximum number of open files.
### macOS
Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary
workaround is to add your folder(s) to the *Spotlight Privacy settings* until we
have a native FSEvents implementation (see [#11]).
[#11]: https://github.com/fsnotify/fsnotify/issues/11
[#15]: https://github.com/fsnotify/fsnotify/issues/15
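Following the FAQ advice above about watching the parent directory and filtering on Event.Name, a minimal sketch; the /etc/resolv.conf target is just an example:

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Drain the Errors channel so it never blocks the watcher.
	go func() {
		for err := range w.Errors {
			log.Println("error:", err)
		}
	}()

	// Watch the parent directory rather than the file itself, so the watch
	// survives editors that replace the file atomically.
	if err := w.Add("/etc"); err != nil {
		log.Fatal(err)
	}

	for ev := range w.Events {
		// Filter to the one file we care about.
		if filepath.Base(ev.Name) == "resolv.conf" && ev.Has(fsnotify.Write) {
			log.Println("modified:", ev.Name)
		}
	}
}
```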

View File

@ -1,10 +1,19 @@
//go:build solaris //go:build solaris
// +build solaris // +build solaris
// Note: the documentation on the Watcher type and methods is generated from
// mkdoc.zsh
package fsnotify package fsnotify
import ( import (
"errors" "errors"
"fmt"
"os"
"path/filepath"
"sync"
"golang.org/x/sys/unix"
) )
// Watcher watches a set of paths, delivering events on a channel. // Watcher watches a set of paths, delivering events on a channel.
@ -17,9 +26,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file // When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example: // descriptors are closed, and deletes will always emit a Chmod. For example:
// //
// fp := os.Open("file") // fp := os.Open("file")
// os.Remove("file") // Triggers Chmod // os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove // fp.Close() // Triggers Remove
// //
// This is the event that inotify sends, so not much can be changed about this. // This is the event that inotify sends, so not much can be changed about this.
// //
@ -33,16 +42,16 @@ import (
// //
// To increase them you can use sysctl or write the value to the /proc file: // To increase them you can use sysctl or write the value to the /proc file:
// //
// # Default values on Linux 5.18 // # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983 // sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128 // sysctl fs.inotify.max_user_instances=128
// //
// To make the changes persist on reboot edit /etc/sysctl.conf or // To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation): // your distro's documentation):
// //
// fs.inotify.max_user_watches=124983 // fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128 // fs.inotify.max_user_instances=128
// //
// Reaching the limit will result in a "no space left on device" or "too many open // Reaching the limit will result in a "no space left on device" or "too many open
// files" error. // files" error.
@ -58,14 +67,20 @@ import (
// control the maximum number of open files, as well as /etc/login.conf on BSD // control the maximum number of open files, as well as /etc/login.conf on BSD
// systems. // systems.
// //
// # macOS notes // # Windows notes
// //
// Spotlight indexing on macOS can result in multiple events (see [#15]). A // Paths can be added as "C:\path\to\dir", but forward slashes
// temporary workaround is to add your folder(s) to the "Spotlight Privacy // ("C:/path/to/dir") will also work.
// Settings" until we have a native FSEvents implementation (see [#11]).
// //
// [#11]: https://github.com/fsnotify/fsnotify/issues/11 // When a watched directory is removed it will always send an event for the
// [#15]: https://github.com/fsnotify/fsnotify/issues/15 // directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all files, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
type Watcher struct { type Watcher struct {
// Events sends the filesystem change events. // Events sends the filesystem change events.
// //
@ -92,44 +107,129 @@ type Watcher struct {
// initiated by the user may show up as one or multiple // initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to // writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program // disk. For example when compiling a large Go program
// you may get hundreds of Write events, so you // you may get hundreds of Write events, and you may
// probably want to wait until you've stopped receiving // want to wait until you've stopped receiving them
// them (see the dedup example in cmd/fsnotify). // (see the dedup example in cmd/fsnotify).
//
// Some systems may send Write event for directories
// when the directory content changes.
// //
// fsnotify.Chmod Attributes were changed. On Linux this is also sent // fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a // when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent // link to an inode is removed). On kqueue it's sent
// and on kqueue when a file is truncated. On Windows // when a file is truncated. On Windows it's never
// it's never sent. // sent.
Events chan Event Events chan Event
// Errors sends any errors. // Errors sends any errors.
//
// ErrEventOverflow is used to indicate there are too many events:
//
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
// - kqueue, fen: Not used.
Errors chan error Errors chan error
mu sync.Mutex
port *unix.EventPort
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
dirs map[string]struct{} // Explicitly watched directories
watches map[string]struct{} // Explicitly watched non-directories
} }
// NewWatcher creates a new Watcher. // NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) { func NewWatcher() (*Watcher, error) {
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") return NewBufferedWatcher(0)
} }
// Close removes all watches and closes the events channel. // NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) {
w := &Watcher{
Events: make(chan Event, sz),
Errors: make(chan error),
dirs: make(map[string]struct{}),
watches: make(map[string]struct{}),
done: make(chan struct{}),
}
var err error
w.port, err = unix.NewEventPort()
if err != nil {
return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err)
}
go w.readEvents()
return w, nil
}
// sendEvent attempts to send an event to the user, returning true if the event
// was put in the channel successfully and false if the watcher has been closed.
func (w *Watcher) sendEvent(name string, op Op) (sent bool) {
select {
case w.Events <- Event{Name: name, Op: op}:
return true
case <-w.done:
return false
}
}
// sendError attempts to send an error to the user, returning true if the error
// was put in the channel successfully and false if the watcher has been closed.
func (w *Watcher) sendError(err error) (sent bool) {
select {
case w.Errors <- err:
return true
case <-w.done:
return false
}
}
func (w *Watcher) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error { func (w *Watcher) Close() error {
return nil // Take the lock used by associateFile to prevent lingering events from
// being processed after the close
w.mu.Lock()
defer w.mu.Unlock()
if w.isClosed() {
return nil
}
close(w.done)
return w.port.Close()
} }
// Add starts monitoring the path for changes. // Add starts monitoring the path for changes.
// //
// A path can only be watched once; attempting to watch it more than once will // A path can only be watched once; watching it more than once is a no-op and will
// return an error. Paths that do not yet exist on the filesystem cannot be // not return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted. // watched.
// //
// A path will remain watched if it gets renamed to somewhere else on the same // A watch will be automatically removed if the watched path is deleted or
// filesystem, but the monitor will get removed if the path gets deleted and // renamed. The exception is the Windows backend, which doesn't remove the
// re-created, or if it's moved to a different filesystem. // watcher on renames.
// //
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work. // filesystems (/proc, /sys, etc.) generally don't work.
// //
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@ -139,15 +239,63 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to the destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
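The doc comment above points at the parent-directory pattern; the sketch below is an illustrative stand-alone example of it (the config path and filtering rules are invented, not from this repository):

package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	target := "/etc/myapp/config.yaml" // hypothetical file of interest

	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch the directory rather than the file itself, so atomic
	// replace-by-rename updates are still observed.
	if err := w.Add(filepath.Dir(target)); err != nil {
		log.Fatal(err)
	}

	for ev := range w.Events {
		if ev.Name != target {
			continue // some other file in the same directory
		}
		if ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create) {
			log.Println("config changed:", ev)
		}
	}
}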
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
// other platforms. The default is 64K (65536 bytes).
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
if w.port.PathIsWatched(name) {
return nil
}
_ = getOptions(opts...)
// Currently we resolve symlinks that were explicitly requested to be
// watched. Otherwise we would use LStat here.
stat, err := os.Stat(name)
if err != nil {
return err
}
// Associate all files in the directory.
if stat.IsDir() {
err := w.handleDirectory(name, stat, true, w.associateFile)
if err != nil {
return err
}
w.mu.Lock()
w.dirs[name] = struct{}{}
w.mu.Unlock()
return nil
}
err = w.associateFile(name, stat, true)
if err != nil {
return err
}
w.mu.Lock()
w.watches[name] = struct{}{}
w.mu.Unlock()
return nil
}
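As a hedged sketch of the AddWith/WithBufferSize combination (the directory path and the 128 KiB size are illustrative; on non-Windows backends such as this FEN one the option is accepted but has no effect):

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// addWithLargerBuffer is illustrative only; WithBufferSize matters only for
// the Windows ReadDirectoryChangesW backend, elsewhere it is a no-op.
func addWithLargerBuffer(w *fsnotify.Watcher, dir string) error {
	return w.AddWith(dir, fsnotify.WithBufferSize(128*1024))
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := addWithLargerBuffer(w, `C:\projects\site`); err != nil {
		log.Println("add failed:", err)
	}
}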
@ -157,6 +305,336 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
if w.isClosed() {
return nil
}
if !w.port.PathIsWatched(name) {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
// The user has expressed an intent. Immediately remove this name from
// whichever watch list it might be in. If it's not in there the delete
// doesn't cause harm.
w.mu.Lock()
delete(w.watches, name)
delete(w.dirs, name)
w.mu.Unlock()
stat, err := os.Stat(name)
if err != nil {
return err
}
// Remove associations for every file in the directory.
if stat.IsDir() {
err := w.handleDirectory(name, stat, false, w.dissociateFile)
if err != nil {
return err
}
return nil
}
err = w.port.DissociatePath(name)
if err != nil {
return err
}
return nil
}
// readEvents contains the main loop that runs in a goroutine watching for events.
func (w *Watcher) readEvents() {
// If this function returns, the watcher has been closed and we can close
// these channels
defer func() {
close(w.Errors)
close(w.Events)
}()
pevents := make([]unix.PortEvent, 8)
for {
count, err := w.port.Get(pevents, 1, nil)
if err != nil && err != unix.ETIME {
// Interrupted system call (count should be 0) ignore and continue
if errors.Is(err, unix.EINTR) && count == 0 {
continue
}
// Get failed because we called w.Close()
if errors.Is(err, unix.EBADF) && w.isClosed() {
return
}
// There was an error not caused by calling w.Close()
if !w.sendError(err) {
return
}
}
p := pevents[:count]
for _, pevent := range p {
if pevent.Source != unix.PORT_SOURCE_FILE {
// Event from unexpected source received; should never happen.
if !w.sendError(errors.New("Event from unexpected source received")) {
return
}
continue
}
err = w.handleEvent(&pevent)
if err != nil {
if !w.sendError(err) {
return
}
}
}
}
}
func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
files, err := os.ReadDir(path)
if err != nil {
return err
}
// Handle all children of the directory.
for _, entry := range files {
finfo, err := entry.Info()
if err != nil {
return err
}
err = handler(filepath.Join(path, finfo.Name()), finfo, false)
if err != nil {
return err
}
}
// And finally handle the directory itself.
return handler(path, stat, follow)
}
// handleEvent might need to emit more than one fsnotify event if the events
// bitmap matches more than one event type (e.g. the file was both modified and
// had the attributes changed between when the association was created and the
// when event was returned)
func (w *Watcher) handleEvent(event *unix.PortEvent) error {
var (
events = event.Events
path = event.Path
fmode = event.Cookie.(os.FileMode)
reRegister = true
)
w.mu.Lock()
_, watchedDir := w.dirs[path]
_, watchedPath := w.watches[path]
w.mu.Unlock()
isWatched := watchedDir || watchedPath
if events&unix.FILE_DELETE != 0 {
if !w.sendEvent(path, Remove) {
return nil
}
reRegister = false
}
if events&unix.FILE_RENAME_FROM != 0 {
if !w.sendEvent(path, Rename) {
return nil
}
// Don't keep watching the new file name
reRegister = false
}
if events&unix.FILE_RENAME_TO != 0 {
// We don't report a Rename event for this case, because Rename events
// are interpreted as referring to the _old_ name of the file, and in
// this case the event would refer to the new name of the file. This
// type of rename event is not supported by fsnotify.
// inotify reports a Remove event in this case, so we simulate this
// here.
if !w.sendEvent(path, Remove) {
return nil
}
// Don't keep watching the file that was removed
reRegister = false
}
// The file is gone, nothing left to do.
if !reRegister {
if watchedDir {
w.mu.Lock()
delete(w.dirs, path)
w.mu.Unlock()
}
if watchedPath {
w.mu.Lock()
delete(w.watches, path)
w.mu.Unlock()
}
return nil
}
// If we didn't get a deletion the file still exists and we're going to have
// to watch it again. Let's Stat it now so that we can compare permissions
// and have what we need to continue watching the file
stat, err := os.Lstat(path)
if err != nil {
// This is unexpected, but we should still emit an event. This happens
// most often on "rm -r" of a subdirectory inside a watched directory. We
// get a modify event of something happening inside, but by the time we
// get here, the subdirectory is already gone. Clearly we were watching
// this path but now it is gone. Let's tell the user that it was
// removed.
if !w.sendEvent(path, Remove) {
return nil
}
// Suppress extra write events on removed directories; they are not
// informative and can be confusing.
return nil
}
// Resolve symlinks that were explicitly watched, as we would have at Add()
// time. This helps suppress spurious Chmod events on watched symlinks.
if isWatched {
stat, err = os.Stat(path)
if err != nil {
// The symlink still exists, but the target is gone. Report the
// Remove similar to above.
if !w.sendEvent(path, Remove) {
return nil
}
// Don't return the error
}
}
if events&unix.FILE_MODIFIED != 0 {
if fmode.IsDir() {
if watchedDir {
if err := w.updateDirectory(path); err != nil {
return err
}
} else {
if !w.sendEvent(path, Write) {
return nil
}
}
} else {
if !w.sendEvent(path, Write) {
return nil
}
}
}
if events&unix.FILE_ATTRIB != 0 && stat != nil {
// Only send Chmod if perms changed
if stat.Mode().Perm() != fmode.Perm() {
if !w.sendEvent(path, Chmod) {
return nil
}
}
}
if stat != nil {
// If we get here, it means we've hit an event above that requires us to
// continue watching the file or directory
return w.associateFile(path, stat, isWatched)
}
return nil
}
func (w *Watcher) updateDirectory(path string) error {
// The directory was modified, so we must find unwatched entities and watch
// them. If something was removed from the directory, nothing will happen,
// as everything else should still be watched.
files, err := os.ReadDir(path)
if err != nil {
return err
}
for _, entry := range files {
path := filepath.Join(path, entry.Name())
if w.port.PathIsWatched(path) {
continue
}
finfo, err := entry.Info()
if err != nil {
return err
}
err = w.associateFile(path, finfo, false)
if err != nil {
if !w.sendError(err) {
return nil
}
}
if !w.sendEvent(path, Create) {
return nil
}
}
return nil
}
func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error {
if w.isClosed() {
return ErrClosed
}
// This is primarily protecting the call to AssociatePath but it is
// important and intentional that the call to PathIsWatched is also
// protected by this mutex. Without this mutex, AssociatePath has been seen
// to error out that the path is already associated.
w.mu.Lock()
defer w.mu.Unlock()
if w.port.PathIsWatched(path) {
// Remove the old association in favor of this one. If we get ENOENT,
// then while the x/sys/unix wrapper still thought that this path was
// associated, the underlying event port did not. This call will have
// cleared up that discrepancy. The most likely cause is that the event
// has fired but we haven't processed it yet.
err := w.port.DissociatePath(path)
if err != nil && err != unix.ENOENT {
return err
}
}
// FILE_NOFOLLOW means we watch symlinks themselves rather than their
// targets.
events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW
if follow {
// We *DO* follow symlinks for explicitly watched entries.
events = unix.FILE_MODIFIED | unix.FILE_ATTRIB
}
return w.port.AssociatePath(path, stat,
events,
stat.Mode())
}
func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error {
if !w.port.PathIsWatched(path) {
return nil
}
return w.port.DissociatePath(path)
}
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string {
if w.isClosed() {
return nil
}
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, len(w.watches)+len(w.dirs))
for pathname := range w.dirs {
entries = append(entries, pathname)
}
for pathname := range w.watches {
entries = append(entries, pathname)
}
return entries
}
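An illustrative helper (not from this repository) showing the intended WatchList contract, including the nil-after-Close behaviour documented above:

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// printWatches lists everything currently watched. After Close, WatchList
// returns nil, so the loop body simply never runs.
func printWatches(w *fsnotify.Watcher) {
	for _, p := range w.WatchList() {
		log.Println("watching:", p)
	}
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	_ = w.Add("/tmp") // illustrative path; errors ignored for brevity
	printWatches(w)
}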


@ -1,5 +1,8 @@
//go:build linux && !appengine
// +build linux,!appengine
// Note: the documentation on the Watcher type and methods is generated from
// mkdoc.zsh
package fsnotify
@ -26,9 +29,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@ -42,16 +45,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
@ -67,14 +70,20 @@ import (
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # Windows notes
//
// Paths can be added as "C:\path\to\dir", but forward slashes
// ("C:/path/to/dir") will also work.
//
// When a watched directory is removed it will always send an event for the
// directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all files, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
type Watcher struct {
// Events sends the filesystem change events.
//
@ -101,36 +110,148 @@ type Watcher struct {
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
// you may get hundreds of Write events, and you may
// want to wait until you've stopped receiving them
// (see the dedup example in cmd/fsnotify).
//
// Some systems may send Write event for directories
// when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
// when a file is truncated. On Windows it's never
// sent.
Events chan Event
// Errors sends any errors.
//
// ErrEventOverflow is used to indicate there are too many events:
//
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
// - kqueue, fen: Not used.
Errors chan error
// Store fd here as os.File.Read() will no longer return on close after
// calling Fd(). See: https://github.com/golang/go/issues/26439
fd int
inotifyFile *os.File
watches *watches
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
closeMu sync.Mutex
doneResp chan struct{} // Channel to respond to Close
}
type (
watches struct {
mu sync.RWMutex
wd map[uint32]*watch // wd → watch
path map[string]uint32 // pathname → wd
}
watch struct {
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
path string // Watch path.
}
)
func newWatches() *watches {
return &watches{
wd: make(map[uint32]*watch),
path: make(map[string]uint32),
}
}
func (w *watches) len() int {
w.mu.RLock()
defer w.mu.RUnlock()
return len(w.wd)
}
func (w *watches) add(ww *watch) {
w.mu.Lock()
defer w.mu.Unlock()
w.wd[ww.wd] = ww
w.path[ww.path] = ww.wd
}
func (w *watches) remove(wd uint32) {
w.mu.Lock()
defer w.mu.Unlock()
delete(w.path, w.wd[wd].path)
delete(w.wd, wd)
}
func (w *watches) removePath(path string) (uint32, bool) {
w.mu.Lock()
defer w.mu.Unlock()
wd, ok := w.path[path]
if !ok {
return 0, false
}
delete(w.path, path)
delete(w.wd, wd)
return wd, true
}
func (w *watches) byPath(path string) *watch {
w.mu.RLock()
defer w.mu.RUnlock()
return w.wd[w.path[path]]
}
func (w *watches) byWd(wd uint32) *watch {
w.mu.RLock()
defer w.mu.RUnlock()
return w.wd[wd]
}
func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
w.mu.Lock()
defer w.mu.Unlock()
var existing *watch
wd, ok := w.path[path]
if ok {
existing = w.wd[wd]
}
upd, err := f(existing)
if err != nil {
return err
}
if upd != nil {
w.wd[upd.wd] = upd
w.path[upd.path] = upd.wd
if upd.wd != wd {
delete(w.wd, wd)
}
}
return nil
}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
return NewBufferedWatcher(0)
}
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) {
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
// I/O operations won't terminate on close.
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
if fd == -1 {
return nil, errno
@ -139,9 +260,8 @@ func NewWatcher() (*Watcher, error) {
w := &Watcher{
fd: fd,
inotifyFile: os.NewFile(uintptr(fd), ""),
watches: newWatches(),
Events: make(chan Event, sz),
Errors: make(chan error),
done: make(chan struct{}),
doneResp: make(chan struct{}),
@ -157,8 +277,8 @@ func (w *Watcher) sendEvent(e Event) bool {
case w.Events <- e:
return true
case <-w.done:
return false
}
}
// Returns true if the error was sent, or false if watcher is closed.
@ -180,17 +300,15 @@ func (w *Watcher) isClosed() bool {
}
}
// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error {
w.closeMu.Lock()
if w.isClosed() {
w.closeMu.Unlock()
return nil
}
close(w.done)
w.closeMu.Unlock()
// Causes any blocking reads to return with an error, provided the file
// still supports deadline operations.
@ -207,17 +325,21 @@ func (w *Watcher) Close() error {
// Add starts monitoring the path for changes.
//
// A path can only be watched once; watching it more than once is a no-op and will
// not return an error. Paths that do not yet exist on the filesystem cannot be
// watched.
//
// A watch will be automatically removed if the watched path is deleted or
// renamed. The exception is the Windows backend, which doesn't remove the
// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@ -227,44 +349,59 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to the destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
// other platforms. The default is 64K (65536 bytes).
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
name = filepath.Clean(name)
_ = getOptions(opts...)
var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
return w.watches.updatePath(name, func(existing *watch) (*watch, error) {
if existing != nil {
flags |= existing.flags | unix.IN_MASK_ADD
}
wd, err := unix.InotifyAddWatch(w.fd, name, flags)
if wd == -1 {
return nil, err
}
if existing == nil {
return &watch{
wd: uint32(wd),
path: name,
flags: flags,
}, nil
}
existing.wd = uint32(wd)
existing.flags = flags
return existing, nil
})
}
// Remove stops monitoring the path for changes.
@ -273,32 +410,22 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
if w.isClosed() {
return nil
}
return w.remove(filepath.Clean(name))
}
func (w *Watcher) remove(name string) error {
wd, ok := w.watches.removePath(name)
if !ok {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
success, errno := unix.InotifyRmWatch(w.fd, wd)
if success == -1 {
// TODO: Perhaps it's not helpful to return an error here in every case;
// The only two possible errors are:
@ -312,26 +439,26 @@ func (w *Watcher) Remove(name string) error {
// are watching is deleted.
return errno
}
return nil
}
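A hedged caller-side sketch of Remove and the ErrNonExistentWatch case described above (the path is invented; treating "never watched" as success is just one possible policy):

package main

import (
	"errors"
	"log"

	"github.com/fsnotify/fsnotify"
)

// removeIfWatched stops watching path, tolerating the case where it was
// never added, which Remove reports as ErrNonExistentWatch.
func removeIfWatched(w *fsnotify.Watcher, path string) error {
	err := w.Remove(path)
	if errors.Is(err, fsnotify.ErrNonExistentWatch) {
		return nil // nothing was being watched; nothing to do
	}
	return err
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := removeIfWatched(w, "/var/log/myapp"); err != nil {
		log.Fatal(err)
	}
}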
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string {
if w.isClosed() {
return nil
}
entries := make([]string, 0, w.watches.len())
w.watches.mu.RLock()
for pathname := range w.watches.path {
entries = append(entries, pathname)
}
w.watches.mu.RUnlock()
return entries
}
// readEvents reads from the inotify file descriptor, converts the
@ -367,14 +494,11 @@ func (w *Watcher) readEvents() {
if n < unix.SizeofInotifyEvent {
var err error
if n == 0 {
err = io.EOF // If EOF is received. This should really never happen.
} else if n < 0 {
err = errno // If an error occurred while reading.
} else {
err = errors.New("notify: short read in readEvents()") // Read was too short.
}
if !w.sendError(err) {
return
@ -403,18 +527,29 @@ func (w *Watcher) readEvents() {
// doesn't append the filename to the event, but we would like to always fill the
// "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
watch := w.watches.byWd(uint32(raw.Wd))
// inotify will automatically remove the watch on deletes; just need
// to clean our state here.
if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
w.watches.remove(watch.wd)
}
// We can't really update the state when a watched path is moved;
// only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
// the watch.
if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
err := w.remove(watch.path)
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
if !w.sendError(err) {
return
}
}
}
var name string
if watch != nil {
name = watch.path
}
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]


@ -1,12 +1,14 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
// +build freebsd openbsd netbsd dragonfly darwin
// Note: the documentation on the Watcher type and methods is generated from
// mkdoc.zsh
package fsnotify
import (
"errors"
"fmt"
"os"
"path/filepath"
"sync"
@ -24,9 +26,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@ -40,16 +42,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
@ -65,14 +67,20 @@ import (
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # Windows notes
//
// Paths can be added as "C:\path\to\dir", but forward slashes
// ("C:/path/to/dir") will also work.
//
// When a watched directory is removed it will always send an event for the
// directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all files, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
type Watcher struct {
// Events sends the filesystem change events.
//
@ -99,18 +107,27 @@ type Watcher struct {
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
// you may get hundreds of Write events, and you may
// want to wait until you've stopped receiving them
// (see the dedup example in cmd/fsnotify).
//
// Some systems may send Write event for directories
// when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
// when a file is truncated. On Windows it's never
// sent.
Events chan Event
// Errors sends any errors.
//
// ErrEventOverflow is used to indicate there are too many events:
//
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
// - kqueue, fen: Not used.
Errors chan error
done chan struct{}
@ -133,6 +150,18 @@ type pathInfo struct {
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
return NewBufferedWatcher(0)
}
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) {
kq, closepipe, err := newKqueue()
if err != nil {
return nil, err
@ -147,7 +176,7 @@ func NewWatcher() (*Watcher, error) {
paths: make(map[int]pathInfo),
fileExists: make(map[string]struct{}),
userWatches: make(map[string]struct{}),
Events: make(chan Event, sz),
Errors: make(chan error),
done: make(chan struct{}),
}
@ -197,8 +226,8 @@ func (w *Watcher) sendEvent(e Event) bool {
case w.Events <- e:
return true
case <-w.done:
return false
}
}
// Returns true if the error was sent, or false if watcher is closed.
@ -207,11 +236,11 @@ func (w *Watcher) sendError(err error) bool {
case w.Errors <- err:
return true
case <-w.done:
return false
}
}
// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error {
w.mu.Lock()
if w.isClosed {
@ -239,17 +268,21 @@ func (w *Watcher) Close() error {
// Add starts monitoring the path for changes.
//
// A path can only be watched once; watching it more than once is a no-op and will
// not return an error. Paths that do not yet exist on the filesystem cannot be
// watched.
//
// A watch will be automatically removed if the watched path is deleted or
// renamed. The exception is the Windows backend, which doesn't remove the
// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@ -259,15 +292,28 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to the destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
// other platforms. The default is 64K (65536 bytes).
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
_ = getOptions(opts...)
w.mu.Lock()
w.userWatches[name] = struct{}{}
w.mu.Unlock()
@ -281,9 +327,19 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
return w.remove(name, true)
}
func (w *Watcher) remove(name string, unwatchFiles bool) error {
name = filepath.Clean(name)
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return nil
}
watchfd, ok := w.watches[name]
w.mu.Unlock()
if !ok {
@ -315,7 +371,7 @@ func (w *Watcher) Remove(name string) error {
w.mu.Unlock()
// Find all watched paths that are in this directory that are not external.
if unwatchFiles && isDir {
var pathsToRemove []string
w.mu.Lock()
for fd := range w.watchesByDir[name] {
@ -326,20 +382,25 @@ func (w *Watcher) Remove(name string) error {
}
w.mu.Unlock()
for _, name := range pathsToRemove {
// Since these are internal, not much sense in propagating error to
// the user, as that will just confuse them with an error about a
// path they did not explicitly watch themselves.
w.Remove(name)
}
}
return nil
}
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string {
w.mu.Lock()
defer w.mu.Unlock()
if w.isClosed {
return nil
}
entries := make([]string, 0, len(w.userWatches))
for pathname := range w.userWatches {
@ -352,18 +413,18 @@ func (w *Watcher) WatchList() []string {
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
// addWatch adds name to the watched file set; the flags are interpreted as
// described in kevent(2).
//
// Returns the real path to the file which was added, with symlinks resolved.
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
var isDir bool
name = filepath.Clean(name)
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return "", ErrClosed
}
watchfd, alreadyWatching := w.watches[name]
// We already have a watch, but we can still override flags.
@ -383,27 +444,30 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
return "", nil
}
// Follow Symlinks.
//
// Linux can add unresolvable symlinks to the watch list without issue,
// and Windows can't do symlinks period. To maintain consistency, we
// will act like everything is fine if the link can't be resolved.
// There will simply be no file events for broken symlinks. Hence the
// returns of nil on errors.
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
link, err := os.Readlink(name)
if err != nil {
return "", nil
}
w.mu.Lock()
_, alreadyWatching = w.watches[link]
w.mu.Unlock()
if alreadyWatching {
// Add to watches so we don't get spurious Create events later
// on when we diff the directories.
w.watches[name] = 0
w.fileExists[name] = struct{}{}
return link, nil
}
name = link
fi, err = os.Lstat(name)
if err != nil {
return "", nil
@ -411,7 +475,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
}
// Retry on EINTR; open() can return EINTR in practice on macOS.
// See #354, and Go issues 11180 and 39237.
for {
watchfd, err = unix.Open(name, openMode, 0)
if err == nil {
@ -444,14 +508,13 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
w.watchesByDir[parentName] = watchesByDir
}
watchesByDir[watchfd] = struct{}{}
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
w.mu.Unlock()
}
if isDir {
// Watch the directory if it has not been watched before, or if it was
// watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
w.mu.Lock()
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
@ -473,13 +536,10 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
// Event values that it sends down the Events channel.
func (w *Watcher) readEvents() {
defer func() {
close(w.Events)
close(w.Errors)
_ = unix.Close(w.kq)
unix.Close(w.closepipe[0])
}()
eventBuffer := make([]unix.Kevent_t, 10)
@ -513,18 +573,8 @@ func (w *Watcher) readEvents() {
event := w.newEvent(path.name, mask)
if event.Has(Rename) || event.Has(Remove) {
w.remove(event.Name, false)
w.mu.Lock()
delete(w.fileExists, event.Name)
w.mu.Unlock()
@ -540,26 +590,30 @@ func (w *Watcher) readEvents() {
}
if event.Has(Remove) {
// Look for a file that may have overwritten this; for example,
// mv f1 f2 will delete f2, then create f2.
if path.isDir {
fileDir := filepath.Clean(event.Name)
w.mu.Lock()
_, found := w.watches[fileDir]
w.mu.Unlock()
if found {
err := w.sendDirectoryChangeEvents(fileDir)
if err != nil {
if !w.sendError(err) {
closed = true
}
}
}
} else {
filePath := filepath.Clean(event.Name)
if fi, err := os.Lstat(filePath); err == nil {
err := w.sendFileCreatedEventIfNew(filePath, fi)
if err != nil {
if !w.sendError(err) {
closed = true
}
}
}
}
}
@ -582,21 +636,31 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
e.Op |= Chmod
}
// No point sending a write and delete event at the same time: if it's gone,
// then it's gone.
if e.Op.Has(Write) && e.Op.Has(Remove) {
e.Op &^= Write
}
return e
}
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
// Get all files
files, err := os.ReadDir(dirPath)
if err != nil {
return err
}
for _, f := range files {
path := filepath.Join(dirPath, f.Name())
fi, err := f.Info()
if err != nil {
return fmt.Errorf("%q: %w", path, err)
}
cleanPath, err := w.internalWatch(path, fi)
if err != nil {
// No permission to read the file; that's not a problem: just skip.
// But do add it to w.fileExists to prevent it from being picked up
@ -606,7 +670,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error {
case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
cleanPath = filepath.Clean(path)
default:
return fmt.Errorf("%q: %w", path, err)
}
}
@ -622,26 +686,37 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error {
//
// This functionality is to have the BSD watcher match the inotify, which sends
// a create event for files created in a watched directory.
func (w *Watcher) sendDirectoryChangeEvents(dir string) error {
files, err := os.ReadDir(dir)
if err != nil {
// Directory no longer exists: we can ignore this safely. kqueue will
// still give us the correct events.
if errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
}
for _, f := range files {
fi, err := f.Info()
if err != nil {
return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
}
err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
if err != nil {
// Don't need to send an error if this file isn't readable.
if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) {
return nil
}
return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
}
}
return nil
}
// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) {
w.mu.Lock()
_, doesExist := w.fileExists[filePath]
w.mu.Unlock()
@ -652,7 +727,7 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf
}
// like watchDirectoryFiles (but without doing another ReadDir)
filePath, err = w.internalWatch(filePath, fi)
if err != nil {
return err
}
@ -664,10 +739,10 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf
return nil
}
func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) {
if fi.IsDir() {
// mimic Linux providing delete events for subdirectories, but preserve
// the flags used if currently watching subdirectory
w.mu.Lock()
flags := w.dirFlags[name]
w.mu.Unlock()


@ -1,39 +1,169 @@
//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
// Note: the documentation on the Watcher type and methods is generated from
// mkdoc.zsh
package fsnotify package fsnotify
import ( import "errors"
"fmt"
"runtime"
)
// Watcher watches a set of files, delivering events to a channel. // Watcher watches a set of paths, delivering events on a channel.
type Watcher struct{} //
// A watcher should not be copied (e.g. pass it by pointer, rather than by
// value).
//
// # Linux notes
//
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
// for the number of watches per user, and fs.inotify.max_user_instances
// specifies the maximum number of inotify instances per user. Every Watcher you
// create is an "instance", and every path you add is a "watch".
//
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
// /proc/sys/fs/inotify/max_user_instances
//
// To increase them you can use sysctl or write the value to the /proc file:
//
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
//
// # kqueue notes (macOS, BSD)
//
// kqueue requires opening a file descriptor for every file that's being watched;
// so if you're watching a directory with five files then that's six file
// descriptors. You will run in to your system's "max open files" limit faster on
// these platforms.
//
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # Windows notes
//
// Paths can be added as "C:\path\to\dir", but forward slashes
// ("C:/path/to/dir") will also work.
//
// When a watched directory is removed it will always send an event for the
// directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all times, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
type Watcher struct {
// Events sends the filesystem change events.
//
// fsnotify can send the following events; a "path" here can refer to a
// file, directory, symbolic link, or special file like a FIFO.
//
// fsnotify.Create A new path was created; this may be followed by one
// or more Write events if data also gets written to a
// file.
//
// fsnotify.Remove A path was removed.
//
// fsnotify.Rename A path was renamed. A rename is always sent with the
// old path as Event.Name, and a Create event will be
// sent with the new name. Renames are only sent for
// paths that are currently watched; e.g. moving an
// unmonitored file into a monitored directory will
// show up as just a Create. Similarly, renaming a file
// to outside a monitored directory will show up as
// only a Rename.
//
// fsnotify.Write A file or named pipe was written to. A Truncate will
// also trigger a Write. A single "write action"
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
// you may get hundreds of Write events, and you may
// want to wait until you've stopped receiving them
// (see the dedup example in cmd/fsnotify).
//
// Some systems may send Write event for directories
// when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
// when a file is truncated. On Windows it's never
// sent.
Events chan Event
// Errors sends any errors.
//
// ErrEventOverflow is used to indicate there are too many events:
//
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
// - kqueue, fen: Not used.
Errors chan error
}
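For context, a minimal sketch of how these channels are typically consumed; the watched path is a placeholder and error handling is trimmed to the essentials:

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watching a directory covers every file inside it, including new ones.
	if err := w.Add("/tmp/watched"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok { // Events is closed once Close() is called
				return
			}
			if ev.Has(fsnotify.Write) {
				log.Println("modified:", ev.Name)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}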
// NewWatcher creates a new Watcher. // NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) { func NewWatcher() (*Watcher, error) {
return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) return nil, errors.New("fsnotify not supported on the current platform")
} }
// Close removes all watches and closes the events channel. // NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
func (w *Watcher) Close() error { // channel.
return nil //
} // The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() }
// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error { return nil }
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string { return nil }
// Add starts monitoring the path for changes. // Add starts monitoring the path for changes.
// //
// A path can only be watched once; attempting to watch it more than once will // A path can only be watched once; watching it more than once is a no-op and will
// return an error. Paths that do not yet exist on the filesystem cannot be // not return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted. // watched.
// //
// A path will remain watched if it gets renamed to somewhere else on the same // A watch will be automatically removed if the watched path is deleted or
// filesystem, but the monitor will get removed if the path gets deleted and // renamed. The exception is the Windows backend, which doesn't remove the
// re-created, or if it's moved to a different filesystem. // watcher on renames.
// //
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work. // filesystems (/proc, /sys, etc.) generally don't work.
// //
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories // # Watching directories
// //
// All files in a directory are monitored, including new files that are created // All files in a directory are monitored, including new files that are created
@ -43,17 +173,26 @@ func (w *Watcher) Close() error {
// # Watching files // # Watching files
// //
// Watching individual files (rather than directories) is generally not // Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing // recommended as many programs (especially editors) update files atomically: it
// to the file a temporary file will be written to first, and if successful the // will write to a temporary file which is then moved to to destination,
// temporary file is moved to to destination removing the original, or some // overwriting the original (or some variant thereof). The watcher on the
// variant thereof. The watcher on the original file is now lost, as it no // original file is now lost, as that no longer exists.
// longer exists.
// //
// Instead, watch the parent directory and use Event.Name to filter out files // The upshot of this is that a power failure or crash won't leave a
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. // half-written file.
func (w *Watcher) Add(name string) error { //
return nil // Watch the parent directory and use Event.Name to filter out files you're not
} // interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(name string) error { return nil }
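A hedged sketch of the parent-directory pattern described above; the config path is a placeholder, and the fuller version lives upstream in cmd/fsnotify/file.go:

// watchFile watches the directory containing path and reports events for that
// one file, so editors that replace the file atomically don't break the watch.
// Assumes the imports log, path/filepath and github.com/fsnotify/fsnotify.
func watchFile(path string) error {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer w.Close()

	if err := w.Add(filepath.Dir(path)); err != nil {
		return err
	}
	for ev := range w.Events {
		// Event.Name is the directory-joined path; pass a cleaned path so the comparison holds.
		if ev.Name == path && (ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create)) {
			log.Println("changed:", ev)
		}
	}
	return nil
}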
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
// other platforms. The default is 64K (65536 bytes).
func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil }
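A small illustration of the option, reusing a watcher w created as in the earlier sketch; the buffer size below is arbitrary, and on every backend other than Windows the option is simply ignored:

// Raise the ReadDirectoryChangesW buffer to absorb large event bursts on Windows.
if err := w.AddWith(`C:\projects\src`, fsnotify.WithBufferSize(256*1024)); err != nil {
	log.Fatal(err)
}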
// Remove stops monitoring the path for changes. // Remove stops monitoring the path for changes.
// //
@ -61,6 +200,6 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both. // /tmp/dir and /tmp/dir/subdir then you will need to remove both.
// //
// Removing a path that has not yet been added returns [ErrNonExistentWatch]. // Removing a path that has not yet been added returns [ErrNonExistentWatch].
func (w *Watcher) Remove(name string) error { //
return nil // Returns nil if [Watcher.Close] was called.
} func (w *Watcher) Remove(name string) error { return nil }


@ -1,6 +1,13 @@
//go:build windows //go:build windows
// +build windows // +build windows
// Windows backend based on ReadDirectoryChangesW()
//
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
//
// Note: the documentation on the Watcher type and methods is generated from
// mkdoc.zsh
package fsnotify package fsnotify
import ( import (
@ -27,9 +34,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file // When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example: // descriptors are closed, and deletes will always emit a Chmod. For example:
// //
// fp := os.Open("file") // fp := os.Open("file")
// os.Remove("file") // Triggers Chmod // os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove // fp.Close() // Triggers Remove
// //
// This is the event that inotify sends, so not much can be changed about this. // This is the event that inotify sends, so not much can be changed about this.
// //
@ -43,16 +50,16 @@ import (
// //
// To increase them you can use sysctl or write the value to the /proc file: // To increase them you can use sysctl or write the value to the /proc file:
// //
// # Default values on Linux 5.18 // # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983 // sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128 // sysctl fs.inotify.max_user_instances=128
// //
// To make the changes persist on reboot edit /etc/sysctl.conf or // To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation): // your distro's documentation):
// //
// fs.inotify.max_user_watches=124983 // fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128 // fs.inotify.max_user_instances=128
// //
// Reaching the limit will result in a "no space left on device" or "too many open // Reaching the limit will result in a "no space left on device" or "too many open
// files" error. // files" error.
@ -68,14 +75,20 @@ import (
// control the maximum number of open files, as well as /etc/login.conf on BSD // control the maximum number of open files, as well as /etc/login.conf on BSD
// systems. // systems.
// //
// # macOS notes // # Windows notes
// //
// Spotlight indexing on macOS can result in multiple events (see [#15]). A // Paths can be added as "C:\path\to\dir", but forward slashes
// temporary workaround is to add your folder(s) to the "Spotlight Privacy // ("C:/path/to/dir") will also work.
// Settings" until we have a native FSEvents implementation (see [#11]).
// //
// [#11]: https://github.com/fsnotify/fsnotify/issues/11 // When a watched directory is removed it will always send an event for the
// [#15]: https://github.com/fsnotify/fsnotify/issues/15 // directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all times, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
type Watcher struct { type Watcher struct {
// Events sends the filesystem change events. // Events sends the filesystem change events.
// //
@ -102,31 +115,52 @@ type Watcher struct {
// initiated by the user may show up as one or multiple // initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to // writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program // disk. For example when compiling a large Go program
// you may get hundreds of Write events, so you // you may get hundreds of Write events, and you may
// probably want to wait until you've stopped receiving // want to wait until you've stopped receiving them
// them (see the dedup example in cmd/fsnotify). // (see the dedup example in cmd/fsnotify).
//
// Some systems may send Write event for directories
// when the directory content changes.
// //
// fsnotify.Chmod Attributes were changed. On Linux this is also sent // fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a // when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent // link to an inode is removed). On kqueue it's sent
// and on kqueue when a file is truncated. On Windows // when a file is truncated. On Windows it's never
// it's never sent. // sent.
Events chan Event Events chan Event
// Errors sends any errors. // Errors sends any errors.
//
// ErrEventOverflow is used to indicate there are too many events:
//
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
// - kqueue, fen: Not used.
Errors chan error Errors chan error
port windows.Handle // Handle to completion port port windows.Handle // Handle to completion port
input chan *input // Inputs to the reader are sent on this channel input chan *input // Inputs to the reader are sent on this channel
quit chan chan<- error quit chan chan<- error
mu sync.Mutex // Protects access to watches, isClosed mu sync.Mutex // Protects access to watches, closed
watches watchMap // Map of watches (key: i-number) watches watchMap // Map of watches (key: i-number)
isClosed bool // Set to true when Close() is first called closed bool // Set to true when Close() is first called
} }
// NewWatcher creates a new Watcher. // NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) { func NewWatcher() (*Watcher, error) {
return NewBufferedWatcher(50)
}
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) {
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
if err != nil { if err != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", err) return nil, os.NewSyscallError("CreateIoCompletionPort", err)
@ -135,7 +169,7 @@ func NewWatcher() (*Watcher, error) {
port: port, port: port,
watches: make(watchMap), watches: make(watchMap),
input: make(chan *input, 1), input: make(chan *input, 1),
Events: make(chan Event, 50), Events: make(chan Event, sz),
Errors: make(chan error), Errors: make(chan error),
quit: make(chan chan<- error, 1), quit: make(chan chan<- error, 1),
} }
@ -143,6 +177,12 @@ func NewWatcher() (*Watcher, error) {
return w, nil return w, nil
} }
func (w *Watcher) isClosed() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.closed
}
func (w *Watcher) sendEvent(name string, mask uint64) bool { func (w *Watcher) sendEvent(name string, mask uint64) bool {
if mask == 0 { if mask == 0 {
return false return false
@ -167,14 +207,14 @@ func (w *Watcher) sendError(err error) bool {
return false return false
} }
// Close removes all watches and closes the events channel. // Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error { func (w *Watcher) Close() error {
w.mu.Lock() if w.isClosed() {
if w.isClosed {
w.mu.Unlock()
return nil return nil
} }
w.isClosed = true
w.mu.Lock()
w.closed = true
w.mu.Unlock() w.mu.Unlock()
// Send "quit" message to the reader goroutine // Send "quit" message to the reader goroutine
@ -188,17 +228,21 @@ func (w *Watcher) Close() error {
// Add starts monitoring the path for changes. // Add starts monitoring the path for changes.
// //
// A path can only be watched once; attempting to watch it more than once will // A path can only be watched once; watching it more than once is a no-op and will
// return an error. Paths that do not yet exist on the filesystem cannot be // not return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted. // watched.
// //
// A path will remain watched if it gets renamed to somewhere else on the same // A watch will be automatically removed if the watched path is deleted or
// filesystem, but the monitor will get removed if the path gets deleted and // renamed. The exception is the Windows backend, which doesn't remove the
// re-created, or if it's moved to a different filesystem. // watcher on renames.
// //
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work. // filesystems (/proc, /sys, etc.) generally don't work.
// //
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories // # Watching directories
// //
// All files in a directory are monitored, including new files that are created // All files in a directory are monitored, including new files that are created
@ -208,27 +252,41 @@ func (w *Watcher) Close() error {
// # Watching files // # Watching files
// //
// Watching individual files (rather than directories) is generally not // Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing // recommended as many programs (especially editors) update files atomically: it
// to the file a temporary file will be written to first, and if successful the // will write to a temporary file which is then moved to to destination,
// temporary file is moved to to destination removing the original, or some // overwriting the original (or some variant thereof). The watcher on the
// variant thereof. The watcher on the original file is now lost, as it no // original file is now lost, as that no longer exists.
// longer exists.
// //
// Instead, watch the parent directory and use Event.Name to filter out files // The upshot of this is that a power failure or crash won't leave a
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. // half-written file.
func (w *Watcher) Add(name string) error { //
w.mu.Lock() // Watch the parent directory and use Event.Name to filter out files you're not
if w.isClosed { // interested in. There is an example of this in cmd/fsnotify/file.go.
w.mu.Unlock() func (w *Watcher) Add(name string) error { return w.AddWith(name) }
return errors.New("watcher already closed")
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
// other platforms. The default is 64K (65536 bytes).
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
with := getOptions(opts...)
if with.bufsize < 4096 {
return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
} }
w.mu.Unlock()
in := &input{ in := &input{
op: opAddWatch, op: opAddWatch,
path: filepath.Clean(name), path: filepath.Clean(name),
flags: sysFSALLEVENTS, flags: sysFSALLEVENTS,
reply: make(chan error), reply: make(chan error),
bufsize: with.bufsize,
} }
w.input <- in w.input <- in
if err := w.wakeupReader(); err != nil { if err := w.wakeupReader(); err != nil {
@ -243,7 +301,13 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both. // /tmp/dir and /tmp/dir/subdir then you will need to remove both.
// //
// Removing a path that has not yet been added returns [ErrNonExistentWatch]. // Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error { func (w *Watcher) Remove(name string) error {
if w.isClosed() {
return nil
}
in := &input{ in := &input{
op: opRemoveWatch, op: opRemoveWatch,
path: filepath.Clean(name), path: filepath.Clean(name),
@ -256,8 +320,15 @@ func (w *Watcher) Remove(name string) error {
return <-in.reply return <-in.reply
} }
// WatchList returns all paths added with [Add] (and are not yet removed). // WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string { func (w *Watcher) WatchList() []string {
if w.isClosed() {
return nil
}
w.mu.Lock() w.mu.Lock()
defer w.mu.Unlock() defer w.mu.Unlock()
@ -279,7 +350,6 @@ func (w *Watcher) WatchList() []string {
// This should all be removed at some point, and just use windows.FILE_NOTIFY_* // This should all be removed at some point, and just use windows.FILE_NOTIFY_*
const ( const (
sysFSALLEVENTS = 0xfff sysFSALLEVENTS = 0xfff
sysFSATTRIB = 0x4
sysFSCREATE = 0x100 sysFSCREATE = 0x100
sysFSDELETE = 0x200 sysFSDELETE = 0x200
sysFSDELETESELF = 0x400 sysFSDELETESELF = 0x400
@ -305,9 +375,6 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
e.Op |= Rename e.Op |= Rename
} }
if mask&sysFSATTRIB == sysFSATTRIB {
e.Op |= Chmod
}
return e return e
} }
@ -321,10 +388,11 @@ const (
) )
type input struct { type input struct {
op int op int
path string path string
flags uint32 flags uint32
reply chan error bufsize int
reply chan error
} }
type inode struct { type inode struct {
@ -334,13 +402,14 @@ type inode struct {
} }
type watch struct { type watch struct {
ov windows.Overlapped ov windows.Overlapped
ino *inode // i-number ino *inode // i-number
path string // Directory path recurse bool // Recursive watch?
mask uint64 // Directory itself is being watched with these notify flags path string // Directory path
names map[string]uint64 // Map of names being watched and their notify flags mask uint64 // Directory itself is being watched with these notify flags
rename string // Remembers the old name while renaming a file names map[string]uint64 // Map of names being watched and their notify flags
buf [65536]byte // 64K buffer rename string // Remembers the old name while renaming a file
buf []byte // buffer, allocated later
} }
type ( type (
@ -413,7 +482,10 @@ func (m watchMap) set(ino *inode, watch *watch) {
} }
// Must run within the I/O thread. // Must run within the I/O thread.
func (w *Watcher) addWatch(pathname string, flags uint64) error { func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error {
//pathname, recurse := recursivePath(pathname)
recurse := false
dir, err := w.getDir(pathname) dir, err := w.getDir(pathname)
if err != nil { if err != nil {
return err return err
@ -433,9 +505,11 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error {
return os.NewSyscallError("CreateIoCompletionPort", err) return os.NewSyscallError("CreateIoCompletionPort", err)
} }
watchEntry = &watch{ watchEntry = &watch{
ino: ino, ino: ino,
path: dir, path: dir,
names: make(map[string]uint64), names: make(map[string]uint64),
recurse: recurse,
buf: make([]byte, bufsize),
} }
w.mu.Lock() w.mu.Lock()
w.watches.set(ino, watchEntry) w.watches.set(ino, watchEntry)
@ -465,6 +539,8 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error {
// Must run within the I/O thread. // Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error { func (w *Watcher) remWatch(pathname string) error {
pathname, recurse := recursivePath(pathname)
dir, err := w.getDir(pathname) dir, err := w.getDir(pathname)
if err != nil { if err != nil {
return err return err
@ -478,6 +554,10 @@ func (w *Watcher) remWatch(pathname string) error {
watch := w.watches.get(ino) watch := w.watches.get(ino)
w.mu.Unlock() w.mu.Unlock()
if recurse && !watch.recurse {
return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname)
}
err = windows.CloseHandle(ino.handle) err = windows.CloseHandle(ino.handle)
if err != nil { if err != nil {
w.sendError(os.NewSyscallError("CloseHandle", err)) w.sendError(os.NewSyscallError("CloseHandle", err))
@ -535,8 +615,11 @@ func (w *Watcher) startRead(watch *watch) error {
return nil return nil
} }
rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], // We need to pass the array, rather than the slice.
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf))
rdErr := windows.ReadDirectoryChanges(watch.ino.handle,
(*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len),
watch.recurse, mask, nil, &watch.ov, 0)
if rdErr != nil { if rdErr != nil {
err := os.NewSyscallError("ReadDirectoryChanges", rdErr) err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
@ -563,9 +646,8 @@ func (w *Watcher) readEvents() {
runtime.LockOSThread() runtime.LockOSThread()
for { for {
// This error is handled after the watch == nil check below.
qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
// This error is handled after the watch == nil check below. NOTE: this
// seems odd, note sure if it's correct.
watch := (*watch)(unsafe.Pointer(ov)) watch := (*watch)(unsafe.Pointer(ov))
if watch == nil { if watch == nil {
@ -595,7 +677,7 @@ func (w *Watcher) readEvents() {
case in := <-w.input: case in := <-w.input:
switch in.op { switch in.op {
case opAddWatch: case opAddWatch:
in.reply <- w.addWatch(in.path, uint64(in.flags)) in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize)
case opRemoveWatch: case opRemoveWatch:
in.reply <- w.remWatch(in.path) in.reply <- w.remWatch(in.path)
} }
@ -605,6 +687,8 @@ func (w *Watcher) readEvents() {
} }
switch qErr { switch qErr {
case nil:
// No error
case windows.ERROR_MORE_DATA: case windows.ERROR_MORE_DATA:
if watch == nil { if watch == nil {
w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
@ -626,13 +710,12 @@ func (w *Watcher) readEvents() {
default: default:
w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
continue continue
case nil:
} }
var offset uint32 var offset uint32
for { for {
if n == 0 { if n == 0 {
w.sendError(errors.New("short read in readEvents()")) w.sendError(ErrEventOverflow)
break break
} }
@ -703,8 +786,9 @@ func (w *Watcher) readEvents() {
// Error! // Error!
if offset >= n { if offset >= n {
//lint:ignore ST1005 Windows should be capitalized
w.sendError(errors.New( w.sendError(errors.New(
"Windows system assumed buffer larger than it is, events have likely been missed.")) "Windows system assumed buffer larger than it is, events have likely been missed"))
break break
} }
} }
@ -720,9 +804,6 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
if mask&sysFSMODIFY != 0 { if mask&sysFSMODIFY != 0 {
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
} }
if mask&sysFSATTRIB != 0 {
m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES
}
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
} }


@ -1,13 +1,18 @@
//go:build !plan9
// +build !plan9
// Package fsnotify provides a cross-platform interface for file system // Package fsnotify provides a cross-platform interface for file system
// notifications. // notifications.
//
// Currently supported systems:
//
// Linux 2.6.32+ via inotify
// BSD, macOS via kqueue
// Windows via ReadDirectoryChangesW
// illumos via FEN
package fsnotify package fsnotify
import ( import (
"errors" "errors"
"fmt" "fmt"
"path/filepath"
"strings" "strings"
) )
@ -33,34 +38,52 @@ type Op uint32
// The operations fsnotify can trigger; see the documentation on [Watcher] for a // The operations fsnotify can trigger; see the documentation on [Watcher] for a
// full description, and check them with [Event.Has]. // full description, and check them with [Event.Has].
const ( const (
// A new pathname was created.
Create Op = 1 << iota Create Op = 1 << iota
// The pathname was written to; this does *not* mean the write has finished,
// and a write can be followed by more writes.
Write Write
// The path was removed; any watches on it will be removed. Some "remove"
// operations may trigger a Rename if the file is actually moved (for
// example "remove to trash" is often a rename).
Remove Remove
// The path was renamed to something else; any watched on it will be
// removed.
Rename Rename
// File attributes were changed.
//
// It's generally not recommended to take action on this event, as it may
// get triggered very frequently by some software. For example, Spotlight
// indexing on macOS, anti-virus software, backup software, etc.
Chmod Chmod
) )
// Common errors that can be reported by a watcher // Common errors that can be reported.
var ( var (
ErrNonExistentWatch = errors.New("can't remove non-existent watcher") ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
ErrEventOverflow = errors.New("fsnotify queue overflow") ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
ErrClosed = errors.New("fsnotify: watcher already closed")
) )
func (op Op) String() string { func (o Op) String() string {
var b strings.Builder var b strings.Builder
if op.Has(Create) { if o.Has(Create) {
b.WriteString("|CREATE") b.WriteString("|CREATE")
} }
if op.Has(Remove) { if o.Has(Remove) {
b.WriteString("|REMOVE") b.WriteString("|REMOVE")
} }
if op.Has(Write) { if o.Has(Write) {
b.WriteString("|WRITE") b.WriteString("|WRITE")
} }
if op.Has(Rename) { if o.Has(Rename) {
b.WriteString("|RENAME") b.WriteString("|RENAME")
} }
if op.Has(Chmod) { if o.Has(Chmod) {
b.WriteString("|CHMOD") b.WriteString("|CHMOD")
} }
if b.Len() == 0 { if b.Len() == 0 {
@ -70,7 +93,7 @@ func (op Op) String() string {
} }
// Has reports if this operation has the given operation. // Has reports if this operation has the given operation.
func (o Op) Has(h Op) bool { return o&h == h } func (o Op) Has(h Op) bool { return o&h != 0 }
// Has reports if this event has the given operation. // Has reports if this event has the given operation.
func (e Event) Has(op Op) bool { return e.Op.Has(op) } func (e Event) Has(op Op) bool { return e.Op.Has(op) }
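To illustrate the semantics above: Has reports whether any of the given bits are set, so a combined query matches on partial overlap (assumes fmt and fsnotify are imported):

op := fsnotify.Create | fsnotify.Write
fmt.Println(op.Has(fsnotify.Write))                   // true: the Write bit is set
fmt.Println(op.Has(fsnotify.Remove))                  // false: no overlap at all
fmt.Println(op.Has(fsnotify.Write | fsnotify.Remove)) // true: one shared bit is enough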
@ -79,3 +102,45 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) }
func (e Event) String() string { func (e Event) String() string {
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
} }
type (
addOpt func(opt *withOpts)
withOpts struct {
bufsize int
}
)
var defaultOpts = withOpts{
bufsize: 65536, // 64K
}
func getOptions(opts ...addOpt) withOpts {
with := defaultOpts
for _, o := range opts {
o(&with)
}
return with
}
// WithBufferSize sets the [ReadDirectoryChangesW] buffer size.
//
// This only has effect on Windows systems, and is a no-op for other backends.
//
// The default value is 64K (65536 bytes) which is the highest value that works
// on all filesystems and should be enough for most applications, but if you
// have a large burst of events it may not be enough. You can increase it if
// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]).
//
// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
func WithBufferSize(bytes int) addOpt {
return func(opt *withOpts) { opt.bufsize = bytes }
}
// Check if this path is recursive (ends with "/..." or "\..."), and return the
// path with the /... stripped.
func recursivePath(path string) (string, bool) {
if filepath.Base(path) == "..." {
return filepath.Dir(path), true
}
return path, false
}
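Illustrative only, since the helper is unexported and the "..." syntax is still commented out in addWatch above:

dir, recurse := recursivePath("/tmp/dir/...") // dir == "/tmp/dir", recurse == true
dir, recurse = recursivePath("/tmp/dir")      // dir == "/tmp/dir", recurse == false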


@ -2,8 +2,8 @@
[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 [ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
setopt err_exit no_unset pipefail extended_glob setopt err_exit no_unset pipefail extended_glob
# Simple script to update the godoc comments on all watchers. Probably took me # Simple script to update the godoc comments on all watchers so you don't need
# more time to write this than doing it manually, but ah well 🙃 # to update the same comment 5 times.
watcher=$(<<EOF watcher=$(<<EOF
// Watcher watches a set of paths, delivering events on a channel. // Watcher watches a set of paths, delivering events on a channel.
@ -16,9 +16,9 @@ watcher=$(<<EOF
// When a file is removed a Remove event won't be emitted until all file // When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example: // descriptors are closed, and deletes will always emit a Chmod. For example:
// //
// fp := os.Open("file") // fp := os.Open("file")
// os.Remove("file") // Triggers Chmod // os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove // fp.Close() // Triggers Remove
// //
// This is the event that inotify sends, so not much can be changed about this. // This is the event that inotify sends, so not much can be changed about this.
// //
@ -32,16 +32,16 @@ watcher=$(<<EOF
// //
// To increase them you can use sysctl or write the value to the /proc file: // To increase them you can use sysctl or write the value to the /proc file:
// //
// # Default values on Linux 5.18 // # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983 // sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128 // sysctl fs.inotify.max_user_instances=128
// //
// To make the changes persist on reboot edit /etc/sysctl.conf or // To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation): // your distro's documentation):
// //
// fs.inotify.max_user_watches=124983 // fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128 // fs.inotify.max_user_instances=128
// //
// Reaching the limit will result in a "no space left on device" or "too many open // Reaching the limit will result in a "no space left on device" or "too many open
// files" error. // files" error.
@ -57,14 +57,20 @@ watcher=$(<<EOF
// control the maximum number of open files, as well as /etc/login.conf on BSD // control the maximum number of open files, as well as /etc/login.conf on BSD
// systems. // systems.
// //
// # macOS notes // # Windows notes
// //
// Spotlight indexing on macOS can result in multiple events (see [#15]). A // Paths can be added as "C:\\path\\to\\dir", but forward slashes
// temporary workaround is to add your folder(s) to the "Spotlight Privacy // ("C:/path/to/dir") will also work.
// Settings" until we have a native FSEvents implementation (see [#11]).
// //
// [#11]: https://github.com/fsnotify/fsnotify/issues/11 // When a watched directory is removed it will always send an event for the
// [#15]: https://github.com/fsnotify/fsnotify/issues/15 // directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all times, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
EOF EOF
) )
@ -73,20 +79,36 @@ new=$(<<EOF
EOF EOF
) )
newbuffered=$(<<EOF
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
EOF
)
add=$(<<EOF add=$(<<EOF
// Add starts monitoring the path for changes. // Add starts monitoring the path for changes.
// //
// A path can only be watched once; attempting to watch it more than once will // A path can only be watched once; watching it more than once is a no-op and will
// return an error. Paths that do not yet exist on the filesystem cannot be // not return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted. // watched.
// //
// A path will remain watched if it gets renamed to somewhere else on the same // A watch will be automatically removed if the watched path is deleted or
// filesystem, but the monitor will get removed if the path gets deleted and // renamed. The exception is the Windows backend, which doesn't remove the
// re-created, or if it's moved to a different filesystem. // watcher on renames.
// //
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work. // filesystems (/proc, /sys, etc.) generally don't work.
// //
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories // # Watching directories
// //
// All files in a directory are monitored, including new files that are created // All files in a directory are monitored, including new files that are created
@ -96,14 +118,27 @@ add=$(<<EOF
// # Watching files // # Watching files
// //
// Watching individual files (rather than directories) is generally not // Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing // recommended as many programs (especially editors) update files atomically: it
// to the file a temporary file will be written to first, and if successful the // will write to a temporary file which is then moved to to destination,
// temporary file is moved to to destination removing the original, or some // overwriting the original (or some variant thereof). The watcher on the
// variant thereof. The watcher on the original file is now lost, as it no // original file is now lost, as that no longer exists.
// longer exists.
// //
// Instead, watch the parent directory and use Event.Name to filter out files // The upshot of this is that a power failure or crash won't leave a
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. // half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
EOF
)
addwith=$(<<EOF
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
// other platforms. The default is 64K (65536 bytes).
EOF EOF
) )
@ -114,16 +149,21 @@ remove=$(<<EOF
// /tmp/dir and /tmp/dir/subdir then you will need to remove both. // /tmp/dir and /tmp/dir/subdir then you will need to remove both.
// //
// Removing a path that has not yet been added returns [ErrNonExistentWatch]. // Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
EOF EOF
) )
close=$(<<EOF close=$(<<EOF
// Close removes all watches and closes the events channel. // Close removes all watches and closes the Events channel.
EOF EOF
) )
watchlist=$(<<EOF watchlist=$(<<EOF
// WatchList returns all paths added with [Add] (and are not yet removed). // WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
EOF EOF
) )
@ -153,20 +193,29 @@ events=$(<<EOF
// initiated by the user may show up as one or multiple // initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to // writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program // disk. For example when compiling a large Go program
// you may get hundreds of Write events, so you // you may get hundreds of Write events, and you may
// probably want to wait until you've stopped receiving // want to wait until you've stopped receiving them
// them (see the dedup example in cmd/fsnotify). // (see the dedup example in cmd/fsnotify).
//
// Some systems may send Write event for directories
// when the directory content changes.
// //
// fsnotify.Chmod Attributes were changed. On Linux this is also sent // fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a // when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent // link to an inode is removed). On kqueue it's sent
// and on kqueue when a file is truncated. On Windows // when a file is truncated. On Windows it's never
// it's never sent. // sent.
EOF EOF
) )
errors=$(<<EOF errors=$(<<EOF
// Errors sends any errors. // Errors sends any errors.
//
// ErrEventOverflow is used to indicate there are too many events:
//
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
// - kqueue, fen: Not used.
EOF EOF
) )
@ -200,7 +249,9 @@ set-cmt() {
set-cmt '^type Watcher struct ' $watcher set-cmt '^type Watcher struct ' $watcher
set-cmt '^func NewWatcher(' $new set-cmt '^func NewWatcher(' $new
set-cmt '^func NewBufferedWatcher(' $newbuffered
set-cmt '^func (w \*Watcher) Add(' $add set-cmt '^func (w \*Watcher) Add(' $add
set-cmt '^func (w \*Watcher) AddWith(' $addwith
set-cmt '^func (w \*Watcher) Remove(' $remove set-cmt '^func (w \*Watcher) Remove(' $remove
set-cmt '^func (w \*Watcher) Close(' $close set-cmt '^func (w \*Watcher) Close(' $close
set-cmt '^func (w \*Watcher) WatchList(' $watchlist set-cmt '^func (w \*Watcher) WatchList(' $watchlist


@ -15,6 +15,7 @@ go_library(
"macro.go", "macro.go",
"options.go", "options.go",
"program.go", "program.go",
"validator.go",
], ],
importpath = "github.com/google/cel-go/cel", importpath = "github.com/google/cel-go/cel",
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
@ -22,15 +23,18 @@ go_library(
"//checker:go_default_library", "//checker:go_default_library",
"//checker/decls:go_default_library", "//checker/decls:go_default_library",
"//common:go_default_library", "//common:go_default_library",
"//common/ast:go_default_library",
"//common/containers:go_default_library", "//common/containers:go_default_library",
"//common/decls:go_default_library",
"//common/functions:go_default_library",
"//common/operators:go_default_library", "//common/operators:go_default_library",
"//common/overloads:go_default_library", "//common/overloads:go_default_library",
"//common/stdlib:go_default_library",
"//common/types:go_default_library", "//common/types:go_default_library",
"//common/types/pb:go_default_library", "//common/types/pb:go_default_library",
"//common/types/ref:go_default_library", "//common/types/ref:go_default_library",
"//common/types/traits:go_default_library", "//common/types/traits:go_default_library",
"//interpreter:go_default_library", "//interpreter:go_default_library",
"//interpreter/functions:go_default_library",
"//parser:go_default_library", "//parser:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//proto:go_default_library",
@ -72,6 +76,8 @@ go_test(
"@io_bazel_rules_go//proto/wkt:descriptor_go_proto", "@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//encoding/prototext:go_default_library",
"@org_golang_google_protobuf//types/known/structpb:go_default_library", "@org_golang_google_protobuf//types/known/structpb:go_default_library",
"@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
], ],
) )

File diff suppressed because it is too large


@ -16,13 +16,14 @@ package cel
import ( import (
"errors" "errors"
"fmt"
"sync" "sync"
"github.com/google/cel-go/checker" "github.com/google/cel-go/checker"
"github.com/google/cel-go/checker/decls" chkdecls "github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common" "github.com/google/cel-go/common"
celast "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/containers" "github.com/google/cel-go/common/containers"
"github.com/google/cel-go/common/decls"
"github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/interpreter" "github.com/google/cel-go/interpreter"
@ -40,8 +41,8 @@ type Ast struct {
expr *exprpb.Expr expr *exprpb.Expr
info *exprpb.SourceInfo info *exprpb.SourceInfo
source Source source Source
refMap map[int64]*exprpb.Reference refMap map[int64]*celast.ReferenceInfo
typeMap map[int64]*exprpb.Type typeMap map[int64]*types.Type
} }
// Expr returns the proto serializable instance of the parsed/checked expression. // Expr returns the proto serializable instance of the parsed/checked expression.
@ -60,21 +61,26 @@ func (ast *Ast) SourceInfo() *exprpb.SourceInfo {
} }
// ResultType returns the output type of the expression if the Ast has been type-checked, else // ResultType returns the output type of the expression if the Ast has been type-checked, else
// returns decls.Dyn as the parse step cannot infer the type. // returns chkdecls.Dyn as the parse step cannot infer the type.
// //
// Deprecated: use OutputType // Deprecated: use OutputType
func (ast *Ast) ResultType() *exprpb.Type { func (ast *Ast) ResultType() *exprpb.Type {
if !ast.IsChecked() { if !ast.IsChecked() {
return decls.Dyn return chkdecls.Dyn
} }
return ast.typeMap[ast.expr.GetId()] out := ast.OutputType()
t, err := TypeToExprType(out)
if err != nil {
return chkdecls.Dyn
}
return t
} }
// OutputType returns the output type of the expression if the Ast has been type-checked, else // OutputType returns the output type of the expression if the Ast has been type-checked, else
// returns cel.DynType as the parse step cannot infer types. // returns cel.DynType as the parse step cannot infer types.
func (ast *Ast) OutputType() *Type { func (ast *Ast) OutputType() *Type {
t, err := ExprTypeToType(ast.ResultType()) t, found := ast.typeMap[ast.expr.GetId()]
if err != nil { if !found {
return DynType return DynType
} }
return t return t
@ -87,22 +93,33 @@ func (ast *Ast) Source() Source {
} }
// FormatType converts a type message into a string representation. // FormatType converts a type message into a string representation.
//
// Deprecated: prefer FormatCELType
func FormatType(t *exprpb.Type) string { func FormatType(t *exprpb.Type) string {
return checker.FormatCheckedType(t) return checker.FormatCheckedType(t)
} }
// FormatCELType formats a cel.Type value to a string representation.
//
// The type formatting is identical to FormatType.
func FormatCELType(t *Type) string {
return checker.FormatCELType(t)
}
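A small hedged sketch tying FormatCELType to OutputType; env is assumed to be a previously constructed *cel.Env, and the expression is a placeholder:

ast, iss := env.Compile(`"hello".size()`)
if iss.Err() != nil {
	log.Fatal(iss.Err())
}
fmt.Println(cel.FormatCELType(ast.OutputType())) // prints "int"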
// Env encapsulates the context necessary to perform parsing, type checking, or generation of // Env encapsulates the context necessary to perform parsing, type checking, or generation of
// evaluable programs for different expressions. // evaluable programs for different expressions.
type Env struct { type Env struct {
Container *containers.Container Container *containers.Container
functions map[string]*functionDecl variables []*decls.VariableDecl
declarations []*exprpb.Decl functions map[string]*decls.FunctionDecl
macros []parser.Macro macros []parser.Macro
adapter ref.TypeAdapter adapter types.Adapter
provider ref.TypeProvider provider types.Provider
features map[int]bool features map[int]bool
appliedFeatures map[int]bool appliedFeatures map[int]bool
libraries map[string]bool libraries map[string]bool
validators []ASTValidator
costOptions []checker.CostOption
// Internal parser representation // Internal parser representation
prsr *parser.Parser prsr *parser.Parser
@ -154,8 +171,8 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) {
return nil, err return nil, err
} }
return (&Env{ return (&Env{
declarations: []*exprpb.Decl{}, variables: []*decls.VariableDecl{},
functions: map[string]*functionDecl{}, functions: map[string]*decls.FunctionDecl{},
macros: []parser.Macro{}, macros: []parser.Macro{},
Container: containers.DefaultContainer, Container: containers.DefaultContainer,
adapter: registry, adapter: registry,
@ -163,14 +180,20 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) {
features: map[int]bool{}, features: map[int]bool{},
appliedFeatures: map[int]bool{}, appliedFeatures: map[int]bool{},
libraries: map[string]bool{}, libraries: map[string]bool{},
validators: []ASTValidator{},
progOpts: []ProgramOption{}, progOpts: []ProgramOption{},
costOptions: []checker.CostOption{},
}).configure(opts) }).configure(opts)
} }
// Check performs type-checking on the input Ast and yields a checked Ast and/or set of Issues. // Check performs type-checking on the input Ast and yields a checked Ast and/or set of Issues.
// If any `ASTValidators` are configured on the environment, they will be applied after a valid
// type-check result. If any issues are detected, the validators will provide them on the
// output Issues object.
// //
// Checking has failed if the returned Issues value and its Issues.Err() value are non-nil. // Either checking or validation has failed if the returned Issues value and its Issues.Err()
// Issues should be inspected if they are non-nil, but may not represent a fatal error. // value are non-nil. Issues should be inspected if they are non-nil, but may not represent a
// fatal error.
// //
// It is possible to have both non-nil Ast and Issues values returned from this call: however, // It is possible to have both non-nil Ast and Issues values returned from this call: however,
// the mere presence of an Ast does not imply that it is valid for use. // the mere presence of an Ast does not imply that it is valid for use.
@ -183,21 +206,38 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
if err != nil { if err != nil {
errs := common.NewErrors(ast.Source()) errs := common.NewErrors(ast.Source())
errs.ReportError(common.NoLocation, err.Error()) errs.ReportError(common.NoLocation, err.Error())
return nil, NewIssues(errs) return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo())
} }
res, errs := checker.Check(pe, ast.Source(), chk) res, errs := checker.Check(pe, ast.Source(), chk)
if len(errs.GetErrors()) > 0 { if len(errs.GetErrors()) > 0 {
return nil, NewIssues(errs) return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo())
} }
// Manually create the Ast to ensure that the Ast source information (which may be more // Manually create the Ast to ensure that the Ast source information (which may be more
// detailed than the information provided by Check), is returned to the caller. // detailed than the information provided by Check), is returned to the caller.
return &Ast{ ast = &Ast{
source: ast.Source(), source: ast.Source(),
expr: res.GetExpr(), expr: res.Expr,
info: res.GetSourceInfo(), info: res.SourceInfo,
refMap: res.GetReferenceMap(), refMap: res.ReferenceMap,
typeMap: res.GetTypeMap()}, nil typeMap: res.TypeMap}
// Generate a validator configuration from the set of configured validators.
vConfig := newValidatorConfig()
for _, v := range e.validators {
if cv, ok := v.(ASTValidatorConfigurer); ok {
cv.Configure(vConfig)
}
}
// Apply additional validators on the type-checked result.
iss := NewIssuesWithSourceInfo(errs, ast.SourceInfo())
for _, v := range e.validators {
v.Validate(e, vConfig, res, iss)
}
if iss.Err() != nil {
return nil, iss
}
return ast, nil
} }
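A hedged sketch of the compile-and-check flow this method participates in; the variable name and expression are placeholders, and the imports fmt, log and github.com/google/cel-go/cel are assumed:

env, err := cel.NewEnv(
	cel.Variable("name", cel.StringType), // placeholder declaration
)
if err != nil {
	log.Fatal(err)
}

// Compile runs Parse, then Check, then any configured ASTValidators.
ast, iss := env.Compile(`name.startsWith("prod-")`)
if iss.Err() != nil {
	log.Fatal(iss.Err()) // type-check or validation failure
}
prg, err := env.Program(ast)
if err != nil {
	log.Fatal(err)
}
out, _, err := prg.Eval(map[string]any{"name": "prod-db"})
fmt.Println(out, err) // true <nil>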
// Compile combines the Parse and Check phases CEL program compilation to produce an Ast and // Compile combines the Parse and Check phases CEL program compilation to produce an Ast and
@ -255,7 +295,7 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
copy(chkOptsCopy, e.chkOpts) copy(chkOptsCopy, e.chkOpts)
// Copy the declarations if needed. // Copy the declarations if needed.
decsCopy := []*exprpb.Decl{} varsCopy := []*decls.VariableDecl{}
if chk != nil { if chk != nil {
// If the type-checker has already been instantiated, then the e.declarations have been // If the type-checker has already been instantiated, then the e.declarations have been
// validated within the chk instance. // validated within the chk instance.
@ -263,8 +303,8 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
} else { } else {
// If the type-checker has not been instantiated, ensure the unvalidated declarations are // If the type-checker has not been instantiated, ensure the unvalidated declarations are
// provided to the extended Env instance. // provided to the extended Env instance.
decsCopy = make([]*exprpb.Decl, len(e.declarations)) varsCopy = make([]*decls.VariableDecl, len(e.variables))
copy(decsCopy, e.declarations) copy(varsCopy, e.variables)
} }
// Copy macros and program options // Copy macros and program options
@ -276,8 +316,8 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
// Copy the adapter / provider if they appear to be mutable. // Copy the adapter / provider if they appear to be mutable.
adapter := e.adapter adapter := e.adapter
provider := e.provider provider := e.provider
adapterReg, isAdapterReg := e.adapter.(ref.TypeRegistry) adapterReg, isAdapterReg := e.adapter.(*types.Registry)
providerReg, isProviderReg := e.provider.(ref.TypeRegistry) providerReg, isProviderReg := e.provider.(*types.Registry)
// In most cases the provider and adapter will be a ref.TypeRegistry; // In most cases the provider and adapter will be a ref.TypeRegistry;
// however, in the rare cases where they are not, they are assumed to // however, in the rare cases where they are not, they are assumed to
// be immutable. Since it is possible to set the TypeProvider separately // be immutable. Since it is possible to set the TypeProvider separately
@ -308,7 +348,7 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
for k, v := range e.appliedFeatures { for k, v := range e.appliedFeatures {
appliedFeaturesCopy[k] = v appliedFeaturesCopy[k] = v
} }
funcsCopy := make(map[string]*functionDecl, len(e.functions)) funcsCopy := make(map[string]*decls.FunctionDecl, len(e.functions))
for k, v := range e.functions { for k, v := range e.functions {
funcsCopy[k] = v funcsCopy[k] = v
} }
@ -316,10 +356,14 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
for k, v := range e.libraries { for k, v := range e.libraries {
libsCopy[k] = v libsCopy[k] = v
} }
validatorsCopy := make([]ASTValidator, len(e.validators))
copy(validatorsCopy, e.validators)
costOptsCopy := make([]checker.CostOption, len(e.costOptions))
copy(costOptsCopy, e.costOptions)
ext := &Env{ ext := &Env{
Container: e.Container, Container: e.Container,
declarations: decsCopy, variables: varsCopy,
functions: funcsCopy, functions: funcsCopy,
macros: macsCopy, macros: macsCopy,
progOpts: progOptsCopy, progOpts: progOptsCopy,
@ -327,9 +371,11 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
features: featuresCopy, features: featuresCopy,
appliedFeatures: appliedFeaturesCopy, appliedFeatures: appliedFeaturesCopy,
libraries: libsCopy, libraries: libsCopy,
validators: validatorsCopy,
provider: provider, provider: provider,
chkOpts: chkOptsCopy, chkOpts: chkOptsCopy,
prsrOpts: prsrOptsCopy, prsrOpts: prsrOptsCopy,
costOptions: costOptsCopy,
} }
return ext.configure(opts) return ext.configure(opts)
} }
@ -347,6 +393,25 @@ func (e *Env) HasLibrary(libName string) bool {
return exists && configured return exists && configured
} }
// Libraries returns the names of the SingletonLibrary instances that have been configured in the environment.
func (e *Env) Libraries() []string {
libraries := make([]string, 0, len(e.libraries))
for libName := range e.libraries {
libraries = append(libraries, libName)
}
return libraries
}
// HasValidator returns whether a specific ASTValidator has been configured in the environment.
func (e *Env) HasValidator(name string) bool {
for _, v := range e.validators {
if v.Name() == name {
return true
}
}
return false
}
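A brief usage sketch for the introspection helpers above; the validator name string is illustrative only (names come from each ASTValidator's Name() method), and the snippet assumes imports of "fmt" and the cel package.

func exampleIntrospection(env *cel.Env) {
	for _, lib := range env.Libraries() {
		fmt.Println("configured library:", lib) // e.g. "cel.lib.std"
	}
	// Hypothetical name; use the Name() reported by the validator you actually registered.
	if env.HasValidator("cel.validator.duration") {
		fmt.Println("duration literal validation is enabled")
	}
}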
// Parse parses the input expression value `txt` to an Ast and/or a set of Issues. // Parse parses the input expression value `txt` to an Ast and/or a set of Issues.
// //
// This form of Parse creates a Source value for the input `txt` and forwards to the // This form of Parse creates a Source value for the input `txt` and forwards to the
@ -388,36 +453,64 @@ func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) {
return newProgram(e, ast, optSet) return newProgram(e, ast, optSet)
} }
// CELTypeAdapter returns the `types.Adapter` configured for the environment.
func (e *Env) CELTypeAdapter() types.Adapter {
return e.adapter
}
// CELTypeProvider returns the `types.Provider` configured for the environment.
func (e *Env) CELTypeProvider() types.Provider {
return e.provider
}
// TypeAdapter returns the `ref.TypeAdapter` configured for the environment. // TypeAdapter returns the `ref.TypeAdapter` configured for the environment.
//
// Deprecated: use CELTypeAdapter()
func (e *Env) TypeAdapter() ref.TypeAdapter { func (e *Env) TypeAdapter() ref.TypeAdapter {
return e.adapter return e.adapter
} }
// TypeProvider returns the `ref.TypeProvider` configured for the environment. // TypeProvider returns the `ref.TypeProvider` configured for the environment.
//
// Deprecated: use CELTypeProvider()
func (e *Env) TypeProvider() ref.TypeProvider { func (e *Env) TypeProvider() ref.TypeProvider {
return e.provider if legacyProvider, ok := e.provider.(ref.TypeProvider); ok {
return legacyProvider
}
return &interopLegacyTypeProvider{Provider: e.provider}
} }
// UnknownVars returns an interpreter.PartialActivation which marks all variables // UnknownVars returns an interpreter.PartialActivation which marks all variables declared in the
// declared in the Env as unknown AttributePattern values. // Env as unknown AttributePattern values.
// //
// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation // Note, the UnknownVars will behave the same as an interpreter.EmptyActivation unless the
// unless the PartialAttributes option is provided as a ProgramOption. // PartialAttributes option is provided as a ProgramOption.
func (e *Env) UnknownVars() interpreter.PartialActivation { func (e *Env) UnknownVars() interpreter.PartialActivation {
var unknownPatterns []*interpreter.AttributePattern act := interpreter.EmptyActivation()
for _, d := range e.declarations { part, _ := PartialVars(act, e.computeUnknownVars(act)...)
switch d.GetDeclKind().(type) {
case *exprpb.Decl_Ident:
unknownPatterns = append(unknownPatterns,
interpreter.NewAttributePattern(d.GetName()))
}
}
part, _ := PartialVars(
interpreter.EmptyActivation(),
unknownPatterns...)
return part return part
} }
// PartialVars returns an interpreter.PartialActivation where all variables not in the input variable
// set, but which have been configured in the environment, are marked as unknown.
//
// The `vars` value may either be an interpreter.Activation or any valid input to the
// interpreter.NewActivation call.
//
// Note, this is equivalent to calling cel.PartialVars and manually configuring the set of unknown
// variables. For more advanced use cases of partial state where portions of an object graph, rather
// than top-level variables, are missing the PartialVars() method may be a more suitable choice.
//
// Note, the PartialVars will behave the same as an interpreter.EmptyActivation unless the
// PartialAttributes option is provided as a ProgramOption.
func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) {
act, err := interpreter.NewActivation(vars)
if err != nil {
return nil, err
}
return PartialVars(act, e.computeUnknownVars(act)...)
}
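A hedged sketch of the new Env.PartialVars helper combined with partial evaluation; it assumes the existing OptPartialEval option and the ResidualAst behavior documented below, with imports of "fmt", the cel package, and "github.com/google/cel-go/common/types".

func examplePartialEval() error {
	env, err := cel.NewEnv(
		cel.Variable("a", cel.IntType),
		cel.Variable("b", cel.IntType),
	)
	if err != nil {
		return err
	}
	ast, iss := env.Compile("a + b")
	if iss.Err() != nil {
		return iss.Err()
	}
	prg, err := env.Program(ast, cel.EvalOptions(cel.OptPartialEval))
	if err != nil {
		return err
	}
	// Only `a` is known; every other declared variable is marked unknown.
	vars, err := env.PartialVars(map[string]any{"a": 1})
	if err != nil {
		return err
	}
	out, details, err := prg.Eval(vars)
	if err != nil {
		return err
	}
	if types.IsUnknown(out) {
		// The residual should reference only the still-unknown attribute `b`.
		residual, err := env.ResidualAst(ast, details)
		if err != nil {
			return err
		}
		src, _ := cel.AstToString(residual)
		fmt.Println("residual:", src)
	}
	return nil
}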
// ResidualAst takes an Ast and its EvalDetails to produce a new Ast which only contains the // ResidualAst takes an Ast and its EvalDetails to produce a new Ast which only contains the
// attribute references which are unknown. // attribute references which are unknown.
// //
@ -463,11 +556,16 @@ func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
// EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and // EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and
// extension functions provided by estimator. // extension functions provided by estimator.
func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) { func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) {
checked, err := AstToCheckedExpr(ast) checked := &celast.CheckedAST{
if err != nil { Expr: ast.Expr(),
return checker.CostEstimate{}, fmt.Errorf("EsimateCost could not inspect Ast: %v", err) SourceInfo: ast.SourceInfo(),
TypeMap: ast.typeMap,
ReferenceMap: ast.refMap,
} }
return checker.Cost(checked, estimator, opts...) extendedOpts := make([]checker.CostOption, 0, len(e.costOptions))
extendedOpts = append(extendedOpts, opts...)
extendedOpts = append(extendedOpts, e.costOptions...)
return checker.Cost(checked, estimator, extendedOpts...)
} }
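A hedged sketch of calling EstimateCost with a minimal estimator. The CostEstimator methods below (EstimateSize, EstimateCallCost) follow the checker package referenced by this code, and returning nil defers to the checker's default cost model; treat the exact signatures as an assumption to verify against the vendored checker package.

type defaultEstimator struct{}

func (defaultEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate { return nil }

func (defaultEstimator) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate {
	return nil
}

func exampleEstimateCost(env *cel.Env, ast *cel.Ast) error {
	est, err := env.EstimateCost(ast, defaultEstimator{})
	if err != nil {
		return err
	}
	fmt.Printf("estimated cost range: [%d, %d]\n", est.Min, est.Max)
	return nil
}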
// configure applies a series of EnvOptions to the current environment. // configure applies a series of EnvOptions to the current environment.
@ -488,14 +586,6 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
return nil, err return nil, err
} }
// Initialize all of the functions configured within the environment.
for _, fn := range e.functions {
err = fn.init()
if err != nil {
return nil, err
}
}
// Configure the parser. // Configure the parser.
prsrOpts := []parser.Option{} prsrOpts := []parser.Option{}
prsrOpts = append(prsrOpts, e.prsrOpts...) prsrOpts = append(prsrOpts, e.prsrOpts...)
@ -504,6 +594,9 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
if e.HasFeature(featureEnableMacroCallTracking) { if e.HasFeature(featureEnableMacroCallTracking) {
prsrOpts = append(prsrOpts, parser.PopulateMacroCalls(true)) prsrOpts = append(prsrOpts, parser.PopulateMacroCalls(true))
} }
if e.HasFeature(featureVariadicLogicalASTs) {
prsrOpts = append(prsrOpts, parser.EnableVariadicOperatorASTs(true))
}
e.prsr, err = parser.NewParser(prsrOpts...) e.prsr, err = parser.NewParser(prsrOpts...)
if err != nil { if err != nil {
return nil, err return nil, err
@ -525,8 +618,6 @@ func (e *Env) initChecker() (*checker.Env, error) {
chkOpts := []checker.Option{} chkOpts := []checker.Option{}
chkOpts = append(chkOpts, e.chkOpts...) chkOpts = append(chkOpts, e.chkOpts...)
chkOpts = append(chkOpts, chkOpts = append(chkOpts,
checker.HomogeneousAggregateLiterals(
e.HasFeature(featureDisableDynamicAggregateLiterals)),
checker.CrossTypeNumericComparisons( checker.CrossTypeNumericComparisons(
e.HasFeature(featureCrossTypeNumericComparisons))) e.HasFeature(featureCrossTypeNumericComparisons)))
@ -536,19 +627,17 @@ func (e *Env) initChecker() (*checker.Env, error) {
return return
} }
// Add the statically configured declarations. // Add the statically configured declarations.
err = ce.Add(e.declarations...) err = ce.AddIdents(e.variables...)
if err != nil { if err != nil {
e.setCheckerOrError(nil, err) e.setCheckerOrError(nil, err)
return return
} }
// Add the function declarations which are derived from the FunctionDecl instances. // Add the function declarations which are derived from the FunctionDecl instances.
for _, fn := range e.functions { for _, fn := range e.functions {
fnDecl, err := functionDeclToExprDecl(fn) if fn.IsDeclarationDisabled() {
if err != nil { continue
e.setCheckerOrError(nil, err)
return
} }
err = ce.Add(fnDecl) err = ce.AddFunctions(fn)
if err != nil { if err != nil {
e.setCheckerOrError(nil, err) e.setCheckerOrError(nil, err)
return return
@ -596,17 +685,43 @@ func (e *Env) maybeApplyFeature(feature int, option EnvOption) (*Env, error) {
return e, nil return e, nil
} }
// computeUnknownVars determines a set of missing variables based on the input activation and the
// environment's configured declaration set.
func (e *Env) computeUnknownVars(vars interpreter.Activation) []*interpreter.AttributePattern {
var unknownPatterns []*interpreter.AttributePattern
for _, v := range e.variables {
varName := v.Name()
if _, found := vars.ResolveName(varName); found {
continue
}
unknownPatterns = append(unknownPatterns, interpreter.NewAttributePattern(varName))
}
return unknownPatterns
}
// Error type which references an expression id, a location within source, and a message.
type Error = common.Error
// Issues defines methods for inspecting the error details of parse and check calls. // Issues defines methods for inspecting the error details of parse and check calls.
// //
// Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct. // Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct.
type Issues struct { type Issues struct {
errs *common.Errors errs *common.Errors
info *exprpb.SourceInfo
} }
// NewIssues returns an Issues struct from a common.Errors object. // NewIssues returns an Issues struct from a common.Errors object.
func NewIssues(errs *common.Errors) *Issues { func NewIssues(errs *common.Errors) *Issues {
return NewIssuesWithSourceInfo(errs, nil)
}
// NewIssuesWithSourceInfo returns an Issues struct from a common.Errors object with SourceInfo metadata
// which can be used with the `ReportErrorAtID` method for additional error reports within the context
// information that's inferred from an expression id.
func NewIssuesWithSourceInfo(errs *common.Errors, info *exprpb.SourceInfo) *Issues {
return &Issues{ return &Issues{
errs: errs, errs: errs,
info: info,
} }
} }
@ -622,9 +737,9 @@ func (i *Issues) Err() error {
} }
// Errors returns the collection of errors encountered in more granular detail. // Errors returns the collection of errors encountered in more granular detail.
func (i *Issues) Errors() []common.Error { func (i *Issues) Errors() []*Error {
if i == nil { if i == nil {
return []common.Error{} return []*Error{}
} }
return i.errs.GetErrors() return i.errs.GetErrors()
} }
@ -648,6 +763,37 @@ func (i *Issues) String() string {
return i.errs.ToDisplayString() return i.errs.ToDisplayString()
} }
// ReportErrorAtID reports an error message with an optional set of formatting arguments.
//
// The source metadata for the expression at `id`, if present, is attached to the error report.
// To ensure that source metadata is attached to error reports, use NewIssuesWithSourceInfo.
func (i *Issues) ReportErrorAtID(id int64, message string, args ...any) {
i.errs.ReportErrorAtID(id, locationByID(id, i.info), message, args...)
}
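A small sketch of the error-reporting hook above for validator-style code; it assumes a checked *cel.Ast is in hand, that the expression id of interest was obtained elsewhere (for example while walking the AST), and an import of "github.com/google/cel-go/common".

func exampleReportAtID(ast *cel.Ast, exprID int64) *cel.Issues {
	errs := common.NewErrors(ast.Source())
	// Attaching SourceInfo lets ReportErrorAtID resolve the id back to a line/column.
	iss := cel.NewIssuesWithSourceInfo(errs, ast.SourceInfo())
	iss.ReportErrorAtID(exprID, "expression %d uses a discouraged construct", exprID)
	return iss
}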
// locationByID returns a common.Location given an expression id.
//
// TODO: move this functionality into the native SourceInfo as part of an overhaul of common.Source,
// as this implementation relies on the abstractions present in the protobuf SourceInfo object,
// and is replicated in the checker.
func locationByID(id int64, sourceInfo *exprpb.SourceInfo) common.Location {
positions := sourceInfo.GetPositions()
var line = 1
if offset, found := positions[id]; found {
col := int(offset)
for _, lineOffset := range sourceInfo.GetLineOffsets() {
if lineOffset < offset {
line++
col = int(offset - lineOffset)
} else {
break
}
}
return common.NewLocation(line, col)
}
return common.NoLocation
}
// getStdEnv lazy initializes the CEL standard environment. // getStdEnv lazy initializes the CEL standard environment.
func getStdEnv() (*Env, error) { func getStdEnv() (*Env, error) {
stdEnvInit.Do(func() { stdEnvInit.Do(func() {
@ -656,6 +802,90 @@ func getStdEnv() (*Env, error) {
return stdEnv, stdEnvErr return stdEnv, stdEnvErr
} }
// interopCELTypeProvider layers support for the types.Provider interface on top of a ref.TypeProvider.
type interopCELTypeProvider struct {
ref.TypeProvider
}
// FindStructType returns a types.Type instance for the given fully-qualified typeName if one exists.
//
// This method proxies to the underlying ref.TypeProvider's FindType method and converts the protobuf type
// into a native type representation. If the conversion fails, the type is listed as not found.
func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, bool) {
if et, found := p.FindType(typeName); found {
t, err := types.ExprTypeToType(et)
if err != nil {
return nil, false
}
return t, true
}
return nil, false
}
// FindStructFieldType returns a types.FieldType instance for the given fully-qualified typeName and field
// name, if one exists.
//
// This method proxies to the underlying ref.TypeProvider's FindFieldType method and converts the protobuf type
// into a native type representation. If the conversion fails, the type is listed as not found.
func (p *interopCELTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) {
if ft, found := p.FindFieldType(structType, fieldName); found {
t, err := types.ExprTypeToType(ft.Type)
if err != nil {
return nil, false
}
return &types.FieldType{
Type: t,
IsSet: ft.IsSet,
GetFrom: ft.GetFrom,
}, true
}
return nil, false
}
// interopLegacyTypeProvider layers support for the ref.TypeProvider interface on top of a types.Provider.
type interopLegacyTypeProvider struct {
types.Provider
}
// FindType returns the protobuf Type representation for the input type name if one exists.
//
// This method proxies to the underlying types.Provider FindStructType method and converts the types.Type
// value to a protobuf Type representation.
//
// Failure to convert the type will result in the type not being found.
func (p *interopLegacyTypeProvider) FindType(typeName string) (*exprpb.Type, bool) {
if t, found := p.FindStructType(typeName); found {
et, err := types.TypeToExprType(t)
if err != nil {
return nil, false
}
return et, true
}
return nil, false
}
// FindFieldType returns the protobuf-based FieldType representation for the input type name and field,
// if one exists.
//
// This call proxies to the types.Provider FindStructFieldType method and converts the types.FieldType
// value to a protobuf-based ref.FieldType representation if found.
//
// Failure to convert the FieldType will result in the field not being found.
func (p *interopLegacyTypeProvider) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) {
if cft, found := p.FindStructFieldType(structType, fieldName); found {
et, err := types.TypeToExprType(cft.Type)
if err != nil {
return nil, false
}
return &ref.FieldType{
Type: et,
IsSet: cft.IsSet,
GetFrom: cft.GetFrom,
}, true
}
return nil, false
}
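A hedged sketch of how the interop wrappers above are expected to be exercised from user code: a types.Registry satisfies both the new and legacy provider interfaces, and the environment converts between them on demand (imports: the cel package and "github.com/google/cel-go/common/types").

func exampleProviderInterop() error {
	reg, err := types.NewRegistry() // implements both types.Provider and ref.TypeRegistry
	if err != nil {
		return err
	}
	env, err := cel.NewEnv(cel.CustomTypeProvider(reg))
	if err != nil {
		return err
	}
	modern := env.CELTypeProvider() // types.Provider view
	legacy := env.TypeProvider()    // deprecated ref.TypeProvider view, possibly an interop wrapper
	_, _ = modern, legacy
	return nil
}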
var ( var (
stdEnvInit sync.Once stdEnvInit sync.Once
stdEnv *Env stdEnv *Env


@ -22,6 +22,7 @@ import (
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
"github.com/google/cel-go/common" "github.com/google/cel-go/common"
"github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits" "github.com/google/cel-go/common/types/traits"
@ -33,7 +34,8 @@ import (
// CheckedExprToAst converts a checked expression proto message to an Ast. // CheckedExprToAst converts a checked expression proto message to an Ast.
func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast { func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast {
return CheckedExprToAstWithSource(checkedExpr, nil) checked, _ := CheckedExprToAstWithSource(checkedExpr, nil)
return checked
} }
// CheckedExprToAstWithSource converts a checked expression proto message to an Ast, // CheckedExprToAstWithSource converts a checked expression proto message to an Ast,
@ -44,29 +46,18 @@ func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast {
// through future calls. // through future calls.
// //
// Prefer CheckedExprToAst if loading expressions from storage. // Prefer CheckedExprToAst if loading expressions from storage.
func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) *Ast { func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) (*Ast, error) {
refMap := checkedExpr.GetReferenceMap() checkedAST, err := ast.CheckedExprToCheckedAST(checkedExpr)
if refMap == nil { if err != nil {
refMap = map[int64]*exprpb.Reference{} return nil, err
}
typeMap := checkedExpr.GetTypeMap()
if typeMap == nil {
typeMap = map[int64]*exprpb.Type{}
}
si := checkedExpr.GetSourceInfo()
if si == nil {
si = &exprpb.SourceInfo{}
}
if src == nil {
src = common.NewInfoSource(si)
} }
return &Ast{ return &Ast{
expr: checkedExpr.GetExpr(), expr: checkedAST.Expr,
info: si, info: checkedAST.SourceInfo,
source: src, source: src,
refMap: refMap, refMap: checkedAST.ReferenceMap,
typeMap: typeMap, typeMap: checkedAST.TypeMap,
} }, nil
} }
// AstToCheckedExpr converts an Ast to an protobuf CheckedExpr value. // AstToCheckedExpr converts an Ast to an protobuf CheckedExpr value.
@ -76,12 +67,13 @@ func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) {
if !a.IsChecked() { if !a.IsChecked() {
return nil, fmt.Errorf("cannot convert unchecked ast") return nil, fmt.Errorf("cannot convert unchecked ast")
} }
return &exprpb.CheckedExpr{ cAst := &ast.CheckedAST{
Expr: a.Expr(), Expr: a.expr,
SourceInfo: a.SourceInfo(), SourceInfo: a.info,
ReferenceMap: a.refMap, ReferenceMap: a.refMap,
TypeMap: a.typeMap, TypeMap: a.typeMap,
}, nil }
return ast.CheckedASTToCheckedExpr(cAst)
} }
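For illustration, a short sketch of the round trip these conversion helpers enable (serialize a checked Ast to its protobuf form and restore it later); note that CheckedExprToAstWithSource now also returns an error in this version.

func exampleAstRoundTrip(env *cel.Env) error {
	ast, iss := env.Compile("1 + 2")
	if iss.Err() != nil {
		return iss.Err()
	}
	checkedPB, err := cel.AstToCheckedExpr(ast) // *exprpb.CheckedExpr, suitable for persistence
	if err != nil {
		return err
	}
	restored := cel.CheckedExprToAst(checkedPB)
	prg, err := env.Program(restored)
	if err != nil {
		return err
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		return err
	}
	fmt.Println(out.Value()) // 3
	return nil
}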
// ParsedExprToAst converts a parsed expression proto message to an Ast. // ParsedExprToAst converts a parsed expression proto message to an Ast.
@ -202,7 +194,7 @@ func RefValueToValue(res ref.Val) (*exprpb.Value, error) {
} }
var ( var (
typeNameToTypeValue = map[string]*types.TypeValue{ typeNameToTypeValue = map[string]ref.Val{
"bool": types.BoolType, "bool": types.BoolType,
"bytes": types.BytesType, "bytes": types.BytesType,
"double": types.DoubleType, "double": types.DoubleType,
@ -219,7 +211,7 @@ var (
) )
// ValueToRefValue converts between exprpb.Value and ref.Val. // ValueToRefValue converts between exprpb.Value and ref.Val.
func ValueToRefValue(adapter ref.TypeAdapter, v *exprpb.Value) (ref.Val, error) { func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) {
switch v.Kind.(type) { switch v.Kind.(type) {
case *exprpb.Value_NullValue: case *exprpb.Value_NullValue:
return types.NullValue, nil return types.NullValue, nil


@ -15,19 +15,18 @@
package cel package cel
import ( import (
"math"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/google/cel-go/checker"
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/stdlib"
"github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits" "github.com/google/cel-go/common/types/traits"
"github.com/google/cel-go/interpreter" "github.com/google/cel-go/interpreter"
"github.com/google/cel-go/interpreter/functions"
"github.com/google/cel-go/parser" "github.com/google/cel-go/parser"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
@ -35,6 +34,7 @@ import (
const ( const (
optMapMacro = "optMap" optMapMacro = "optMap"
optFlatMapMacro = "optFlatMap"
hasValueFunc = "hasValue" hasValueFunc = "hasValue"
optionalNoneFunc = "optional.none" optionalNoneFunc = "optional.none"
optionalOfFunc = "optional.of" optionalOfFunc = "optional.of"
@ -106,44 +106,213 @@ func (stdLibrary) LibraryName() string {
return "cel.lib.std" return "cel.lib.std"
} }
// EnvOptions returns options for the standard CEL function declarations and macros. // CompileOptions returns options for the standard CEL function declarations and macros.
func (stdLibrary) CompileOptions() []EnvOption { func (stdLibrary) CompileOptions() []EnvOption {
return []EnvOption{ return []EnvOption{
Declarations(checker.StandardDeclarations()...), func(e *Env) (*Env, error) {
var err error
for _, fn := range stdlib.Functions() {
existing, found := e.functions[fn.Name()]
if found {
fn, err = existing.Merge(fn)
if err != nil {
return nil, err
}
}
e.functions[fn.Name()] = fn
}
return e, nil
},
func(e *Env) (*Env, error) {
e.variables = append(e.variables, stdlib.Types()...)
return e, nil
},
Macros(StandardMacros...), Macros(StandardMacros...),
} }
} }
// ProgramOptions returns function implementations for the standard CEL functions. // ProgramOptions returns function implementations for the standard CEL functions.
func (stdLibrary) ProgramOptions() []ProgramOption { func (stdLibrary) ProgramOptions() []ProgramOption {
return []ProgramOption{ return []ProgramOption{}
Functions(functions.StandardOverloads()...), }
// OptionalTypes enable support for optional syntax and types in CEL.
//
// The optional value type makes it possible to express whether variables have
// been provided, whether a result has been computed, and in the future whether
// an object field path, map key value, or list index has a value.
//
// # Syntax Changes
//
// OptionalTypes are unlike other CEL extensions because they modify the CEL
// syntax itself, notably through the use of a `?` preceding a field name or
// index value.
//
// ## Field Selection
//
// The optional syntax in field selection is denoted as `obj.?field`. In other
// words, if a field is set, return `optional.of(obj.field)`, else
// `optional.none()`. The optional field selection is viral in the sense that
// after the first optional selection all subsequent selections or indices
// are treated as optional, i.e. the following expressions are equivalent:
//
// obj.?field.subfield
// obj.?field.?subfield
//
// ## Indexing
//
// Similar to field selection, the optional syntax can be used in index
// expressions on maps and lists:
//
// list[?0]
// map[?key]
//
// ## Optional Field Setting
//
// When creating map or message literals, if a field may be optionally set
// based on its presence, then placing a `?` before the field name or key
// will ensure the type on the right-hand side must be optional(T) where T
// is the type of the field or key-value.
//
// The following returns a map with the key expression set only if the
// subfield is present, otherwise an empty map is created:
//
// {?key: obj.?field.subfield}
//
// ## Optional Element Setting
//
// When creating list literals, an element in the list may be optionally added
// when the element expression is preceded by a `?`:
//
// [a, ?b, ?c] // return a list with either [a], [a, b], [a, b, c], or [a, c]
//
// # Optional.Of
//
// Create an optional(T) value of a given value with type T.
//
// optional.of(10)
//
// # Optional.OfNonZeroValue
//
// Create an optional(T) value of a given value with type T if it is not a
// zero-value. A zero-value is the default empty value for any given CEL type,
// including empty protobuf message types. If the value is empty, the result
// of this call will be optional.none().
//
// optional.ofNonZeroValue([1, 2, 3]) // optional(list(int))
// optional.ofNonZeroValue([]) // optional.none()
// optional.ofNonZeroValue(0) // optional.none()
// optional.ofNonZeroValue("") // optional.none()
//
// # Optional.None
//
// Create an empty optional value.
//
// # HasValue
//
// Determine whether the optional contains a value.
//
// optional.of(b'hello').hasValue() // true
// optional.ofNonZeroValue({}).hasValue() // false
//
// # Value
//
// Get the value contained by the optional. If the optional does not have a
// value, the result will be a CEL error.
//
// optional.of(b'hello').value() // b'hello'
// optional.ofNonZeroValue({}).value() // error
//
// # Or
//
// If the value on the left-hand side is optional.none(), the optional value
// on the right-hand side is returned. If the value on the left-hand side is
// valued, then it is returned. This operation is short-circuiting and will
// only evaluate as many links in the `or` chain as are needed to return a
// non-empty optional value.
//
// obj.?field.or(m[?key])
// l[?index].or(obj.?field.subfield).or(obj.?other)
//
// # OrValue
//
// Either return the value contained within the optional on the left-hand side
// or return the alternative value on the right hand side.
//
// m[?key].orValue("none")
//
// # OptMap
//
// Apply a transformation to the optional's underlying value if it is not empty
// and return an optional typed result based on the transformation. The
// transformation expression type must return a type T which is wrapped into
// an optional.
//
// msg.?elements.optMap(e, e.size()).orValue(0)
//
// # OptFlatMap
//
// Introduced in version: 1
//
// Apply a transformation to the optional's underlying value if it is not empty
// and return the result. The transform expression must return an optional(T)
// rather than type T. This can be useful when dealing with zero values and
// conditionally generating an empty or non-empty result in ways which cannot
// be expressed with `optMap`.
//
// msg.?elements.optFlatMap(e, e[?0]) // return the first element if present.
func OptionalTypes(opts ...OptionalTypesOption) EnvOption {
lib := &optionalLib{version: math.MaxUint32}
for _, opt := range opts {
lib = opt(lib)
}
return Lib(lib)
}
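A hedged usage sketch for the optional types library described above, assuming a string map variable and imports of "fmt" and the cel package.

func exampleOptionalTypes() error {
	env, err := cel.NewEnv(
		cel.OptionalTypes(),
		cel.Variable("m", cel.MapType(cel.StringType, cel.StringType)),
	)
	if err != nil {
		return err
	}
	// Optional index plus orValue: yields m["key"] when present, "none" otherwise.
	ast, iss := env.Compile(`m[?"key"].orValue("none")`)
	if iss.Err() != nil {
		return iss.Err()
	}
	prg, err := env.Program(ast)
	if err != nil {
		return err
	}
	out, _, err := prg.Eval(map[string]any{"m": map[string]string{"key": "value"}})
	if err != nil {
		return err
	}
	fmt.Println(out.Value()) // "value"
	return nil
}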
type optionalLib struct {
version uint32
}
// OptionalTypesOption is a functional interface for configuring the optional types library.
type OptionalTypesOption func(*optionalLib) *optionalLib
// OptionalTypesVersion configures the version of the optional type library.
//
// The version limits which functions are available. Only functions introduced
// at or below the given version are included in the library. If this option
// is not set, all functions are available.
//
// See the library documentation to determine the version in which a function was introduced.
// If the documentation does not state the version in which a function was introduced, it can
// be assumed to be introduced at version 0, when the library was first created.
func OptionalTypesVersion(version uint32) OptionalTypesOption {
return func(lib *optionalLib) *optionalLib {
lib.version = version
return lib
} }
} }
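A short version-pinning sketch: with version 0, the v1 additions (such as the optFlatMap macro) are left out; confirm the exact behavior against the library documentation above.

func exampleOptionalV0() (*cel.Env, error) {
	// optFlatMap is introduced at version 1, so it is not registered here.
	return cel.NewEnv(cel.OptionalTypes(cel.OptionalTypesVersion(0)))
}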
type optionalLibrary struct{}
// LibraryName implements the SingletonLibrary interface method. // LibraryName implements the SingletonLibrary interface method.
func (optionalLibrary) LibraryName() string { func (lib *optionalLib) LibraryName() string {
return "cel.lib.optional" return "cel.lib.optional"
} }
// CompileOptions implements the Library interface method. // CompileOptions implements the Library interface method.
func (optionalLibrary) CompileOptions() []EnvOption { func (lib *optionalLib) CompileOptions() []EnvOption {
paramTypeK := TypeParamType("K") paramTypeK := TypeParamType("K")
paramTypeV := TypeParamType("V") paramTypeV := TypeParamType("V")
optionalTypeV := OptionalType(paramTypeV) optionalTypeV := OptionalType(paramTypeV)
listTypeV := ListType(paramTypeV) listTypeV := ListType(paramTypeV)
mapTypeKV := MapType(paramTypeK, paramTypeV) mapTypeKV := MapType(paramTypeK, paramTypeV)
return []EnvOption{ opts := []EnvOption{
// Enable the optional syntax in the parser. // Enable the optional syntax in the parser.
enableOptionalSyntax(), enableOptionalSyntax(),
// Introduce the optional type. // Introduce the optional type.
Types(types.OptionalType), Types(types.OptionalType),
// Configure the optMap macro. // Configure the optMap and optFlatMap macros.
Macros(NewReceiverMacro(optMapMacro, 2, optMap)), Macros(NewReceiverMacro(optMapMacro, 2, optMap)),
// Global and member functions for working with optional values. // Global and member functions for working with optional values.
@ -202,21 +371,29 @@ func (optionalLibrary) CompileOptions() []EnvOption {
// Index overloads to accommodate using an optional value as the operand. // Index overloads to accommodate using an optional value as the operand.
Function(operators.Index, Function(operators.Index,
Overload("optional_list_index_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV), Overload("optional_list_index_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
Overload("optional_map_index_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)), Overload("optional_map_index_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
}
if lib.version >= 1 {
opts = append(opts, Macros(NewReceiverMacro(optFlatMapMacro, 2, optFlatMap)))
}
return opts
}
// ProgramOptions implements the Library interface method.
func (lib *optionalLib) ProgramOptions() []ProgramOption {
return []ProgramOption{
CustomDecorator(decorateOptionalOr),
} }
} }
func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
varIdent := args[0] varIdent := args[0]
varName := "" varName := ""
switch varIdent.GetExprKind().(type) { switch varIdent.GetExprKind().(type) {
case *exprpb.Expr_IdentExpr: case *exprpb.Expr_IdentExpr:
varName = varIdent.GetIdentExpr().GetName() varName = varIdent.GetIdentExpr().GetName()
default: default:
return nil, &common.Error{ return nil, meh.NewError(varIdent.GetId(), "optMap() variable name must be a simple identifier")
Message: "optMap() variable name must be a simple identifier",
Location: meh.OffsetLocation(varIdent.GetId()),
}
} }
mapExpr := args[1] mapExpr := args[1]
return meh.GlobalCall( return meh.GlobalCall(
@ -237,11 +414,30 @@ func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exp
), nil ), nil
} }
// ProgramOptions implements the Library interface method. func optFlatMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
func (optionalLibrary) ProgramOptions() []ProgramOption { varIdent := args[0]
return []ProgramOption{ varName := ""
CustomDecorator(decorateOptionalOr), switch varIdent.GetExprKind().(type) {
case *exprpb.Expr_IdentExpr:
varName = varIdent.GetIdentExpr().GetName()
default:
return nil, meh.NewError(varIdent.GetId(), "optFlatMap() variable name must be a simple identifier")
} }
mapExpr := args[1]
return meh.GlobalCall(
operators.Conditional,
meh.ReceiverCall(hasValueFunc, target),
meh.Fold(
unusedIterVar,
meh.NewList(),
varName,
meh.ReceiverCall(valueFunc, target),
meh.LiteralBool(false),
meh.Ident(varName),
mapExpr,
),
meh.GlobalCall(optionalNoneFunc),
), nil
} }
func enableOptionalSyntax() EnvOption { func enableOptionalSyntax() EnvOption {
@ -358,28 +554,16 @@ var (
timeOverloadDeclarations = []EnvOption{ timeOverloadDeclarations = []EnvOption{
Function(overloads.TimeGetHours, Function(overloads.TimeGetHours,
MemberOverload(overloads.DurationToHours, []*Type{DurationType}, IntType, MemberOverload(overloads.DurationToHours, []*Type{DurationType}, IntType,
UnaryBinding(func(dur ref.Val) ref.Val { UnaryBinding(types.DurationGetHours))),
d := dur.(types.Duration)
return types.Int(d.Hours())
}))),
Function(overloads.TimeGetMinutes, Function(overloads.TimeGetMinutes,
MemberOverload(overloads.DurationToMinutes, []*Type{DurationType}, IntType, MemberOverload(overloads.DurationToMinutes, []*Type{DurationType}, IntType,
UnaryBinding(func(dur ref.Val) ref.Val { UnaryBinding(types.DurationGetMinutes))),
d := dur.(types.Duration)
return types.Int(d.Minutes())
}))),
Function(overloads.TimeGetSeconds, Function(overloads.TimeGetSeconds,
MemberOverload(overloads.DurationToSeconds, []*Type{DurationType}, IntType, MemberOverload(overloads.DurationToSeconds, []*Type{DurationType}, IntType,
UnaryBinding(func(dur ref.Val) ref.Val { UnaryBinding(types.DurationGetSeconds))),
d := dur.(types.Duration)
return types.Int(d.Seconds())
}))),
Function(overloads.TimeGetMilliseconds, Function(overloads.TimeGetMilliseconds,
MemberOverload(overloads.DurationToMilliseconds, []*Type{DurationType}, IntType, MemberOverload(overloads.DurationToMilliseconds, []*Type{DurationType}, IntType,
UnaryBinding(func(dur ref.Val) ref.Val { UnaryBinding(types.DurationGetMilliseconds))),
d := dur.(types.Duration)
return types.Int(d.Milliseconds())
}))),
Function(overloads.TimeGetFullYear, Function(overloads.TimeGetFullYear,
MemberOverload(overloads.TimestampToYear, []*Type{TimestampType}, IntType, MemberOverload(overloads.TimestampToYear, []*Type{TimestampType}, IntType,
UnaryBinding(func(ts ref.Val) ref.Val { UnaryBinding(func(ts ref.Val) ref.Val {


@ -15,7 +15,6 @@
package cel package cel
import ( import (
"github.com/google/cel-go/common"
"github.com/google/cel-go/parser" "github.com/google/cel-go/parser"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
@ -63,21 +62,21 @@ func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro {
} }
// HasMacroExpander expands the input call arguments into a presence test, e.g. has(<operand>.field) // HasMacroExpander expands the input call arguments into a presence test, e.g. has(<operand>.field)
func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
return parser.MakeHas(meh, target, args) return parser.MakeHas(meh, target, args)
} }
// ExistsMacroExpander expands the input call arguments into a comprehension that returns true if any of the // ExistsMacroExpander expands the input call arguments into a comprehension that returns true if any of the
// elements in the range match the predicate expressions: // elements in the range match the predicate expressions:
// <iterRange>.exists(<iterVar>, <predicate>) // <iterRange>.exists(<iterVar>, <predicate>)
func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
return parser.MakeExists(meh, target, args) return parser.MakeExists(meh, target, args)
} }
// ExistsOneMacroExpander expands the input call arguments into a comprehension that returns true if exactly // ExistsOneMacroExpander expands the input call arguments into a comprehension that returns true if exactly
// one of the elements in the range match the predicate expressions: // one of the elements in the range match the predicate expressions:
// <iterRange>.exists_one(<iterVar>, <predicate>) // <iterRange>.exists_one(<iterVar>, <predicate>)
func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
return parser.MakeExistsOne(meh, target, args) return parser.MakeExistsOne(meh, target, args)
} }
@ -91,14 +90,14 @@ func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*ex
// //
// In the second form only iterVar values which return true when provided to the predicate expression // In the second form only iterVar values which return true when provided to the predicate expression
// are transformed. // are transformed.
func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
return parser.MakeMap(meh, target, args) return parser.MakeMap(meh, target, args)
} }
// FilterMacroExpander expands the input call arguments into a comprehension which produces a list which contains // FilterMacroExpander expands the input call arguments into a comprehension which produces a list which contains
// only elements which match the provided predicate expression: // only elements which match the provided predicate expression:
// <iterRange>.filter(<iterVar>, <predicate>) // <iterRange>.filter(<iterVar>, <predicate>)
func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
return parser.MakeFilter(meh, target, args) return parser.MakeFilter(meh, target, args)
} }
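The expanders above all delegate to the parser's helpers; for illustration, a hedged sketch of wiring a custom macro through the same signature. The `not` function name and rewrite are illustrative only, and the snippet assumes imports of the cel package, "github.com/google/cel-go/common/operators", and the exprpb alias used throughout this file.

// Rewrites not(x) into !x at parse time. The expander signature mirrors the
// exported expanders above: (MacroExprHelper, target, args) -> (*exprpb.Expr, *cel.Error).
func notMacroExpander(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) {
	return meh.GlobalCall(operators.LogicalNot, args[0]), nil
}

func exampleCustomMacro() (*cel.Env, error) {
	return cel.NewEnv(
		cel.Variable("flag", cel.BoolType),
		cel.Macros(cel.NewGlobalMacro("not", 1, notMacroExpander)),
	)
}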


@ -23,12 +23,13 @@ import (
"google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/dynamicpb" "google.golang.org/protobuf/types/dynamicpb"
"github.com/google/cel-go/checker/decls" "github.com/google/cel-go/checker"
"github.com/google/cel-go/common/containers" "github.com/google/cel-go/common/containers"
"github.com/google/cel-go/common/functions"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/pb" "github.com/google/cel-go/common/types/pb"
"github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/interpreter" "github.com/google/cel-go/interpreter"
"github.com/google/cel-go/interpreter/functions"
"github.com/google/cel-go/parser" "github.com/google/cel-go/parser"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
@ -41,13 +42,6 @@ import (
const ( const (
_ = iota _ = iota
// Disallow heterogeneous aggregate (list, map) literals.
// Note, it is still possible to have heterogeneous aggregates when
// provided as variables to the expression, as well as via conversion
// of well-known dynamic types, or with unchecked expressions.
// Affects checking. Provides a subset of standard behavior.
featureDisableDynamicAggregateLiterals
// Enable the tracking of function call expressions replaced by macros. // Enable the tracking of function call expressions replaced by macros.
featureEnableMacroCallTracking featureEnableMacroCallTracking
@ -63,9 +57,10 @@ const (
// is not already in UTC. // is not already in UTC.
featureDefaultUTCTimeZone featureDefaultUTCTimeZone
// Enable the use of optional types in the syntax, type-system, type-checking, // Enable the serialization of logical operator ASTs as variadic calls, thus
// and runtime. // compressing the logic graph to a single call when multiple like-operator
featureOptionalTypes // expressions occur: e.g. a && b && c && d -> call(_&&_, [a, b, c, d])
featureVariadicLogicalASTs
) )
// EnvOption is a functional interface for configuring the environment. // EnvOption is a functional interface for configuring the environment.
@ -82,23 +77,26 @@ func ClearMacros() EnvOption {
} }
} }
// CustomTypeAdapter swaps the default ref.TypeAdapter implementation with a custom one. // CustomTypeAdapter swaps the default types.Adapter implementation with a custom one.
// //
// Note: This option must be specified before the Types and TypeDescs options when used together. // Note: This option must be specified before the Types and TypeDescs options when used together.
func CustomTypeAdapter(adapter ref.TypeAdapter) EnvOption { func CustomTypeAdapter(adapter types.Adapter) EnvOption {
return func(e *Env) (*Env, error) { return func(e *Env) (*Env, error) {
e.adapter = adapter e.adapter = adapter
return e, nil return e, nil
} }
} }
// CustomTypeProvider swaps the default ref.TypeProvider implementation with a custom one. // CustomTypeProvider replaces the types.Provider implementation with a custom one.
//
// The `provider` variable type may either be types.Provider or ref.TypeProvider (deprecated)
// //
// Note: This option must be specified before the Types and TypeDescs options when used together. // Note: This option must be specified before the Types and TypeDescs options when used together.
func CustomTypeProvider(provider ref.TypeProvider) EnvOption { func CustomTypeProvider(provider any) EnvOption {
return func(e *Env) (*Env, error) { return func(e *Env) (*Env, error) {
e.provider = provider var err error
return e, nil e.provider, err = maybeInteropProvider(provider)
return e, err
} }
} }
@ -108,8 +106,28 @@ func CustomTypeProvider(provider ref.TypeProvider) EnvOption {
// for the environment. The NewEnv call builds on top of the standard CEL declarations. For a // for the environment. The NewEnv call builds on top of the standard CEL declarations. For a
// purely custom set of declarations use NewCustomEnv. // purely custom set of declarations use NewCustomEnv.
func Declarations(decls ...*exprpb.Decl) EnvOption { func Declarations(decls ...*exprpb.Decl) EnvOption {
declOpts := []EnvOption{}
var err error
var opt EnvOption
// Convert the declarations to `EnvOption` values ahead of time.
// Surface any errors in conversion when the options are applied.
for _, d := range decls {
opt, err = ExprDeclToDeclaration(d)
if err != nil {
break
}
declOpts = append(declOpts, opt)
}
return func(e *Env) (*Env, error) { return func(e *Env) (*Env, error) {
e.declarations = append(e.declarations, decls...) if err != nil {
return nil, err
}
for _, o := range declOpts {
e, err = o(e)
if err != nil {
return nil, err
}
}
return e, nil return e, nil
} }
} }
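A hedged sketch of the Declarations path above, which now converts legacy exprpb.Decl values through ExprDeclToDeclaration; the decls helpers are the long-standing "github.com/google/cel-go/checker/decls" API, and the exprpb alias matches the one imported in this file.

func exampleLegacyDeclarations() (*cel.Env, error) {
	// Legacy proto-based declarations are converted to native declarations when applied.
	return cel.NewEnv(
		cel.Declarations(
			decls.NewVar("name", decls.String),
			decls.NewFunction("shake_hands",
				decls.NewOverload("shake_hands_string_string",
					[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
		),
	)
}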
@ -126,14 +144,25 @@ func EagerlyValidateDeclarations(enabled bool) EnvOption {
return features(featureEagerlyValidateDeclarations, enabled) return features(featureEagerlyValidateDeclarations, enabled)
} }
// HomogeneousAggregateLiterals option ensures that list and map literal entry types must agree // HomogeneousAggregateLiterals disables mixed type list and map literal values.
// during type-checking.
// //
// Note, it is still possible to have heterogeneous aggregates when provided as variables to the // Note, it is still possible to have heterogeneous aggregates when provided as variables to the
// expression, as well as via conversion of well-known dynamic types, or with unchecked // expression, as well as via conversion of well-known dynamic types, or with unchecked
// expressions. // expressions.
func HomogeneousAggregateLiterals() EnvOption { func HomogeneousAggregateLiterals() EnvOption {
return features(featureDisableDynamicAggregateLiterals, true) return ASTValidators(ValidateHomogeneousAggregateLiterals())
}
// variadicLogicalOperatorASTs flattens like-operator chained logical expressions into a single
// variadic call with N-terms. This behavior is useful when serializing to a protocol buffer as
// it will reduce the number of recursive calls needed to deserialize the AST later.
//
// For example, given the following expression the call graph will be rendered accordingly:
//
// expression: a && b && c && (d || e)
// ast: call(_&&_, [a, b, c, call(_||_, [d, e])])
func variadicLogicalOperatorASTs() EnvOption {
return features(featureVariadicLogicalASTs, true)
} }
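Since HomogeneousAggregateLiterals is now expressed as an AST validator rather than a checker feature, a hedged sketch of the observable behavior (imports: "fmt" and the cel package):

func exampleHomogeneousLiterals() error {
	env, err := cel.NewEnv(cel.HomogeneousAggregateLiterals())
	if err != nil {
		return err
	}
	// Mixed-type list literals are rejected by the validator during Compile/Check.
	if _, iss := env.Compile(`[1, 'two', 3.0]`); iss.Err() != nil {
		fmt.Println("rejected as expected:", iss.Err())
	}
	return nil
}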
// Macros option extends the macro set configured in the environment. // Macros option extends the macro set configured in the environment.
@ -226,7 +255,12 @@ func Abbrevs(qualifiedNames ...string) EnvOption {
// Note: This option must be specified after the CustomTypeProvider option when used together. // Note: This option must be specified after the CustomTypeProvider option when used together.
func Types(addTypes ...any) EnvOption { func Types(addTypes ...any) EnvOption {
return func(e *Env) (*Env, error) { return func(e *Env) (*Env, error) {
reg, isReg := e.provider.(ref.TypeRegistry) var reg ref.TypeRegistry
var isReg bool
reg, isReg = e.provider.(*types.Registry)
if !isReg {
reg, isReg = e.provider.(ref.TypeRegistry)
}
if !isReg { if !isReg {
return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
} }
@ -436,6 +470,24 @@ func InterruptCheckFrequency(checkFrequency uint) ProgramOption {
} }
} }
// CostEstimatorOptions configure type-check time options for estimating expression cost.
func CostEstimatorOptions(costOpts ...checker.CostOption) EnvOption {
return func(e *Env) (*Env, error) {
e.costOptions = append(e.costOptions, costOpts...)
return e, nil
}
}
// CostTrackerOptions configures a set of options for cost-tracking.
//
// Note, CostTrackerOptions is a no-op unless CostTracking is also enabled.
func CostTrackerOptions(costOpts ...interpreter.CostTrackerOption) ProgramOption {
return func(p *prog) (*prog, error) {
p.costOptions = append(p.costOptions, costOpts...)
return p, nil
}
}
// CostTracking enables cost tracking and registers a ActualCostEstimator that can optionally provide a runtime cost estimate for any function calls. // CostTracking enables cost tracking and registers a ActualCostEstimator that can optionally provide a runtime cost estimate for any function calls.
func CostTracking(costEstimator interpreter.ActualCostEstimator) ProgramOption { func CostTracking(costEstimator interpreter.ActualCostEstimator) ProgramOption {
return func(p *prog) (*prog, error) { return func(p *prog) (*prog, error) {
@ -457,25 +509,21 @@ func CostLimit(costLimit uint64) ProgramOption {
} }
} }
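A hedged sketch of the runtime cost controls wired through these options; passing a nil ActualCostEstimator is assumed to fall back to the default per-step cost model, which should be verified against the vendored interpreter package.

func exampleCostTracking(env *cel.Env) error {
	ast, iss := env.Compile(`"hello".size() + 1`)
	if iss.Err() != nil {
		return iss.Err()
	}
	prg, err := env.Program(ast,
		cel.CostTracking(nil), // nil estimator: rely on the default runtime cost model
		cel.CostLimit(100),    // abort evaluation if the tracked cost exceeds 100
	)
	if err != nil {
		return err
	}
	_, details, err := prg.Eval(cel.NoVars())
	if err != nil {
		return err
	}
	if cost := details.ActualCost(); cost != nil {
		fmt.Println("actual cost:", *cost)
	}
	return nil
}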
func fieldToCELType(field protoreflect.FieldDescriptor) (*exprpb.Type, error) { func fieldToCELType(field protoreflect.FieldDescriptor) (*Type, error) {
if field.Kind() == protoreflect.MessageKind || field.Kind() == protoreflect.GroupKind { if field.Kind() == protoreflect.MessageKind || field.Kind() == protoreflect.GroupKind {
msgName := (string)(field.Message().FullName()) msgName := (string)(field.Message().FullName())
wellKnownType, found := pb.CheckedWellKnowns[msgName] return ObjectType(msgName), nil
if found {
return wellKnownType, nil
}
return decls.NewObjectType(msgName), nil
} }
if primitiveType, found := pb.CheckedPrimitives[field.Kind()]; found { if primitiveType, found := types.ProtoCELPrimitives[field.Kind()]; found {
return primitiveType, nil return primitiveType, nil
} }
if field.Kind() == protoreflect.EnumKind { if field.Kind() == protoreflect.EnumKind {
return decls.Int, nil return IntType, nil
} }
return nil, fmt.Errorf("field %s type %s not implemented", field.FullName(), field.Kind().String()) return nil, fmt.Errorf("field %s type %s not implemented", field.FullName(), field.Kind().String())
} }
func fieldToDecl(field protoreflect.FieldDescriptor) (*exprpb.Decl, error) { func fieldToVariable(field protoreflect.FieldDescriptor) (EnvOption, error) {
name := string(field.Name()) name := string(field.Name())
if field.IsMap() { if field.IsMap() {
mapKey := field.MapKey() mapKey := field.MapKey()
@ -488,20 +536,20 @@ func fieldToDecl(field protoreflect.FieldDescriptor) (*exprpb.Decl, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return decls.NewVar(name, decls.NewMapType(keyType, valueType)), nil return Variable(name, MapType(keyType, valueType)), nil
} }
if field.IsList() { if field.IsList() {
elemType, err := fieldToCELType(field) elemType, err := fieldToCELType(field)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return decls.NewVar(name, decls.NewListType(elemType)), nil return Variable(name, ListType(elemType)), nil
} }
celType, err := fieldToCELType(field) celType, err := fieldToCELType(field)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return decls.NewVar(name, celType), nil return Variable(name, celType), nil
} }
// DeclareContextProto returns an option to extend CEL environment with declarations from the given context proto. // DeclareContextProto returns an option to extend CEL environment with declarations from the given context proto.
@ -509,25 +557,53 @@ func fieldToDecl(field protoreflect.FieldDescriptor) (*exprpb.Decl, error) {
// https://github.com/google/cel-spec/blob/master/doc/langdef.md#evaluation-environment // https://github.com/google/cel-spec/blob/master/doc/langdef.md#evaluation-environment
func DeclareContextProto(descriptor protoreflect.MessageDescriptor) EnvOption { func DeclareContextProto(descriptor protoreflect.MessageDescriptor) EnvOption {
return func(e *Env) (*Env, error) { return func(e *Env) (*Env, error) {
var decls []*exprpb.Decl
fields := descriptor.Fields() fields := descriptor.Fields()
for i := 0; i < fields.Len(); i++ { for i := 0; i < fields.Len(); i++ {
field := fields.Get(i) field := fields.Get(i)
decl, err := fieldToDecl(field) variable, err := fieldToVariable(field)
if err != nil {
return nil, err
}
e, err = variable(e)
if err != nil { if err != nil {
return nil, err return nil, err
} }
decls = append(decls, decl)
}
var err error
e, err = Declarations(decls...)(e)
if err != nil {
return nil, err
} }
return Types(dynamicpb.NewMessage(descriptor))(e) return Types(dynamicpb.NewMessage(descriptor))(e)
} }
} }
// ContextProtoVars uses the fields of the input proto.Message as top-level variables within an Activation.
//
// Consider using with `DeclareContextProto` to simplify variable type declarations and publishing when using
// protocol buffers.
func ContextProtoVars(ctx proto.Message) (interpreter.Activation, error) {
if ctx == nil || !ctx.ProtoReflect().IsValid() {
return interpreter.EmptyActivation(), nil
}
reg, err := types.NewRegistry(ctx)
if err != nil {
return nil, err
}
pbRef := ctx.ProtoReflect()
typeName := string(pbRef.Descriptor().FullName())
fields := pbRef.Descriptor().Fields()
vars := make(map[string]any, fields.Len())
for i := 0; i < fields.Len(); i++ {
field := fields.Get(i)
sft, found := reg.FindStructFieldType(typeName, field.TextName())
if !found {
return nil, fmt.Errorf("no such field: %s", field.TextName())
}
fieldVal, err := sft.GetFrom(ctx)
if err != nil {
return nil, err
}
vars[field.TextName()] = fieldVal
}
return interpreter.NewActivation(vars)
}
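A hedged sketch of pairing DeclareContextProto with the new ContextProtoVars helper; `examplepb.Request` and its `name` field are hypothetical stand-ins for your own generated context message, and ref.Val comes from "github.com/google/cel-go/common/types/ref".

func exampleContextProto(req *examplepb.Request) (ref.Val, error) {
	env, err := cel.NewEnv(
		// Declare one CEL variable per field of the context message.
		cel.DeclareContextProto(req.ProtoReflect().Descriptor()),
	)
	if err != nil {
		return nil, err
	}
	ast, iss := env.Compile(`name.startsWith("prod-")`) // assumes the message has a string field `name`
	if iss.Err() != nil {
		return nil, iss.Err()
	}
	prg, err := env.Program(ast)
	if err != nil {
		return nil, err
	}
	// Bind the same message's fields as top-level variables at evaluation time.
	vars, err := cel.ContextProtoVars(req)
	if err != nil {
		return nil, err
	}
	out, _, err := prg.Eval(vars)
	return out, err
}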
// EnableMacroCallTracking ensures that call expressions which are replaced by macros // EnableMacroCallTracking ensures that call expressions which are replaced by macros
// are tracked in the `SourceInfo` of parsed and checked expressions. // are tracked in the `SourceInfo` of parsed and checked expressions.
func EnableMacroCallTracking() EnvOption { func EnableMacroCallTracking() EnvOption {
@ -545,13 +621,6 @@ func DefaultUTCTimeZone(enabled bool) EnvOption {
return features(featureDefaultUTCTimeZone, enabled) return features(featureDefaultUTCTimeZone, enabled)
} }
// OptionalTypes enable support for optional syntax and types in CEL. The optional value type makes
// it possible to express whether variables have been provided, whether a result has been computed,
// and in the future whether an object field path, map key value, or list index has a value.
func OptionalTypes() EnvOption {
return Lib(optionalLibrary{})
}
// features sets the given feature flags. See list of Feature constants above. // features sets the given feature flags. See list of Feature constants above.
func features(flag int, enabled bool) EnvOption { func features(flag int, enabled bool) EnvOption {
return func(e *Env) (*Env, error) { return func(e *Env) (*Env, error) {
@ -577,3 +646,14 @@ func ParserExpressionSizeLimit(limit int) EnvOption {
return e, nil return e, nil
} }
} }
func maybeInteropProvider(provider any) (types.Provider, error) {
switch p := provider.(type) {
case types.Provider:
return p, nil
case ref.TypeProvider:
return &interopCELTypeProvider{TypeProvider: p}, nil
default:
return nil, fmt.Errorf("unsupported type provider: %T", provider)
}
}


@ -19,11 +19,10 @@ import (
"fmt" "fmt"
"sync" "sync"
celast "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/interpreter" "github.com/google/cel-go/interpreter"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
) )
// Program is an evaluable view of an Ast. // Program is an evaluable view of an Ast.
@ -62,6 +61,9 @@ func NoVars() interpreter.Activation {
// PartialVars returns a PartialActivation which contains variables and a set of AttributePattern // PartialVars returns a PartialActivation which contains variables and a set of AttributePattern
// values that indicate variables or parts of variables whose value are not yet known. // values that indicate variables or parts of variables whose value are not yet known.
// //
// This method relies on manually configured sets of missing attribute patterns. For a method which
// infers the missing variables from the input and the configured environment, use Env.PartialVars().
//
// The `vars` value may either be an interpreter.Activation or any valid input to the // The `vars` value may either be an interpreter.Activation or any valid input to the
// interpreter.NewActivation call. // interpreter.NewActivation call.
func PartialVars(vars any, func PartialVars(vars any,
@ -104,7 +106,7 @@ func (ed *EvalDetails) State() interpreter.EvalState {
// ActualCost returns the tracked cost through the course of execution when `CostTracking` is enabled. // ActualCost returns the tracked cost through the course of execution when `CostTracking` is enabled.
// Otherwise, returns nil if the cost was not enabled. // Otherwise, returns nil if the cost was not enabled.
func (ed *EvalDetails) ActualCost() *uint64 { func (ed *EvalDetails) ActualCost() *uint64 {
if ed.costTracker == nil { if ed == nil || ed.costTracker == nil {
return nil return nil
} }
cost := ed.costTracker.ActualCost() cost := ed.costTracker.ActualCost()
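A hedged sketch of reading the tracked cost (assumes an existing environment and compiled Ast, an import of the cel package, and that cel.OptTrackCost enables the runtime cost tracker whose total ActualCost reports):

func evalWithCost(env *cel.Env, ast *cel.Ast, input map[string]any) (*uint64, error) {
	prg, err := env.Program(ast, cel.EvalOptions(cel.OptTrackCost))
	if err != nil {
		return nil, err
	}
	_, det, err := prg.Eval(input)
	if err != nil {
		return nil, err
	}
	// ActualCost returns nil unless cost tracking was enabled on the program.
	return det.ActualCost(), nil
}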
@ -128,10 +130,14 @@ type prog struct {
// Interpretable configured from an Ast and aggregate decorator set based on program options. // Interpretable configured from an Ast and aggregate decorator set based on program options.
interpretable interpreter.Interpretable interpretable interpreter.Interpretable
callCostEstimator interpreter.ActualCostEstimator callCostEstimator interpreter.ActualCostEstimator
costOptions []interpreter.CostTrackerOption
costLimit *uint64 costLimit *uint64
} }
func (p *prog) clone() *prog { func (p *prog) clone() *prog {
costOptsCopy := make([]interpreter.CostTrackerOption, len(p.costOptions))
copy(costOptsCopy, p.costOptions)
return &prog{ return &prog{
Env: p.Env, Env: p.Env,
evalOpts: p.evalOpts, evalOpts: p.evalOpts,
@ -153,9 +159,10 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
// Ensure the default attribute factory is set after the adapter and provider are // Ensure the default attribute factory is set after the adapter and provider are
// configured. // configured.
p := &prog{ p := &prog{
Env: e, Env: e,
decorators: []interpreter.InterpretableDecorator{}, decorators: []interpreter.InterpretableDecorator{},
dispatcher: disp, dispatcher: disp,
costOptions: []interpreter.CostTrackerOption{},
} }
// Configure the program via the ProgramOption values. // Configure the program via the ProgramOption values.
@ -169,7 +176,7 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
// Add the function bindings created via Function() options. // Add the function bindings created via Function() options.
for _, fn := range e.functions { for _, fn := range e.functions {
bindings, err := fn.bindings() bindings, err := fn.Bindings()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -208,14 +215,11 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
} }
// Enable compile-time checking of syntax/cardinality for string.format calls. // Enable compile-time checking of syntax/cardinality for string.format calls.
if p.evalOpts&OptCheckStringFormat == OptCheckStringFormat { if p.evalOpts&OptCheckStringFormat == OptCheckStringFormat {
var isValidType func(id int64, validTypes ...*types.TypeValue) (bool, error) var isValidType func(id int64, validTypes ...ref.Type) (bool, error)
if ast.IsChecked() { if ast.IsChecked() {
isValidType = func(id int64, validTypes ...*types.TypeValue) (bool, error) { isValidType = func(id int64, validTypes ...ref.Type) (bool, error) {
t, err := ExprTypeToType(ast.typeMap[id]) t := ast.typeMap[id]
if err != nil { if t.Kind() == DynKind {
return false, err
}
if t.kind == DynKind {
return true, nil return true, nil
} }
for _, vt := range validTypes { for _, vt := range validTypes {
@ -223,7 +227,7 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
if err != nil { if err != nil {
return false, err return false, err
} }
if k == t.kind { if t.Kind() == k {
return true, nil return true, nil
} }
} }
@ -231,7 +235,7 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
} }
} else { } else {
// if the AST isn't type-checked, short-circuit validation // if the AST isn't type-checked, short-circuit validation
isValidType = func(id int64, validTypes ...*types.TypeValue) (bool, error) { isValidType = func(id int64, validTypes ...ref.Type) (bool, error) {
return true, nil return true, nil
} }
} }
@ -243,6 +247,12 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
factory := func(state interpreter.EvalState, costTracker *interpreter.CostTracker) (Program, error) { factory := func(state interpreter.EvalState, costTracker *interpreter.CostTracker) (Program, error) {
costTracker.Estimator = p.callCostEstimator costTracker.Estimator = p.callCostEstimator
costTracker.Limit = p.costLimit costTracker.Limit = p.costLimit
for _, costOpt := range p.costOptions {
err := costOpt(costTracker)
if err != nil {
return nil, err
}
}
// Limit capacity to guarantee a reallocation when calling 'append(decs, ...)' below. This // Limit capacity to guarantee a reallocation when calling 'append(decs, ...)' below. This
// prevents the underlying memory from being shared between factory function calls causing // prevents the underlying memory from being shared between factory function calls causing
// undesired mutations. // undesired mutations.
@ -284,10 +294,11 @@ func (p *prog) initInterpretable(ast *Ast, decs []interpreter.InterpretableDecor
} }
// When the AST has been checked it contains metadata that can be used to speed up program execution. // When the AST has been checked it contains metadata that can be used to speed up program execution.
var checked *exprpb.CheckedExpr checked := &celast.CheckedAST{
checked, err := AstToCheckedExpr(ast) Expr: ast.Expr(),
if err != nil { SourceInfo: ast.SourceInfo(),
return nil, err TypeMap: ast.typeMap,
ReferenceMap: ast.refMap,
} }
interpretable, err := p.interpreter.NewInterpretable(checked, decs...) interpretable, err := p.interpreter.NewInterpretable(checked, decs...)
if err != nil { if err != nil {
@ -371,7 +382,11 @@ type progGen struct {
// the test is successful. // the test is successful.
func newProgGen(factory progFactory) (Program, error) { func newProgGen(factory progFactory) (Program, error) {
// Test the factory to make sure that configuration errors are spotted at config // Test the factory to make sure that configuration errors are spotted at config
_, err := factory(interpreter.NewEvalState(), &interpreter.CostTracker{}) tracker, err := interpreter.NewCostTracker(nil)
if err != nil {
return nil, err
}
_, err = factory(interpreter.NewEvalState(), tracker)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -384,7 +399,10 @@ func (gen *progGen) Eval(input any) (ref.Val, *EvalDetails, error) {
// new EvalState instance for each call to ensure that unique evaluations yield unique stateful // new EvalState instance for each call to ensure that unique evaluations yield unique stateful
// results. // results.
state := interpreter.NewEvalState() state := interpreter.NewEvalState()
costTracker := &interpreter.CostTracker{} costTracker, err := interpreter.NewCostTracker(nil)
if err != nil {
return nil, nil, err
}
det := &EvalDetails{state: state, costTracker: costTracker} det := &EvalDetails{state: state, costTracker: costTracker}
// Generate a new instance of the interpretable using the factory configured during the call to // Generate a new instance of the interpretable using the factory configured during the call to
@ -412,7 +430,10 @@ func (gen *progGen) ContextEval(ctx context.Context, input any) (ref.Val, *EvalD
// new EvalState instance for each call to ensure that unique evaluations yield unique stateful // new EvalState instance for each call to ensure that unique evaluations yield unique stateful
// results. // results.
state := interpreter.NewEvalState() state := interpreter.NewEvalState()
costTracker := &interpreter.CostTracker{} costTracker, err := interpreter.NewCostTracker(nil)
if err != nil {
return nil, nil, err
}
det := &EvalDetails{state: state, costTracker: costTracker} det := &EvalDetails{state: state, costTracker: costTracker}
// Generate a new instance of the interpretable using the factory configured during the call to // Generate a new instance of the interpretable using the factory configured during the call to
@ -498,7 +519,7 @@ type evalActivation struct {
// The lazy binding will only be invoked once per evaluation. // The lazy binding will only be invoked once per evaluation.
// //
// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using // Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
// the ref.TypeAdapter configured in the environment. // the types.Adapter configured in the environment.
func (a *evalActivation) ResolveName(name string) (any, bool) { func (a *evalActivation) ResolveName(name string) (any, bool) {
v, found := a.vars[name] v, found := a.vars[name]
if !found { if !found {

vendor/github.com/google/cel-go/cel/validator.go (generated, vendored, new file, 388 lines)

@ -0,0 +1,388 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cel
import (
"fmt"
"reflect"
"regexp"
"github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/overloads"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
const (
homogeneousValidatorName = "cel.lib.std.validate.types.homogeneous"
// HomogeneousAggregateLiteralExemptFunctions is the ValidatorConfig key used to configure
// the set of function names which are exempt from homogeneous type checks. The expected type
// is a string list of function names.
//
// As an example, the `<string>.format([args])` call expects the input arguments list to be
// comprised of a variety of types which correspond to the types expected by the format control
// clauses; however, all other uses of a mixed element type list would be unexpected.
HomogeneousAggregateLiteralExemptFunctions = homogeneousValidatorName + ".exempt"
)
// ASTValidators configures a set of ASTValidator instances into the target environment.
//
// Validators are applied in the order in which they are specified and are treated as singletons.
// The same ASTValidator with a given name will not be applied more than once.
func ASTValidators(validators ...ASTValidator) EnvOption {
return func(e *Env) (*Env, error) {
for _, v := range validators {
if !e.HasValidator(v.Name()) {
e.validators = append(e.validators, v)
}
}
return e, nil
}
}
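A hedged usage sketch of registering these validators on an environment (the variable, expression, and helper name are illustrative; an import of the cel package is assumed):

func compileWithValidators(src string) (*cel.Ast, error) {
	env, err := cel.NewEnv(
		cel.Variable("input", cel.StringType),
		// Each validator is registered once by name; duplicates are ignored.
		cel.ASTValidators(
			cel.ValidateRegexLiterals(),
			cel.ValidateDurationLiterals(),
			cel.ValidateTimestampLiterals(),
		),
	)
	if err != nil {
		return nil, err
	}
	// e.g. src = `input.matches('(')` reports an invalid regex literal as an issue.
	ast, iss := env.Compile(src)
	if iss.Err() != nil {
		return nil, iss.Err()
	}
	return ast, nil
}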
// ASTValidator defines a singleton interface for validating a type-checked Ast against an environment.
//
// Note: the Issues argument is mutable in the sense that it is intended to collect errors which will be
// reported to the caller.
type ASTValidator interface {
// Name returns the name of the validator. Names must be unique.
Name() string
// Validate validates a given Ast within an Environment and collects a set of potential issues.
//
// The ValidatorConfig is generated from the set of ASTValidatorConfigurer instances prior to
// the invocation of the Validate call. The expectation is that the validator configuration
// is created in sequence and immutable once provided to the Validate call.
//
// See individual validators for more information on their configuration keys and configuration
// properties.
Validate(*Env, ValidatorConfig, *ast.CheckedAST, *Issues)
}
// ValidatorConfig provides an accessor method for querying validator configuration state.
type ValidatorConfig interface {
GetOrDefault(name string, value any) any
}
// MutableValidatorConfig provides mutation methods for querying and updating validator configuration
// settings.
type MutableValidatorConfig interface {
ValidatorConfig
Set(name string, value any) error
}
// ASTValidatorConfigurer indicates that this object, currently expected to be an ASTValidator,
// participates in validator configuration settings.
//
// This interface may be split from the expectation of being an ASTValidator instance in the future.
type ASTValidatorConfigurer interface {
Configure(MutableValidatorConfig) error
}
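As a rough sketch, a custom validator that also participates in configuration might look like the following (hedged: the type, its name, and the exempted 'render' function are hypothetical; only the interfaces and the exemption config key come from the code above, and imports of the cel and common/ast packages are assumed):

type exemptFormatLikeCalls struct{}

func (exemptFormatLikeCalls) Name() string {
	return "example.validate.exempt_format_like_calls"
}

// Configure adds the hypothetical 'render' function to the set of calls exempted
// from homogeneous aggregate literal checks.
func (exemptFormatLikeCalls) Configure(cfg cel.MutableValidatorConfig) error {
	exempt := cfg.GetOrDefault(cel.HomogeneousAggregateLiteralExemptFunctions, []string{}).([]string)
	return cfg.Set(cel.HomogeneousAggregateLiteralExemptFunctions, append(exempt, "render"))
}

// Validate is a no-op here; this validator only contributes configuration.
func (exemptFormatLikeCalls) Validate(_ *cel.Env, _ cel.ValidatorConfig, _ *ast.CheckedAST, _ *cel.Issues) {}

Such a validator would be registered alongside the others via cel.ASTValidators(exemptFormatLikeCalls{}).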
// validatorConfig implements the ValidatorConfig and MutableValidatorConfig interfaces.
type validatorConfig struct {
data map[string]any
}
// newValidatorConfig initializes the validator config with default values for core CEL validators.
func newValidatorConfig() *validatorConfig {
return &validatorConfig{
data: map[string]any{
HomogeneousAggregateLiteralExemptFunctions: []string{},
},
}
}
// GetOrDefault returns the configured value for the name, if present, else the input default value.
//
// Note, the type-agreement between the input default and configured value is not checked on read.
func (config *validatorConfig) GetOrDefault(name string, value any) any {
v, found := config.data[name]
if !found {
return value
}
return v
}
// Set configures a validator option with the given name and value.
//
// If the value had previously been set, the new value must have the same reflection type as the old one,
// or the call will error.
func (config *validatorConfig) Set(name string, value any) error {
v, found := config.data[name]
if found && reflect.TypeOf(v) != reflect.TypeOf(value) {
return fmt.Errorf("incompatible configuration type for %s, got %T, wanted %T", name, value, v)
}
config.data[name] = value
return nil
}
// ExtendedValidations collects a set of common AST validations which reduce the likelihood of runtime errors.
//
// - Validate duration and timestamp literals
// - Ensure regex strings are valid
// - Disable mixed type list and map literals
func ExtendedValidations() EnvOption {
return ASTValidators(
ValidateDurationLiterals(),
ValidateTimestampLiterals(),
ValidateRegexLiterals(),
ValidateHomogeneousAggregateLiterals(),
)
}
// ValidateDurationLiterals ensures that duration literal arguments are valid immediately after type-check.
func ValidateDurationLiterals() ASTValidator {
return newFormatValidator(overloads.TypeConvertDuration, 0, evalCall)
}
// ValidateTimestampLiterals ensures that timestamp literal arguments are valid immediately after type-check.
func ValidateTimestampLiterals() ASTValidator {
return newFormatValidator(overloads.TypeConvertTimestamp, 0, evalCall)
}
// ValidateRegexLiterals ensures that regex patterns are validated after type-check.
func ValidateRegexLiterals() ASTValidator {
return newFormatValidator(overloads.Matches, 0, compileRegex)
}
// ValidateHomogeneousAggregateLiterals checks that all list and map literals entries have the same types, i.e.
// no mixed list element types or mixed map key or map value types.
//
// Note: the string format call relies on a mixed element type list for ease of use, so this check skips all
// literals which occur within string format calls.
func ValidateHomogeneousAggregateLiterals() ASTValidator {
return homogeneousAggregateLiteralValidator{}
}
// ValidateComprehensionNestingLimit ensures that comprehension nesting does not exceed the specified limit.
//
// This validator can be useful for preventing arbitrarily nested comprehensions which can take high polynomial
// time to complete.
//
// Note, this limit does not apply to comprehensions with an empty iteration range, as these comprehensions have
// no actual looping cost. The cel.bind() macro utilizes the comprehension structure to perform local variable
// assignments and supplies an empty iteration range, so bound expressions do not count against the nesting limit either.
func ValidateComprehensionNestingLimit(limit int) ASTValidator {
return nestingLimitValidator{limit: limit}
}
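A hedged sketch of the nesting limit in use (the declared variable, expression, and limit are illustrative; an import of the cel package is assumed):

func compileWithNestingLimit(src string) error {
	env, err := cel.NewEnv(
		cel.Variable("values", cel.ListType(cel.ListType(cel.IntType))),
		cel.ASTValidators(cel.ValidateComprehensionNestingLimit(1)),
	)
	if err != nil {
		return err
	}
	// A doubly nested comprehension such as
	//   values.all(row, row.exists(v, v > 0))
	// exceeds a limit of 1 and is reported as an issue.
	_, iss := env.Compile(src)
	return iss.Err()
}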
type argChecker func(env *Env, call, arg ast.NavigableExpr) error
func newFormatValidator(funcName string, argNum int, check argChecker) formatValidator {
return formatValidator{
funcName: funcName,
check: check,
argNum: argNum,
}
}
type formatValidator struct {
funcName string
argNum int
check argChecker
}
// Name returns the unique name of this function format validator.
func (v formatValidator) Name() string {
return fmt.Sprintf("cel.lib.std.validate.functions.%s", v.funcName)
}
// Validate searches the AST for uses of a given function name with a constant argument and performs a check
// on whether the argument is a valid literal value.
func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) {
root := ast.NavigateCheckedAST(a)
funcCalls := ast.MatchDescendants(root, ast.FunctionMatcher(v.funcName))
for _, call := range funcCalls {
callArgs := call.AsCall().Args()
if len(callArgs) <= v.argNum {
continue
}
litArg := callArgs[v.argNum]
if litArg.Kind() != ast.LiteralKind {
continue
}
if err := v.check(e, call, litArg); err != nil {
iss.ReportErrorAtID(litArg.ID(), "invalid %s argument", v.funcName)
}
}
}
func evalCall(env *Env, call, arg ast.NavigableExpr) error {
ast := ParsedExprToAst(&exprpb.ParsedExpr{Expr: call.ToExpr()})
prg, err := env.Program(ast)
if err != nil {
return err
}
_, _, err = prg.Eval(NoVars())
return err
}
func compileRegex(_ *Env, _, arg ast.NavigableExpr) error {
pattern := arg.AsLiteral().Value().(string)
_, err := regexp.Compile(pattern)
return err
}
type homogeneousAggregateLiteralValidator struct{}
// Name returns the unique name of the homogeneous type validator.
func (homogeneousAggregateLiteralValidator) Name() string {
return homogeneousValidatorName
}
// Configure implements the ASTValidatorConfigurer interface and currently sets the list of standard
// and exempt functions from homogeneous aggregate literal checks.
//
// TODO: Move this call into the string.format() ASTValidator once ported.
func (homogeneousAggregateLiteralValidator) Configure(c MutableValidatorConfig) error {
emptyList := []string{}
exemptFunctions := c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, emptyList).([]string)
exemptFunctions = append(exemptFunctions, "format")
return c.Set(HomogeneousAggregateLiteralExemptFunctions, exemptFunctions)
}
// Validate validates that all lists and map literals have homogeneous types, i.e. don't contain dyn types.
//
// This validator makes an exception for list and map literals which occur at any level of nesting within
// string format calls.
func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.CheckedAST, iss *Issues) {
var exemptedFunctions []string
exemptedFunctions = c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, exemptedFunctions).([]string)
root := ast.NavigateCheckedAST(a)
listExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.ListKind))
for _, listExpr := range listExprs {
if inExemptFunction(listExpr, exemptedFunctions) {
continue
}
l := listExpr.AsList()
elements := l.Elements()
optIndices := l.OptionalIndices()
var elemType *Type
for i, e := range elements {
et := e.Type()
if isOptionalIndex(i, optIndices) {
et = et.Parameters()[0]
}
if elemType == nil {
elemType = et
continue
}
if !elemType.IsEquivalentType(et) {
v.typeMismatch(iss, e.ID(), elemType, et)
break
}
}
}
mapExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.MapKind))
for _, mapExpr := range mapExprs {
if inExemptFunction(mapExpr, exemptedFunctions) {
continue
}
m := mapExpr.AsMap()
entries := m.Entries()
var keyType, valType *Type
for _, e := range entries {
key, val := e.Key(), e.Value()
kt, vt := key.Type(), val.Type()
if e.IsOptional() {
vt = vt.Parameters()[0]
}
if keyType == nil && valType == nil {
keyType, valType = kt, vt
continue
}
if !keyType.IsEquivalentType(kt) {
v.typeMismatch(iss, key.ID(), keyType, kt)
}
if !valType.IsEquivalentType(vt) {
v.typeMismatch(iss, val.ID(), valType, vt)
}
}
}
}
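For illustration, how this validation plays out at compile time (hedged sketch: the expressions are illustrative, and the string.format example additionally assumes the extensions library from github.com/google/cel-go/ext for the 'format' function):

func homogeneousExamples() {
	env, _ := cel.NewEnv(
		ext.Strings(), // provides string.format
		cel.ASTValidators(cel.ValidateHomogeneousAggregateLiterals()),
	)

	// Mixed element types in a plain list literal are reported as issues.
	_, iss := env.Compile(`[1, 'two', 3.0]`)
	_ = iss.Err() // e.g. expected type 'int' but found 'string'

	// Literals nested within string format calls are exempt by configuration.
	_, iss = env.Compile(`"%s of %d".format(['widgets', 3])`)
	_ = iss.Err() // no homogeneity issue reported for the format argument list
}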
func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool {
if parent, found := e.Parent(); found {
if parent.Kind() == ast.CallKind {
fnName := parent.AsCall().FunctionName()
for _, exempt := range exemptFunctions {
if exempt == fnName {
return true
}
}
}
if parent.Kind() == ast.ListKind || parent.Kind() == ast.MapKind {
return inExemptFunction(parent, exemptFunctions)
}
}
return false
}
func isOptionalIndex(i int, optIndices []int32) bool {
for _, optInd := range optIndices {
if i == int(optInd) {
return true
}
}
return false
}
func (homogeneousAggregateLiteralValidator) typeMismatch(iss *Issues, id int64, expected, actual *Type) {
iss.ReportErrorAtID(id, "expected type '%s' but found '%s'", FormatCELType(expected), FormatCELType(actual))
}
type nestingLimitValidator struct {
limit int
}
func (v nestingLimitValidator) Name() string {
return "cel.lib.std.validate.comprehension_nesting_limit"
}
func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) {
root := ast.NavigateCheckedAST(a)
comprehensions := ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind))
if len(comprehensions) <= v.limit {
return
}
for _, comp := range comprehensions {
count := 0
e := comp
hasParent := true
for hasParent {
// When the expression is not a comprehension, continue to the next ancestor.
if e.Kind() != ast.ComprehensionKind {
e, hasParent = e.Parent()
continue
}
// When the comprehension has an empty range, continue to the next ancestor
// as this comprehension does not have any associated cost.
iterRange := e.AsComprehension().IterRange()
if iterRange.Kind() == ast.ListKind && iterRange.AsList().Size() == 0 {
e, hasParent = e.Parent()
continue
}
// Otherwise check the nesting limit.
count++
if count > v.limit {
iss.ReportErrorAtID(comp.ID(), "comprehension exceeds nesting limit")
break
}
e, hasParent = e.Parent()
}
}
}


@ -11,9 +11,11 @@ go_library(
"cost.go", "cost.go",
"env.go", "env.go",
"errors.go", "errors.go",
"format.go",
"mapping.go", "mapping.go",
"options.go", "options.go",
"printer.go", "printer.go",
"scopes.go",
"standard.go", "standard.go",
"types.go", "types.go",
], ],
@ -22,10 +24,13 @@ go_library(
deps = [ deps = [
"//checker/decls:go_default_library", "//checker/decls:go_default_library",
"//common:go_default_library", "//common:go_default_library",
"//common/ast:go_default_library",
"//common/containers:go_default_library", "//common/containers:go_default_library",
"//common/debug:go_default_library", "//common/debug:go_default_library",
"//common/decls:go_default_library",
"//common/operators:go_default_library", "//common/operators:go_default_library",
"//common/overloads:go_default_library", "//common/overloads:go_default_library",
"//common/stdlib:go_default_library",
"//common/types:go_default_library", "//common/types:go_default_library",
"//common/types/pb:go_default_library", "//common/types/pb:go_default_library",
"//common/types/ref:go_default_library", "//common/types/ref:go_default_library",
@ -44,6 +49,7 @@ go_test(
"checker_test.go", "checker_test.go",
"cost_test.go", "cost_test.go",
"env_test.go", "env_test.go",
"format_test.go",
], ],
embed = [ embed = [
":go_default_library", ":go_default_library",


@ -18,15 +18,13 @@ package checker
import ( import (
"fmt" "fmt"
"reflect"
"github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common" "github.com/google/cel-go/common"
"github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/containers" "github.com/google/cel-go/common/containers"
"github.com/google/cel-go/common/decls"
"github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types"
"google.golang.org/protobuf/proto"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
) )
@ -37,8 +35,8 @@ type checker struct {
mappings *mapping mappings *mapping
freeTypeVarCounter int freeTypeVarCounter int
sourceInfo *exprpb.SourceInfo sourceInfo *exprpb.SourceInfo
types map[int64]*exprpb.Type types map[int64]*types.Type
references map[int64]*exprpb.Reference references map[int64]*ast.ReferenceInfo
} }
// Check performs type checking, giving a typed AST. // Check performs type checking, giving a typed AST.
@ -47,40 +45,38 @@ type checker struct {
// descriptions of protocol buffers, and a registry for errors. // descriptions of protocol buffers, and a registry for errors.
// Returns a CheckedExpr proto, which might not be usable if // Returns a CheckedExpr proto, which might not be usable if
// there are errors in the error registry. // there are errors in the error registry.
func Check(parsedExpr *exprpb.ParsedExpr, func Check(parsedExpr *exprpb.ParsedExpr, source common.Source, env *Env) (*ast.CheckedAST, *common.Errors) {
source common.Source, errs := common.NewErrors(source)
env *Env) (*exprpb.CheckedExpr, *common.Errors) {
c := checker{ c := checker{
env: env, env: env,
errors: &typeErrors{common.NewErrors(source)}, errors: &typeErrors{errs: errs},
mappings: newMapping(), mappings: newMapping(),
freeTypeVarCounter: 0, freeTypeVarCounter: 0,
sourceInfo: parsedExpr.GetSourceInfo(), sourceInfo: parsedExpr.GetSourceInfo(),
types: make(map[int64]*exprpb.Type), types: make(map[int64]*types.Type),
references: make(map[int64]*exprpb.Reference), references: make(map[int64]*ast.ReferenceInfo),
} }
c.check(parsedExpr.GetExpr()) c.check(parsedExpr.GetExpr())
// Walk over the final type map substituting any type parameters either by their bound value or // Walk over the final type map substituting any type parameters either by their bound value or
// by DYN. // by DYN.
m := make(map[int64]*exprpb.Type) m := make(map[int64]*types.Type)
for k, v := range c.types { for id, t := range c.types {
m[k] = substitute(c.mappings, v, true) m[id] = substitute(c.mappings, t, true)
} }
return &exprpb.CheckedExpr{ return &ast.CheckedAST{
Expr: parsedExpr.GetExpr(), Expr: parsedExpr.GetExpr(),
SourceInfo: parsedExpr.GetSourceInfo(), SourceInfo: parsedExpr.GetSourceInfo(),
TypeMap: m, TypeMap: m,
ReferenceMap: c.references, ReferenceMap: c.references,
}, c.errors.Errors }, errs
} }
func (c *checker) check(e *exprpb.Expr) { func (c *checker) check(e *exprpb.Expr) {
if e == nil { if e == nil {
return return
} }
switch e.GetExprKind().(type) { switch e.GetExprKind().(type) {
case *exprpb.Expr_ConstExpr: case *exprpb.Expr_ConstExpr:
literal := e.GetConstExpr() literal := e.GetConstExpr()
@ -113,53 +109,51 @@ func (c *checker) check(e *exprpb.Expr) {
case *exprpb.Expr_ComprehensionExpr: case *exprpb.Expr_ComprehensionExpr:
c.checkComprehension(e) c.checkComprehension(e)
default: default:
c.errors.ReportError( c.errors.unexpectedASTType(e.GetId(), c.location(e), e)
c.location(e), "Unrecognized ast type: %v", reflect.TypeOf(e))
} }
} }
func (c *checker) checkInt64Literal(e *exprpb.Expr) { func (c *checker) checkInt64Literal(e *exprpb.Expr) {
c.setType(e, decls.Int) c.setType(e, types.IntType)
} }
func (c *checker) checkUint64Literal(e *exprpb.Expr) { func (c *checker) checkUint64Literal(e *exprpb.Expr) {
c.setType(e, decls.Uint) c.setType(e, types.UintType)
} }
func (c *checker) checkStringLiteral(e *exprpb.Expr) { func (c *checker) checkStringLiteral(e *exprpb.Expr) {
c.setType(e, decls.String) c.setType(e, types.StringType)
} }
func (c *checker) checkBytesLiteral(e *exprpb.Expr) { func (c *checker) checkBytesLiteral(e *exprpb.Expr) {
c.setType(e, decls.Bytes) c.setType(e, types.BytesType)
} }
func (c *checker) checkDoubleLiteral(e *exprpb.Expr) { func (c *checker) checkDoubleLiteral(e *exprpb.Expr) {
c.setType(e, decls.Double) c.setType(e, types.DoubleType)
} }
func (c *checker) checkBoolLiteral(e *exprpb.Expr) { func (c *checker) checkBoolLiteral(e *exprpb.Expr) {
c.setType(e, decls.Bool) c.setType(e, types.BoolType)
} }
func (c *checker) checkNullLiteral(e *exprpb.Expr) { func (c *checker) checkNullLiteral(e *exprpb.Expr) {
c.setType(e, decls.Null) c.setType(e, types.NullType)
} }
func (c *checker) checkIdent(e *exprpb.Expr) { func (c *checker) checkIdent(e *exprpb.Expr) {
identExpr := e.GetIdentExpr() identExpr := e.GetIdentExpr()
// Check to see if the identifier is declared. // Check to see if the identifier is declared.
if ident := c.env.LookupIdent(identExpr.GetName()); ident != nil { if ident := c.env.LookupIdent(identExpr.GetName()); ident != nil {
c.setType(e, ident.GetIdent().GetType()) c.setType(e, ident.Type())
c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().GetValue())) c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value()))
// Overwrite the identifier with its fully qualified name. // Overwrite the identifier with its fully qualified name.
identExpr.Name = ident.GetName() identExpr.Name = ident.Name()
return return
} }
c.setType(e, decls.Error) c.setType(e, types.ErrorType)
c.errors.undeclaredReference( c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), identExpr.GetName())
c.location(e), c.env.container.Name(), identExpr.GetName())
} }
func (c *checker) checkSelect(e *exprpb.Expr) { func (c *checker) checkSelect(e *exprpb.Expr) {
@ -174,9 +168,9 @@ func (c *checker) checkSelect(e *exprpb.Expr) {
// Rewrite the node to be a variable reference to the resolved fully-qualified // Rewrite the node to be a variable reference to the resolved fully-qualified
// variable name. // variable name.
c.setType(e, ident.GetIdent().GetType()) c.setType(e, ident.Type())
c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().GetValue())) c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value()))
identName := ident.GetName() identName := ident.Name()
e.ExprKind = &exprpb.Expr_IdentExpr{ e.ExprKind = &exprpb.Expr_IdentExpr{
IdentExpr: &exprpb.Expr_Ident{ IdentExpr: &exprpb.Expr_Ident{
Name: identName, Name: identName,
@ -188,7 +182,7 @@ func (c *checker) checkSelect(e *exprpb.Expr) {
resultType := c.checkSelectField(e, sel.GetOperand(), sel.GetField(), false) resultType := c.checkSelectField(e, sel.GetOperand(), sel.GetField(), false)
if sel.TestOnly { if sel.TestOnly {
resultType = decls.Bool resultType = types.BoolType
} }
c.setType(e, substitute(c.mappings, resultType, false)) c.setType(e, substitute(c.mappings, resultType, false))
} }
@ -200,16 +194,17 @@ func (c *checker) checkOptSelect(e *exprpb.Expr) {
field := call.GetArgs()[1] field := call.GetArgs()[1]
fieldName, isString := maybeUnwrapString(field) fieldName, isString := maybeUnwrapString(field)
if !isString { if !isString {
c.errors.ReportError(c.location(field), "unsupported optional field selection: %v", field) c.errors.notAnOptionalFieldSelection(field.GetId(), c.location(field), field)
return return
} }
// Perform type-checking using the field selection logic. // Perform type-checking using the field selection logic.
resultType := c.checkSelectField(e, operand, fieldName, true) resultType := c.checkSelectField(e, operand, fieldName, true)
c.setType(e, substitute(c.mappings, resultType, false)) c.setType(e, substitute(c.mappings, resultType, false))
c.setReference(e, ast.NewFunctionReference("select_optional_field"))
} }
func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, optional bool) *exprpb.Type { func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, optional bool) *types.Type {
// Interpret as field selection, first traversing down the operand. // Interpret as field selection, first traversing down the operand.
c.check(operand) c.check(operand)
operandType := substitute(c.mappings, c.getType(operand), false) operandType := substitute(c.mappings, c.getType(operand), false)
@ -218,38 +213,37 @@ func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, option
targetType, isOpt := maybeUnwrapOptional(operandType) targetType, isOpt := maybeUnwrapOptional(operandType)
// Assume error type by default as most types do not support field selection. // Assume error type by default as most types do not support field selection.
resultType := decls.Error resultType := types.ErrorType
switch kindOf(targetType) { switch targetType.Kind() {
case kindMap: case types.MapKind:
// Maps yield their value type as the selection result type. // Maps yield their value type as the selection result type.
mapType := targetType.GetMapType() resultType = targetType.Parameters()[1]
resultType = mapType.GetValueType() case types.StructKind:
case kindObject:
// Objects yield their field type declaration as the selection result type, but only if // Objects yield their field type declaration as the selection result type, but only if
// the field is defined. // the field is defined.
messageType := targetType messageType := targetType
if fieldType, found := c.lookupFieldType(c.location(e), messageType.GetMessageType(), field); found { if fieldType, found := c.lookupFieldType(e.GetId(), messageType.TypeName(), field); found {
resultType = fieldType.Type resultType = fieldType
} }
case kindTypeParam: case types.TypeParamKind:
// Set the operand type to DYN to prevent assignment to a potentially incorrect type // Set the operand type to DYN to prevent assignment to a potentially incorrect type
// at a later point in type-checking. The isAssignable call will update the type // at a later point in type-checking. The isAssignable call will update the type
// substitutions for the type param under the covers. // substitutions for the type param under the covers.
c.isAssignable(decls.Dyn, targetType) c.isAssignable(types.DynType, targetType)
// Also, set the result type to DYN. // Also, set the result type to DYN.
resultType = decls.Dyn resultType = types.DynType
default: default:
// Dynamic / error values are treated as DYN type. Errors are handled this way as well // Dynamic / error values are treated as DYN type. Errors are handled this way as well
// in order to allow forward progress on the check. // in order to allow forward progress on the check.
if !isDynOrError(targetType) { if !isDynOrError(targetType) {
c.errors.typeDoesNotSupportFieldSelection(c.location(e), targetType) c.errors.typeDoesNotSupportFieldSelection(e.GetId(), c.location(e), targetType)
} }
resultType = decls.Dyn resultType = types.DynType
} }
// If the target type was optional coming in, then the result must be optional going out. // If the target type was optional coming in, then the result must be optional going out.
if isOpt || optional { if isOpt || optional {
return decls.NewOptionalType(resultType) return types.NewOptionalType(resultType)
} }
return resultType return resultType
} }
@ -277,15 +271,14 @@ func (c *checker) checkCall(e *exprpb.Expr) {
// Check for the existence of the function. // Check for the existence of the function.
fn := c.env.LookupFunction(fnName) fn := c.env.LookupFunction(fnName)
if fn == nil { if fn == nil {
c.errors.undeclaredReference( c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName)
c.location(e), c.env.container.Name(), fnName) c.setType(e, types.ErrorType)
c.setType(e, decls.Error)
return return
} }
// Overwrite the function name with its fully qualified resolved name. // Overwrite the function name with its fully qualified resolved name.
call.Function = fn.GetName() call.Function = fn.Name()
// Check to see whether the overload resolves. // Check to see whether the overload resolves.
c.resolveOverloadOrError(c.location(e), e, fn, nil, args) c.resolveOverloadOrError(e, fn, nil, args)
return return
} }
@ -303,8 +296,8 @@ func (c *checker) checkCall(e *exprpb.Expr) {
// be an inaccurate representation of the desired evaluation behavior. // be an inaccurate representation of the desired evaluation behavior.
// Overwrite with fully-qualified resolved function name sans receiver target. // Overwrite with fully-qualified resolved function name sans receiver target.
call.Target = nil call.Target = nil
call.Function = fn.GetName() call.Function = fn.Name()
c.resolveOverloadOrError(c.location(e), e, fn, nil, args) c.resolveOverloadOrError(e, fn, nil, args)
return return
} }
} }
@ -314,22 +307,21 @@ func (c *checker) checkCall(e *exprpb.Expr) {
fn := c.env.LookupFunction(fnName) fn := c.env.LookupFunction(fnName)
// Function found, attempt overload resolution. // Function found, attempt overload resolution.
if fn != nil { if fn != nil {
c.resolveOverloadOrError(c.location(e), e, fn, target, args) c.resolveOverloadOrError(e, fn, target, args)
return return
} }
// Function name not declared, record error. // Function name not declared, record error.
c.errors.undeclaredReference(c.location(e), c.env.container.Name(), fnName) c.setType(e, types.ErrorType)
c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName)
} }
func (c *checker) resolveOverloadOrError( func (c *checker) resolveOverloadOrError(
loc common.Location, e *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) {
e *exprpb.Expr,
fn *exprpb.Decl, target *exprpb.Expr, args []*exprpb.Expr) {
// Attempt to resolve the overload. // Attempt to resolve the overload.
resolution := c.resolveOverload(loc, fn, target, args) resolution := c.resolveOverload(e, fn, target, args)
// No such overload, error noted in the resolveOverload call, type recorded here. // No such overload, error noted in the resolveOverload call, type recorded here.
if resolution == nil { if resolution == nil {
c.setType(e, decls.Error) c.setType(e, types.ErrorType)
return return
} }
// Overload found. // Overload found.
@ -338,10 +330,9 @@ func (c *checker) resolveOverloadOrError(
} }
func (c *checker) resolveOverload( func (c *checker) resolveOverload(
loc common.Location, call *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) *overloadResolution {
fn *exprpb.Decl, target *exprpb.Expr, args []*exprpb.Expr) *overloadResolution {
var argTypes []*exprpb.Type var argTypes []*types.Type
if target != nil { if target != nil {
argTypes = append(argTypes, c.getType(target)) argTypes = append(argTypes, c.getType(target))
} }
@ -349,55 +340,75 @@ func (c *checker) resolveOverload(
argTypes = append(argTypes, c.getType(arg)) argTypes = append(argTypes, c.getType(arg))
} }
var resultType *exprpb.Type var resultType *types.Type
var checkedRef *exprpb.Reference var checkedRef *ast.ReferenceInfo
for _, overload := range fn.GetFunction().GetOverloads() { for _, overload := range fn.OverloadDecls() {
// Determine whether the overload is currently considered. // Determine whether the overload is currently considered.
if c.env.isOverloadDisabled(overload.GetOverloadId()) { if c.env.isOverloadDisabled(overload.ID()) {
continue continue
} }
// Ensure the call style for the overload matches. // Ensure the call style for the overload matches.
if (target == nil && overload.GetIsInstanceFunction()) || if (target == nil && overload.IsMemberFunction()) ||
(target != nil && !overload.GetIsInstanceFunction()) { (target != nil && !overload.IsMemberFunction()) {
// not a compatible call style. // not a compatible call style.
continue continue
} }
overloadType := decls.NewFunctionType(overload.ResultType, overload.Params...) // Alternative type-checking behavior when the logical operators are compacted into
if len(overload.GetTypeParams()) > 0 { // variadic AST representations.
if fn.Name() == operators.LogicalAnd || fn.Name() == operators.LogicalOr {
checkedRef = ast.NewFunctionReference(overload.ID())
for i, argType := range argTypes {
if !c.isAssignable(argType, types.BoolType) {
c.errors.typeMismatch(
args[i].GetId(),
c.locationByID(args[i].GetId()),
types.BoolType,
argType)
resultType = types.ErrorType
}
}
if isError(resultType) {
return nil
}
return newResolution(checkedRef, types.BoolType)
}
overloadType := newFunctionType(overload.ResultType(), overload.ArgTypes()...)
typeParams := overload.TypeParams()
if len(typeParams) != 0 {
// Instantiate overload's type with fresh type variables. // Instantiate overload's type with fresh type variables.
substitutions := newMapping() substitutions := newMapping()
for _, typePar := range overload.GetTypeParams() { for _, typePar := range typeParams {
substitutions.add(decls.NewTypeParamType(typePar), c.newTypeVar()) substitutions.add(types.NewTypeParamType(typePar), c.newTypeVar())
} }
overloadType = substitute(substitutions, overloadType, false) overloadType = substitute(substitutions, overloadType, false)
} }
candidateArgTypes := overloadType.GetFunction().GetArgTypes() candidateArgTypes := overloadType.Parameters()[1:]
if c.isAssignableList(argTypes, candidateArgTypes) { if c.isAssignableList(argTypes, candidateArgTypes) {
if checkedRef == nil { if checkedRef == nil {
checkedRef = newFunctionReference(overload.GetOverloadId()) checkedRef = ast.NewFunctionReference(overload.ID())
} else { } else {
checkedRef.OverloadId = append(checkedRef.GetOverloadId(), overload.GetOverloadId()) checkedRef.AddOverload(overload.ID())
} }
// First matching overload, determines result type. // First matching overload, determines result type.
fnResultType := substitute(c.mappings, overloadType.GetFunction().GetResultType(), false) fnResultType := substitute(c.mappings, overloadType.Parameters()[0], false)
if resultType == nil { if resultType == nil {
resultType = fnResultType resultType = fnResultType
} else if !isDyn(resultType) && !proto.Equal(fnResultType, resultType) { } else if !isDyn(resultType) && !fnResultType.IsExactType(resultType) {
resultType = decls.Dyn resultType = types.DynType
} }
} }
} }
if resultType == nil { if resultType == nil {
for i, arg := range argTypes { for i, argType := range argTypes {
argTypes[i] = substitute(c.mappings, arg, true) argTypes[i] = substitute(c.mappings, argType, true)
} }
c.errors.noMatchingOverload(loc, fn.GetName(), argTypes, target != nil) c.errors.noMatchingOverload(call.GetId(), c.location(call), fn.Name(), argTypes, target != nil)
resultType = decls.Error
return nil return nil
} }
@ -406,7 +417,7 @@ func (c *checker) resolveOverload(
func (c *checker) checkCreateList(e *exprpb.Expr) { func (c *checker) checkCreateList(e *exprpb.Expr) {
create := e.GetListExpr() create := e.GetListExpr()
var elemsType *exprpb.Type var elemsType *types.Type
optionalIndices := create.GetOptionalIndices() optionalIndices := create.GetOptionalIndices()
optionals := make(map[int32]bool, len(optionalIndices)) optionals := make(map[int32]bool, len(optionalIndices))
for _, optInd := range optionalIndices { for _, optInd := range optionalIndices {
@ -419,16 +430,16 @@ func (c *checker) checkCreateList(e *exprpb.Expr) {
var isOptional bool var isOptional bool
elemType, isOptional = maybeUnwrapOptional(elemType) elemType, isOptional = maybeUnwrapOptional(elemType)
if !isOptional && !isDyn(elemType) { if !isOptional && !isDyn(elemType) {
c.errors.typeMismatch(c.location(e), decls.NewOptionalType(elemType), elemType) c.errors.typeMismatch(e.GetId(), c.location(e), types.NewOptionalType(elemType), elemType)
} }
} }
elemsType = c.joinTypes(c.location(e), elemsType, elemType) elemsType = c.joinTypes(e, elemsType, elemType)
} }
if elemsType == nil { if elemsType == nil {
// If the list is empty, assign free type var to elem type. // If the list is empty, assign free type var to elem type.
elemsType = c.newTypeVar() elemsType = c.newTypeVar()
} }
c.setType(e, decls.NewListType(elemsType)) c.setType(e, types.NewListType(elemsType))
} }
func (c *checker) checkCreateStruct(e *exprpb.Expr) { func (c *checker) checkCreateStruct(e *exprpb.Expr) {
@ -442,12 +453,12 @@ func (c *checker) checkCreateStruct(e *exprpb.Expr) {
func (c *checker) checkCreateMap(e *exprpb.Expr) { func (c *checker) checkCreateMap(e *exprpb.Expr) {
mapVal := e.GetStructExpr() mapVal := e.GetStructExpr()
var mapKeyType *exprpb.Type var mapKeyType *types.Type
var mapValueType *exprpb.Type var mapValueType *types.Type
for _, ent := range mapVal.GetEntries() { for _, ent := range mapVal.GetEntries() {
key := ent.GetMapKey() key := ent.GetMapKey()
c.check(key) c.check(key)
mapKeyType = c.joinTypes(c.location(key), mapKeyType, c.getType(key)) mapKeyType = c.joinTypes(key, mapKeyType, c.getType(key))
val := ent.GetValue() val := ent.GetValue()
c.check(val) c.check(val)
@ -456,50 +467,54 @@ func (c *checker) checkCreateMap(e *exprpb.Expr) {
var isOptional bool var isOptional bool
valType, isOptional = maybeUnwrapOptional(valType) valType, isOptional = maybeUnwrapOptional(valType)
if !isOptional && !isDyn(valType) { if !isOptional && !isDyn(valType) {
c.errors.typeMismatch(c.location(val), decls.NewOptionalType(valType), valType) c.errors.typeMismatch(val.GetId(), c.location(val), types.NewOptionalType(valType), valType)
} }
} }
mapValueType = c.joinTypes(c.location(val), mapValueType, valType) mapValueType = c.joinTypes(val, mapValueType, valType)
} }
if mapKeyType == nil { if mapKeyType == nil {
// If the map is empty, assign free type variables to typeKey and value type. // If the map is empty, assign free type variables to typeKey and value type.
mapKeyType = c.newTypeVar() mapKeyType = c.newTypeVar()
mapValueType = c.newTypeVar() mapValueType = c.newTypeVar()
} }
c.setType(e, decls.NewMapType(mapKeyType, mapValueType)) c.setType(e, types.NewMapType(mapKeyType, mapValueType))
} }
func (c *checker) checkCreateMessage(e *exprpb.Expr) { func (c *checker) checkCreateMessage(e *exprpb.Expr) {
msgVal := e.GetStructExpr() msgVal := e.GetStructExpr()
// Determine the type of the message. // Determine the type of the message.
messageType := decls.Error resultType := types.ErrorType
decl := c.env.LookupIdent(msgVal.GetMessageName()) ident := c.env.LookupIdent(msgVal.GetMessageName())
if decl == nil { if ident == nil {
c.errors.undeclaredReference( c.errors.undeclaredReference(
c.location(e), c.env.container.Name(), msgVal.GetMessageName()) e.GetId(), c.location(e), c.env.container.Name(), msgVal.GetMessageName())
c.setType(e, types.ErrorType)
return return
} }
// Ensure the type name is fully qualified in the AST. // Ensure the type name is fully qualified in the AST.
msgVal.MessageName = decl.GetName() typeName := ident.Name()
c.setReference(e, newIdentReference(decl.GetName(), nil)) msgVal.MessageName = typeName
ident := decl.GetIdent() c.setReference(e, ast.NewIdentReference(ident.Name(), nil))
identKind := kindOf(ident.GetType()) identKind := ident.Type().Kind()
if identKind != kindError { if identKind != types.ErrorKind {
if identKind != kindType { if identKind != types.TypeKind {
c.errors.notAType(c.location(e), ident.GetType()) c.errors.notAType(e.GetId(), c.location(e), ident.Type().DeclaredTypeName())
} else { } else {
messageType = ident.GetType().GetType() resultType = ident.Type().Parameters()[0]
if kindOf(messageType) != kindObject { // Backwards compatibility test between well-known types and message types
c.errors.notAMessageType(c.location(e), messageType) // In this context, the type is being instantiated by its protobuf name which
messageType = decls.Error // is not ideal or recommended, but some users expect this to work.
if isWellKnownType(resultType) {
typeName = getWellKnownTypeName(resultType)
} else if resultType.Kind() == types.StructKind {
typeName = resultType.DeclaredTypeName()
} else {
c.errors.notAMessageType(e.GetId(), c.location(e), resultType.DeclaredTypeName())
resultType = types.ErrorType
} }
} }
} }
if isObjectWellKnownType(messageType) { c.setType(e, resultType)
c.setType(e, getObjectWellKnownType(messageType))
} else {
c.setType(e, messageType)
}
// Check the field initializers. // Check the field initializers.
for _, ent := range msgVal.GetEntries() { for _, ent := range msgVal.GetEntries() {
@ -507,10 +522,10 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) {
value := ent.GetValue() value := ent.GetValue()
c.check(value) c.check(value)
fieldType := decls.Error fieldType := types.ErrorType
ft, found := c.lookupFieldType(c.locationByID(ent.GetId()), messageType.GetMessageType(), field) ft, found := c.lookupFieldType(ent.GetId(), typeName, field)
if found { if found {
fieldType = ft.Type fieldType = ft
} }
valType := c.getType(value) valType := c.getType(value)
@ -518,11 +533,11 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) {
var isOptional bool var isOptional bool
valType, isOptional = maybeUnwrapOptional(valType) valType, isOptional = maybeUnwrapOptional(valType)
if !isOptional && !isDyn(valType) { if !isOptional && !isDyn(valType) {
c.errors.typeMismatch(c.location(value), decls.NewOptionalType(valType), valType) c.errors.typeMismatch(value.GetId(), c.location(value), types.NewOptionalType(valType), valType)
} }
} }
if !c.isAssignable(fieldType, valType) { if !c.isAssignable(fieldType, valType) {
c.errors.fieldTypeMismatch(c.locationByID(ent.Id), field, fieldType, valType) c.errors.fieldTypeMismatch(ent.GetId(), c.locationByID(ent.GetId()), field, fieldType, valType)
} }
} }
} }
@ -533,36 +548,36 @@ func (c *checker) checkComprehension(e *exprpb.Expr) {
c.check(comp.GetAccuInit()) c.check(comp.GetAccuInit())
accuType := c.getType(comp.GetAccuInit()) accuType := c.getType(comp.GetAccuInit())
rangeType := substitute(c.mappings, c.getType(comp.GetIterRange()), false) rangeType := substitute(c.mappings, c.getType(comp.GetIterRange()), false)
var varType *exprpb.Type var varType *types.Type
switch kindOf(rangeType) { switch rangeType.Kind() {
case kindList: case types.ListKind:
varType = rangeType.GetListType().GetElemType() varType = rangeType.Parameters()[0]
case kindMap: case types.MapKind:
// Ranges over the keys. // Ranges over the keys.
varType = rangeType.GetMapType().GetKeyType() varType = rangeType.Parameters()[0]
case kindDyn, kindError, kindTypeParam: case types.DynKind, types.ErrorKind, types.TypeParamKind:
// Set the range type to DYN to prevent assignment to a potentially incorrect type // Set the range type to DYN to prevent assignment to a potentially incorrect type
// at a later point in type-checking. The isAssignable call will update the type // at a later point in type-checking. The isAssignable call will update the type
// substitutions for the type param under the covers. // substitutions for the type param under the covers.
c.isAssignable(decls.Dyn, rangeType) c.isAssignable(types.DynType, rangeType)
// Set the range iteration variable to type DYN as well. // Set the range iteration variable to type DYN as well.
varType = decls.Dyn varType = types.DynType
default: default:
c.errors.notAComprehensionRange(c.location(comp.GetIterRange()), rangeType) c.errors.notAComprehensionRange(comp.GetIterRange().GetId(), c.location(comp.GetIterRange()), rangeType)
varType = decls.Error varType = types.ErrorType
} }
// Create a scope for the comprehension since it has a local accumulation variable. // Create a scope for the comprehension since it has a local accumulation variable.
// This scope will contain the accumulation variable used to compute the result. // This scope will contain the accumulation variable used to compute the result.
c.env = c.env.enterScope() c.env = c.env.enterScope()
c.env.Add(decls.NewVar(comp.GetAccuVar(), accuType)) c.env.AddIdents(decls.NewVariable(comp.GetAccuVar(), accuType))
// Create a block scope for the loop. // Create a block scope for the loop.
c.env = c.env.enterScope() c.env = c.env.enterScope()
c.env.Add(decls.NewVar(comp.GetIterVar(), varType)) c.env.AddIdents(decls.NewVariable(comp.GetIterVar(), varType))
// Check the variable references in the condition and step. // Check the variable references in the condition and step.
c.check(comp.GetLoopCondition()) c.check(comp.GetLoopCondition())
c.assertType(comp.GetLoopCondition(), decls.Bool) c.assertType(comp.GetLoopCondition(), types.BoolType)
c.check(comp.GetLoopStep()) c.check(comp.GetLoopStep())
c.assertType(comp.GetLoopStep(), accuType) c.assertType(comp.GetLoopStep(), accuType)
// Exit the loop's block scope before checking the result. // Exit the loop's block scope before checking the result.
@ -574,9 +589,7 @@ func (c *checker) checkComprehension(e *exprpb.Expr) {
} }
// Checks compatibility of joined types, and returns the most general common type. // Checks compatibility of joined types, and returns the most general common type.
func (c *checker) joinTypes(loc common.Location, func (c *checker) joinTypes(e *exprpb.Expr, previous, current *types.Type) *types.Type {
previous *exprpb.Type,
current *exprpb.Type) *exprpb.Type {
if previous == nil { if previous == nil {
return current return current
} }
@ -584,23 +597,23 @@ func (c *checker) joinTypes(loc common.Location,
return mostGeneral(previous, current) return mostGeneral(previous, current)
} }
if c.dynAggregateLiteralElementTypesEnabled() { if c.dynAggregateLiteralElementTypesEnabled() {
return decls.Dyn return types.DynType
} }
c.errors.typeMismatch(loc, previous, current) c.errors.typeMismatch(e.GetId(), c.location(e), previous, current)
return decls.Error return types.ErrorType
} }
func (c *checker) dynAggregateLiteralElementTypesEnabled() bool { func (c *checker) dynAggregateLiteralElementTypesEnabled() bool {
return c.env.aggLitElemType == dynElementType return c.env.aggLitElemType == dynElementType
} }
func (c *checker) newTypeVar() *exprpb.Type { func (c *checker) newTypeVar() *types.Type {
id := c.freeTypeVarCounter id := c.freeTypeVarCounter
c.freeTypeVarCounter++ c.freeTypeVarCounter++
return decls.NewTypeParamType(fmt.Sprintf("_var%d", id)) return types.NewTypeParamType(fmt.Sprintf("_var%d", id))
} }
func (c *checker) isAssignable(t1 *exprpb.Type, t2 *exprpb.Type) bool { func (c *checker) isAssignable(t1, t2 *types.Type) bool {
subs := isAssignable(c.mappings, t1, t2) subs := isAssignable(c.mappings, t1, t2)
if subs != nil { if subs != nil {
c.mappings = subs c.mappings = subs
@ -610,7 +623,7 @@ func (c *checker) isAssignable(t1 *exprpb.Type, t2 *exprpb.Type) bool {
return false return false
} }
func (c *checker) isAssignableList(l1 []*exprpb.Type, l2 []*exprpb.Type) bool { func (c *checker) isAssignableList(l1, l2 []*types.Type) bool {
subs := isAssignableList(c.mappings, l1, l2) subs := isAssignableList(c.mappings, l1, l2)
if subs != nil { if subs != nil {
c.mappings = subs c.mappings = subs
@ -620,57 +633,52 @@ func (c *checker) isAssignableList(l1 []*exprpb.Type, l2 []*exprpb.Type) bool {
return false return false
} }
func (c *checker) lookupFieldType(l common.Location, messageType string, fieldName string) (*ref.FieldType, bool) { func maybeUnwrapString(e *exprpb.Expr) (string, bool) {
if _, found := c.env.provider.FindType(messageType); !found { switch e.GetExprKind().(type) {
// This should not happen, anyway, report an error. case *exprpb.Expr_ConstExpr:
c.errors.unexpectedFailedResolution(l, messageType) literal := e.GetConstExpr()
return nil, false switch literal.GetConstantKind().(type) {
case *exprpb.Constant_StringValue:
return literal.GetStringValue(), true
}
} }
return "", false
if ft, found := c.env.provider.FindFieldType(messageType, fieldName); found {
return ft, found
}
c.errors.undefinedField(l, fieldName)
return nil, false
} }
func (c *checker) setType(e *exprpb.Expr, t *exprpb.Type) { func (c *checker) setType(e *exprpb.Expr, t *types.Type) {
if old, found := c.types[e.GetId()]; found && !proto.Equal(old, t) { if old, found := c.types[e.GetId()]; found && !old.IsExactType(t) {
c.errors.ReportError(c.location(e), c.errors.incompatibleType(e.GetId(), c.location(e), e, old, t)
"(Incompatible) Type already exists for expression: %v(%d) old:%v, new:%v", e, e.GetId(), old, t)
return return
} }
c.types[e.GetId()] = t c.types[e.GetId()] = t
} }
func (c *checker) getType(e *exprpb.Expr) *exprpb.Type { func (c *checker) getType(e *exprpb.Expr) *types.Type {
return c.types[e.GetId()] return c.types[e.GetId()]
} }
func (c *checker) setReference(e *exprpb.Expr, r *exprpb.Reference) { func (c *checker) setReference(e *exprpb.Expr, r *ast.ReferenceInfo) {
if old, found := c.references[e.GetId()]; found && !proto.Equal(old, r) { if old, found := c.references[e.GetId()]; found && !old.Equals(r) {
c.errors.ReportError(c.location(e), c.errors.referenceRedefinition(e.GetId(), c.location(e), e, old, r)
"Reference already exists for expression: %v(%d) old:%v, new:%v", e, e.GetId(), old, r)
return return
} }
c.references[e.GetId()] = r c.references[e.GetId()] = r
} }
func (c *checker) assertType(e *exprpb.Expr, t *exprpb.Type) { func (c *checker) assertType(e *exprpb.Expr, t *types.Type) {
if !c.isAssignable(t, c.getType(e)) { if !c.isAssignable(t, c.getType(e)) {
c.errors.typeMismatch(c.location(e), t, c.getType(e)) c.errors.typeMismatch(e.GetId(), c.location(e), t, c.getType(e))
} }
} }
type overloadResolution struct { type overloadResolution struct {
Reference *exprpb.Reference Type *types.Type
Type *exprpb.Type Reference *ast.ReferenceInfo
} }
func newResolution(checkedRef *exprpb.Reference, t *exprpb.Type) *overloadResolution { func newResolution(r *ast.ReferenceInfo, t *types.Type) *overloadResolution {
return &overloadResolution{ return &overloadResolution{
Reference: checkedRef, Reference: r,
Type: t, Type: t,
} }
} }
@ -697,10 +705,56 @@ func (c *checker) locationByID(id int64) common.Location {
return common.NoLocation return common.NoLocation
} }
func newIdentReference(name string, value *exprpb.Constant) *exprpb.Reference { func (c *checker) lookupFieldType(exprID int64, structType, fieldName string) (*types.Type, bool) {
return &exprpb.Reference{Name: name, Value: value} if _, found := c.env.provider.FindStructType(structType); !found {
// This should not happen, anyway, report an error.
c.errors.unexpectedFailedResolution(exprID, c.locationByID(exprID), structType)
return nil, false
}
if ft, found := c.env.provider.FindStructFieldType(structType, fieldName); found {
return ft.Type, found
}
c.errors.undefinedField(exprID, c.locationByID(exprID), fieldName)
return nil, false
} }
func newFunctionReference(overloads ...string) *exprpb.Reference { func isWellKnownType(t *types.Type) bool {
return &exprpb.Reference{OverloadId: overloads} switch t.Kind() {
case types.AnyKind, types.TimestampKind, types.DurationKind, types.DynKind, types.NullTypeKind:
return true
case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind:
return t.IsAssignableType(types.NullType)
case types.ListKind:
return t.Parameters()[0] == types.DynType
case types.MapKind:
return t.Parameters()[0] == types.StringType && t.Parameters()[1] == types.DynType
}
return false
} }
func getWellKnownTypeName(t *types.Type) string {
if name, found := wellKnownTypes[t.Kind()]; found {
return name
}
return ""
}
var (
wellKnownTypes = map[types.Kind]string{
types.AnyKind: "google.protobuf.Any",
types.BoolKind: "google.protobuf.BoolValue",
types.BytesKind: "google.protobuf.BytesValue",
types.DoubleKind: "google.protobuf.DoubleValue",
types.DurationKind: "google.protobuf.Duration",
types.DynKind: "google.protobuf.Value",
types.IntKind: "google.protobuf.Int64Value",
types.ListKind: "google.protobuf.ListValue",
types.NullTypeKind: "google.protobuf.NullValue",
types.MapKind: "google.protobuf.Struct",
types.StringKind: "google.protobuf.StringValue",
types.TimestampKind: "google.protobuf.Timestamp",
types.UintKind: "google.protobuf.UInt64Value",
}
)


@ -18,7 +18,9 @@ import (
"math" "math"
"github.com/google/cel-go/common" "github.com/google/cel-go/common"
"github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/parser" "github.com/google/cel-go/parser"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
@ -54,7 +56,7 @@ type AstNode interface {
// The first path element is a variable. All subsequent path elements are one of: field name, '@items', '@keys', '@values'. // The first path element is a variable. All subsequent path elements are one of: field name, '@items', '@keys', '@values'.
Path() []string Path() []string
// Type returns the deduced type of the AstNode. // Type returns the deduced type of the AstNode.
Type() *exprpb.Type Type() *types.Type
// Expr returns the expression of the AstNode. // Expr returns the expression of the AstNode.
Expr() *exprpb.Expr Expr() *exprpb.Expr
// ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression. // ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression.
@ -66,7 +68,7 @@ type AstNode interface {
type astNode struct { type astNode struct {
path []string path []string
t *exprpb.Type t *types.Type
expr *exprpb.Expr expr *exprpb.Expr
derivedSize *SizeEstimate derivedSize *SizeEstimate
} }
@ -75,7 +77,7 @@ func (e astNode) Path() []string {
return e.path return e.path
} }
func (e astNode) Type() *exprpb.Type { func (e astNode) Type() *types.Type {
return e.t return e.t
} }
@ -228,7 +230,7 @@ func addUint64NoOverflow(x, y uint64) uint64 {
// multiplyUint64NoOverflow multiplies non-negative ints. If the result exceeds math.MaxUint64, math.MaxUint64 // is returned.
// is returned. // is returned.
func multiplyUint64NoOverflow(x, y uint64) uint64 { func multiplyUint64NoOverflow(x, y uint64) uint64 {
if x > 0 && y > 0 && x > math.MaxUint64/y { if y != 0 && x > math.MaxUint64/y {
return math.MaxUint64 return math.MaxUint64
} }
return x * y return x * y
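The tightened guard in multiplyUint64NoOverflow works because the division by y is only unsafe when y is zero, and a zero operand can never overflow. A self-contained sketch of the same saturating multiplication, with illustrative names:

```go
package main

import (
	"fmt"
	"math"
)

// saturatingMulUint64 multiplies two uint64 values and clamps to math.MaxUint64
// instead of wrapping on overflow, mirroring the vendored check.
func saturatingMulUint64(x, y uint64) uint64 {
	if y != 0 && x > math.MaxUint64/y {
		return math.MaxUint64
	}
	return x * y
}

func main() {
	fmt.Println(saturatingMulUint64(1<<32, 1<<31)) // 9223372036854775808 (fits)
	fmt.Println(saturatingMulUint64(1<<33, 1<<32)) // 18446744073709551615 (clamped)
}
```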
@ -240,7 +242,11 @@ func multiplyByCostFactor(x uint64, y float64) uint64 {
if xFloat > 0 && y > 0 && xFloat > math.MaxUint64/y { if xFloat > 0 && y > 0 && xFloat > math.MaxUint64/y {
return math.MaxUint64 return math.MaxUint64
} }
return uint64(math.Ceil(xFloat * y)) ceil := math.Ceil(xFloat * y)
if ceil >= doubleTwoTo64 {
return math.MaxUint64
}
return uint64(ceil)
} }
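multiplyByCostFactor now also caps the ceiling against 2^64 before converting back to uint64, because converting a float64 of that magnitude to uint64 is not guaranteed to saturate in Go. A small sketch of the same clamping, assuming an illustrative helper name:

```go
package main

import (
	"fmt"
	"math"
)

// twoTo64 mirrors the vendored doubleTwoTo64 constant: 2^64 as a float64.
var twoTo64 = math.Ldexp(1.0, 64)

// scaleCost multiplies an integer cost by a float factor and clamps the result.
// The explicit >= 2^64 check matters because a float64 that large has no safe
// direct conversion to uint64.
func scaleCost(x uint64, factor float64) uint64 {
	xf := float64(x)
	if xf > 0 && factor > 0 && xf > math.MaxUint64/factor {
		return math.MaxUint64
	}
	ceil := math.Ceil(xf * factor)
	if ceil >= twoTo64 {
		return math.MaxUint64
	}
	return uint64(ceil)
}

func main() {
	fmt.Println(scaleCost(10, 0.1))           // 1
	fmt.Println(scaleCost(math.MaxUint64, 1)) // 18446744073709551615 (clamped by the 2^64 check)
}
```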
var ( var (
@ -258,9 +264,10 @@ type coster struct {
// iterRanges tracks the iterRange of each iterVar. // iterRanges tracks the iterRange of each iterVar.
iterRanges iterRangeScopes iterRanges iterRangeScopes
// computedSizes tracks the computed sizes of call results. // computedSizes tracks the computed sizes of call results.
computedSizes map[int64]SizeEstimate computedSizes map[int64]SizeEstimate
checkedExpr *exprpb.CheckedExpr checkedAST *ast.CheckedAST
estimator CostEstimator estimator CostEstimator
overloadEstimators map[string]FunctionEstimator
// presenceTestCost will either be a zero or one based on whether has() macros count against cost computations. // presenceTestCost will either be a zero or one based on whether has() macros count against cost computations.
presenceTestCost CostEstimate presenceTestCost CostEstimate
} }
@ -289,6 +296,7 @@ func (vs iterRangeScopes) peek(varName string) (int64, bool) {
type CostOption func(*coster) error type CostOption func(*coster) error
// PresenceTestHasCost determines whether presence testing has a cost of one or zero. // PresenceTestHasCost determines whether presence testing has a cost of one or zero.
//
// Defaults to a presence-test cost of one. // Defaults to a presence-test cost of one.
func PresenceTestHasCost(hasCost bool) CostOption { func PresenceTestHasCost(hasCost bool) CostOption {
return func(c *coster) error { return func(c *coster) error {
@ -301,15 +309,30 @@ func PresenceTestHasCost(hasCost bool) CostOption {
} }
} }
// FunctionEstimator provides a CallEstimate given the target and arguments for a specific function, overload pair.
type FunctionEstimator func(estimator CostEstimator, target *AstNode, args []AstNode) *CallEstimate
// OverloadCostEstimate binds a FunctionEstimator to a specific function overload ID.
//
// When an OverloadCostEstimate is provided, it will override the cost calculation of the CostEstimator provided to
// the Cost() call.
func OverloadCostEstimate(overloadID string, functionCoster FunctionEstimator) CostOption {
return func(c *coster) error {
c.overloadEstimators[overloadID] = functionCoster
return nil
}
}
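OverloadCostEstimate lets a caller override the estimator for a single overload ID. A sketch of how it could be wired into Cost; the overload ID, the fixed cost, and the function names are assumptions, and the checked AST and base estimator are expected to come from an existing type-check pass:

```go
package costexample

import (
	"github.com/google/cel-go/checker"
	"github.com/google/cel-go/common/ast"
)

// flatCost is an illustrative FunctionEstimator that charges a fixed cost of 5
// for whichever overload it is registered against.
func flatCost(_ checker.CostEstimator, _ *checker.AstNode, _ []checker.AstNode) *checker.CallEstimate {
	return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 5, Max: 5}}
}

// estimateWithOverride wires the per-overload estimator into Cost; the overload
// ID "my_custom_overload_id" is a placeholder.
func estimateWithOverride(checked *ast.CheckedAST, base checker.CostEstimator) (checker.CostEstimate, error) {
	return checker.Cost(checked, base,
		checker.OverloadCostEstimate("my_custom_overload_id", flatCost))
}
```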
// Cost estimates the cost of the parsed and type checked CEL expression. // Cost estimates the cost of the parsed and type checked CEL expression.
func Cost(checker *exprpb.CheckedExpr, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) { func Cost(checker *ast.CheckedAST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) {
c := &coster{ c := &coster{
checkedExpr: checker, checkedAST: checker,
estimator: estimator, estimator: estimator,
exprPath: map[int64][]string{}, overloadEstimators: map[string]FunctionEstimator{},
iterRanges: map[string][]int64{}, exprPath: map[int64][]string{},
computedSizes: map[int64]SizeEstimate{}, iterRanges: map[string][]int64{},
presenceTestCost: CostEstimate{Min: 1, Max: 1}, computedSizes: map[int64]SizeEstimate{},
presenceTestCost: CostEstimate{Min: 1, Max: 1},
} }
for _, opt := range opts { for _, opt := range opts {
err := opt(c) err := opt(c)
@ -317,7 +340,7 @@ func Cost(checker *exprpb.CheckedExpr, estimator CostEstimator, opts ...CostOpti
return CostEstimate{}, err return CostEstimate{}, err
} }
} }
return c.cost(checker.GetExpr()), nil return c.cost(checker.Expr), nil
} }
func (c *coster) cost(e *exprpb.Expr) CostEstimate { func (c *coster) cost(e *exprpb.Expr) CostEstimate {
@ -351,10 +374,10 @@ func (c *coster) costIdent(e *exprpb.Expr) CostEstimate {
// build and track the field path // build and track the field path
if iterRange, ok := c.iterRanges.peek(identExpr.GetName()); ok { if iterRange, ok := c.iterRanges.peek(identExpr.GetName()); ok {
switch c.checkedExpr.TypeMap[iterRange].GetTypeKind().(type) { switch c.checkedAST.TypeMap[iterRange].Kind() {
case *exprpb.Type_ListType_: case types.ListKind:
c.addPath(e, append(c.exprPath[iterRange], "@items")) c.addPath(e, append(c.exprPath[iterRange], "@items"))
case *exprpb.Type_MapType_: case types.MapKind:
c.addPath(e, append(c.exprPath[iterRange], "@keys")) c.addPath(e, append(c.exprPath[iterRange], "@keys"))
} }
} else { } else {
@ -378,8 +401,8 @@ func (c *coster) costSelect(e *exprpb.Expr) CostEstimate {
} }
sum = sum.Add(c.cost(sel.GetOperand())) sum = sum.Add(c.cost(sel.GetOperand()))
targetType := c.getType(sel.GetOperand()) targetType := c.getType(sel.GetOperand())
switch kindOf(targetType) { switch targetType.Kind() {
case kindMap, kindObject, kindTypeParam: case types.MapKind, types.StructKind, types.TypeParamKind:
sum = sum.Add(selectAndIdentCost) sum = sum.Add(selectAndIdentCost)
} }
@ -403,8 +426,8 @@ func (c *coster) costCall(e *exprpb.Expr) CostEstimate {
argTypes[i] = c.newAstNode(arg) argTypes[i] = c.newAstNode(arg)
} }
ref := c.checkedExpr.ReferenceMap[e.GetId()] ref := c.checkedAST.ReferenceMap[e.GetId()]
if ref == nil || len(ref.GetOverloadId()) == 0 { if ref == nil || len(ref.OverloadIDs) == 0 {
return CostEstimate{} return CostEstimate{}
} }
var targetType AstNode var targetType AstNode
@ -417,7 +440,7 @@ func (c *coster) costCall(e *exprpb.Expr) CostEstimate {
// Pick a cost estimate range that covers all the overload cost estimation ranges // Pick a cost estimate range that covers all the overload cost estimation ranges
fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0} fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0}
var resultSize *SizeEstimate var resultSize *SizeEstimate
for _, overload := range ref.GetOverloadId() { for _, overload := range ref.OverloadIDs {
overloadCost := c.functionCost(call.GetFunction(), overload, &targetType, argTypes, argCosts) overloadCost := c.functionCost(call.GetFunction(), overload, &targetType, argTypes, argCosts)
fnCost = fnCost.Union(overloadCost.CostEstimate) fnCost = fnCost.Union(overloadCost.CostEstimate)
if overloadCost.ResultSize != nil { if overloadCost.ResultSize != nil {
@ -530,7 +553,14 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
} }
return sum return sum
} }
if len(c.overloadEstimators) != 0 {
if estimator, found := c.overloadEstimators[overloadID]; found {
if est := estimator(c.estimator, target, args); est != nil {
callEst := *est
return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize}
}
}
}
if est := c.estimator.EstimateCallCost(function, overloadID, target, args); est != nil { if est := c.estimator.EstimateCallCost(function, overloadID, target, args); est != nil {
callEst := *est callEst := *est
return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize} return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize}
@ -641,8 +671,8 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())} return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())}
} }
func (c *coster) getType(e *exprpb.Expr) *exprpb.Type { func (c *coster) getType(e *exprpb.Expr) *types.Type {
return c.checkedExpr.TypeMap[e.GetId()] return c.checkedAST.TypeMap[e.GetId()]
} }
func (c *coster) getPath(e *exprpb.Expr) []string { func (c *coster) getPath(e *exprpb.Expr) []string {
@ -663,22 +693,24 @@ func (c *coster) newAstNode(e *exprpb.Expr) *astNode {
if size, ok := c.computedSizes[e.GetId()]; ok { if size, ok := c.computedSizes[e.GetId()]; ok {
derivedSize = &size derivedSize = &size
} }
return &astNode{path: path, t: c.getType(e), expr: e, derivedSize: derivedSize} return &astNode{
path: path,
t: c.getType(e),
expr: e,
derivedSize: derivedSize}
} }
// isScalar returns true if the given type is known to be of a constant size at // isScalar returns true if the given type is known to be of a constant size at
// compile time. isScalar will return false for strings (they are variable-width) // compile time. isScalar will return false for strings (they are variable-width)
// in addition to protobuf.Any and protobuf.Value (their size is not knowable at compile time). // in addition to protobuf.Any and protobuf.Value (their size is not knowable at compile time).
func isScalar(t *exprpb.Type) bool { func isScalar(t *types.Type) bool {
switch kindOf(t) { switch t.Kind() {
case kindPrimitive: case types.BoolKind, types.DoubleKind, types.DurationKind, types.IntKind, types.TimestampKind, types.UintKind:
if t.GetPrimitive() != exprpb.Type_STRING && t.GetPrimitive() != exprpb.Type_BYTES { return true
return true
}
case kindWellKnown:
if t.GetWellKnown() == exprpb.Type_DURATION || t.GetWellKnown() == exprpb.Type_TIMESTAMP {
return true
}
} }
return false return false
} }
var (
doubleTwoTo64 = math.Ldexp(1.0, 64)
)


@ -9,7 +9,6 @@ go_library(
name = "go_default_library", name = "go_default_library",
srcs = [ srcs = [
"decls.go", "decls.go",
"scopes.go",
], ],
importpath = "github.com/google/cel-go/checker/decls", importpath = "github.com/google/cel-go/checker/decls",
deps = [ deps = [


@ -18,17 +18,11 @@ import (
"fmt" "fmt"
"strings" "strings"
"google.golang.org/protobuf/proto"
"github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common/containers" "github.com/google/cel-go/common/containers"
"github.com/google/cel-go/common/decls"
"github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/pb"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/parser" "github.com/google/cel-go/parser"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
) )
type aggregateLiteralElementType int type aggregateLiteralElementType int
@ -76,15 +70,15 @@ var (
// which can be used to assist with type-checking. // which can be used to assist with type-checking.
type Env struct { type Env struct {
container *containers.Container container *containers.Container
provider ref.TypeProvider provider types.Provider
declarations *decls.Scopes declarations *Scopes
aggLitElemType aggregateLiteralElementType aggLitElemType aggregateLiteralElementType
filteredOverloadIDs map[string]struct{} filteredOverloadIDs map[string]struct{}
} }
// NewEnv returns a new *Env with the given parameters. // NewEnv returns a new *Env with the given parameters.
func NewEnv(container *containers.Container, provider ref.TypeProvider, opts ...Option) (*Env, error) { func NewEnv(container *containers.Container, provider types.Provider, opts ...Option) (*Env, error) {
declarations := decls.NewScopes() declarations := newScopes()
declarations.Push() declarations.Push()
envOptions := &options{} envOptions := &options{}
@ -113,24 +107,31 @@ func NewEnv(container *containers.Container, provider ref.TypeProvider, opts ...
}, nil }, nil
} }
// Add adds new Decl protos to the Env. // AddIdents configures the checker with a list of variable declarations.
// Returns an error for identifier redeclarations. //
func (e *Env) Add(decls ...*exprpb.Decl) error { // If there are overlapping declarations, the method will error.
func (e *Env) AddIdents(declarations ...*decls.VariableDecl) error {
errMsgs := make([]errorMsg, 0) errMsgs := make([]errorMsg, 0)
for _, decl := range decls { for _, d := range declarations {
switch decl.DeclKind.(type) { errMsgs = append(errMsgs, e.addIdent(d))
case *exprpb.Decl_Ident: }
errMsgs = append(errMsgs, e.addIdent(sanitizeIdent(decl))) return formatError(errMsgs)
case *exprpb.Decl_Function: }
errMsgs = append(errMsgs, e.setFunction(sanitizeFunction(decl))...)
} // AddFunctions configures the checker with a list of function declarations.
//
// If there are overlapping declarations, the method will error.
func (e *Env) AddFunctions(declarations ...*decls.FunctionDecl) error {
errMsgs := make([]errorMsg, 0)
for _, d := range declarations {
errMsgs = append(errMsgs, e.setFunction(d)...)
} }
return formatError(errMsgs) return formatError(errMsgs)
} }
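AddIdents and AddFunctions replace the proto-based Add call with declarations from common/decls. A hedged sketch of declaring one variable through the new API; the variable name and the registry setup are illustrative:

```go
package checkerexample

import (
	"github.com/google/cel-go/checker"
	"github.com/google/cel-go/common/containers"
	"github.com/google/cel-go/common/decls"
	"github.com/google/cel-go/common/types"
)

// newCheckerEnv builds a checker environment and declares a single variable
// through AddIdents. The "request_size" identifier is a placeholder.
func newCheckerEnv() (*checker.Env, error) {
	reg, err := types.NewRegistry()
	if err != nil {
		return nil, err
	}
	env, err := checker.NewEnv(containers.DefaultContainer, reg)
	if err != nil {
		return nil, err
	}
	// AddIdents replaces the old Add(decls ...*exprpb.Decl) call shown above.
	if err := env.AddIdents(decls.NewVariable("request_size", types.IntType)); err != nil {
		return nil, err
	}
	return env, nil
}
```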
// LookupIdent returns a Decl proto for typeName as an identifier in the Env. // LookupIdent returns a Decl proto for typeName as an identifier in the Env.
// Returns nil if no such identifier is found in the Env. // Returns nil if no such identifier is found in the Env.
func (e *Env) LookupIdent(name string) *exprpb.Decl { func (e *Env) LookupIdent(name string) *decls.VariableDecl {
for _, candidate := range e.container.ResolveCandidateNames(name) { for _, candidate := range e.container.ResolveCandidateNames(name) {
if ident := e.declarations.FindIdent(candidate); ident != nil { if ident := e.declarations.FindIdent(candidate); ident != nil {
return ident return ident
@ -139,8 +140,8 @@ func (e *Env) LookupIdent(name string) *exprpb.Decl {
// Next try to import the name as a reference to a message type. If found, // Next try to import the name as a reference to a message type. If found,
// the declaration is added to the outest (global) scope of the // the declaration is added to the outest (global) scope of the
// environment, so next time we can access it faster. // environment, so next time we can access it faster.
if t, found := e.provider.FindType(candidate); found { if t, found := e.provider.FindStructType(candidate); found {
decl := decls.NewVar(candidate, t) decl := decls.NewVariable(candidate, t)
e.declarations.AddIdent(decl) e.declarations.AddIdent(decl)
return decl return decl
} }
@ -148,11 +149,7 @@ func (e *Env) LookupIdent(name string) *exprpb.Decl {
// Next try to import this as an enum value by splitting the name in a type prefix and // Next try to import this as an enum value by splitting the name in a type prefix and
// the enum inside. // the enum inside.
if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType { if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType {
decl := decls.NewIdent(candidate, decl := decls.NewConstant(candidate, types.IntType, enumValue)
decls.Int,
&exprpb.Constant{
ConstantKind: &exprpb.Constant_Int64Value{
Int64Value: int64(enumValue.(types.Int))}})
e.declarations.AddIdent(decl) e.declarations.AddIdent(decl)
return decl return decl
} }
@ -162,7 +159,7 @@ func (e *Env) LookupIdent(name string) *exprpb.Decl {
// LookupFunction returns a Decl proto for typeName as a function in env. // LookupFunction returns a Decl proto for typeName as a function in env.
// Returns nil if no such function is found in env. // Returns nil if no such function is found in env.
func (e *Env) LookupFunction(name string) *exprpb.Decl { func (e *Env) LookupFunction(name string) *decls.FunctionDecl {
for _, candidate := range e.container.ResolveCandidateNames(name) { for _, candidate := range e.container.ResolveCandidateNames(name) {
if fn := e.declarations.FindFunction(candidate); fn != nil { if fn := e.declarations.FindFunction(candidate); fn != nil {
return fn return fn
@ -171,88 +168,46 @@ func (e *Env) LookupFunction(name string) *exprpb.Decl {
return nil return nil
} }
// addOverload adds overload to function declaration f.
// Returns one or more errorMsg values if the overload overlaps with an existing overload or macro.
func (e *Env) addOverload(f *exprpb.Decl, overload *exprpb.Decl_FunctionDecl_Overload) []errorMsg {
errMsgs := make([]errorMsg, 0)
function := f.GetFunction()
emptyMappings := newMapping()
overloadFunction := decls.NewFunctionType(overload.GetResultType(),
overload.GetParams()...)
overloadErased := substitute(emptyMappings, overloadFunction, true)
for _, existing := range function.GetOverloads() {
existingFunction := decls.NewFunctionType(existing.GetResultType(), existing.GetParams()...)
existingErased := substitute(emptyMappings, existingFunction, true)
overlap := isAssignable(emptyMappings, overloadErased, existingErased) != nil ||
isAssignable(emptyMappings, existingErased, overloadErased) != nil
if overlap &&
overload.GetIsInstanceFunction() == existing.GetIsInstanceFunction() {
errMsgs = append(errMsgs,
overlappingOverloadError(f.Name,
overload.GetOverloadId(), overloadFunction,
existing.GetOverloadId(), existingFunction))
}
}
for _, macro := range parser.AllMacros {
if macro.Function() == f.Name &&
macro.IsReceiverStyle() == overload.GetIsInstanceFunction() &&
macro.ArgCount() == len(overload.GetParams()) {
errMsgs = append(errMsgs, overlappingMacroError(f.Name, macro.ArgCount()))
}
}
if len(errMsgs) > 0 {
return errMsgs
}
function.Overloads = append(function.GetOverloads(), overload)
return errMsgs
}
// setFunction adds the function Decl to the Env. // setFunction adds the function Decl to the Env.
// Adds a function decl if one doesn't already exist, then adds all overloads from the Decl. // Adds a function decl if one doesn't already exist, then adds all overloads from the Decl.
// If overload overlaps with an existing overload, adds to the errors in the Env instead. // If overload overlaps with an existing overload, adds to the errors in the Env instead.
func (e *Env) setFunction(decl *exprpb.Decl) []errorMsg { func (e *Env) setFunction(fn *decls.FunctionDecl) []errorMsg {
errorMsgs := make([]errorMsg, 0) errMsgs := make([]errorMsg, 0)
overloads := decl.GetFunction().GetOverloads() current := e.declarations.FindFunction(fn.Name())
current := e.declarations.FindFunction(decl.Name) if current != nil {
if current == nil { var err error
//Add the function declaration without overloads and check the overloads below. current, err = current.Merge(fn)
current = decls.NewFunction(decl.Name) if err != nil {
} else { return append(errMsgs, errorMsg(err.Error()))
existingOverloads := map[string]*exprpb.Decl_FunctionDecl_Overload{}
for _, overload := range current.GetFunction().GetOverloads() {
existingOverloads[overload.GetOverloadId()] = overload
} }
newOverloads := []*exprpb.Decl_FunctionDecl_Overload{} } else {
for _, overload := range overloads { current = fn
existing, found := existingOverloads[overload.GetOverloadId()] }
if !found || !overloadsEqual(existing, overload) { for _, overload := range current.OverloadDecls() {
newOverloads = append(newOverloads, overload) for _, macro := range parser.AllMacros {
if macro.Function() == current.Name() &&
macro.IsReceiverStyle() == overload.IsMemberFunction() &&
macro.ArgCount() == len(overload.ArgTypes()) {
errMsgs = append(errMsgs, overlappingMacroError(current.Name(), macro.ArgCount()))
} }
} }
overloads = newOverloads if len(errMsgs) > 0 {
if len(newOverloads) == 0 { return errMsgs
return errorMsgs
} }
// Copy on write since we don't know where this original definition came from.
current = proto.Clone(current).(*exprpb.Decl)
} }
e.declarations.SetFunction(current) e.declarations.SetFunction(current)
for _, overload := range overloads { return errMsgs
errorMsgs = append(errorMsgs, e.addOverload(current, overload)...)
}
return errorMsgs
} }
// addIdent adds the Decl to the declarations in the Env. // addIdent adds the Decl to the declarations in the Env.
// Returns a non-empty errorMsg if the identifier is already declared in the scope. // Returns a non-empty errorMsg if the identifier is already declared in the scope.
func (e *Env) addIdent(decl *exprpb.Decl) errorMsg { func (e *Env) addIdent(decl *decls.VariableDecl) errorMsg {
current := e.declarations.FindIdentInScope(decl.Name) current := e.declarations.FindIdentInScope(decl.Name())
if current != nil { if current != nil {
if proto.Equal(current, decl) { if current.DeclarationIsEquivalent(decl) {
return "" return ""
} }
return overlappingIdentifierError(decl.Name) return overlappingIdentifierError(decl.Name())
} }
e.declarations.AddIdent(decl) e.declarations.AddIdent(decl)
return "" return ""
@ -264,111 +219,9 @@ func (e *Env) isOverloadDisabled(overloadID string) bool {
return found return found
} }
// overloadsEqual returns whether two overloads have identical signatures.
//
// type parameter names are ignored as they may be specified in any order and have no bearing on overload
// equivalence
func overloadsEqual(o1, o2 *exprpb.Decl_FunctionDecl_Overload) bool {
return o1.GetOverloadId() == o2.GetOverloadId() &&
o1.GetIsInstanceFunction() == o2.GetIsInstanceFunction() &&
paramsEqual(o1.GetParams(), o2.GetParams()) &&
proto.Equal(o1.GetResultType(), o2.GetResultType())
}
// paramsEqual returns whether two lists have equal length and all types are equal
func paramsEqual(p1, p2 []*exprpb.Type) bool {
if len(p1) != len(p2) {
return false
}
for i, a := range p1 {
b := p2[i]
if !proto.Equal(a, b) {
return false
}
}
return true
}
// sanitizeFunction replaces well-known types referenced by message name with their equivalent
// CEL built-in type instances.
func sanitizeFunction(decl *exprpb.Decl) *exprpb.Decl {
fn := decl.GetFunction()
// Determine whether the declaration requires replacements from proto-based message type
// references to well-known CEL type references.
var needsSanitizing bool
for _, o := range fn.GetOverloads() {
if isObjectWellKnownType(o.GetResultType()) {
needsSanitizing = true
break
}
for _, p := range o.GetParams() {
if isObjectWellKnownType(p) {
needsSanitizing = true
break
}
}
}
// Early return if the declaration requires no modification.
if !needsSanitizing {
return decl
}
// Sanitize all of the overloads if any overload requires an update to its type references.
overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(fn.GetOverloads()))
for i, o := range fn.GetOverloads() {
rt := o.GetResultType()
if isObjectWellKnownType(rt) {
rt = getObjectWellKnownType(rt)
}
params := make([]*exprpb.Type, len(o.GetParams()))
copy(params, o.GetParams())
for j, p := range params {
if isObjectWellKnownType(p) {
params[j] = getObjectWellKnownType(p)
}
}
// If sanitized, replace the overload definition.
if o.IsInstanceFunction {
overloads[i] =
decls.NewInstanceOverload(o.GetOverloadId(), params, rt)
} else {
overloads[i] =
decls.NewOverload(o.GetOverloadId(), params, rt)
}
}
return decls.NewFunction(decl.GetName(), overloads...)
}
// sanitizeIdent replaces the identifier's well-known types referenced by message name with
// references to CEL built-in type instances.
func sanitizeIdent(decl *exprpb.Decl) *exprpb.Decl {
id := decl.GetIdent()
t := id.GetType()
if !isObjectWellKnownType(t) {
return decl
}
return decls.NewIdent(decl.GetName(), getObjectWellKnownType(t), id.GetValue())
}
// isObjectWellKnownType returns true if the input type is an OBJECT type with a message name
// that corresponds the message name of a built-in CEL type.
func isObjectWellKnownType(t *exprpb.Type) bool {
if kindOf(t) != kindObject {
return false
}
_, found := pb.CheckedWellKnowns[t.GetMessageType()]
return found
}
// getObjectWellKnownType returns the built-in CEL type declaration for input type's message name.
func getObjectWellKnownType(t *exprpb.Type) *exprpb.Type {
return pb.CheckedWellKnowns[t.GetMessageType()]
}
// validatedDeclarations returns a reference to the validated variable and function declaration scope stack. // validatedDeclarations returns a reference to the validated variable and function declaration scope stack.
// must be copied before use. // must be copied before use.
func (e *Env) validatedDeclarations() *decls.Scopes { func (e *Env) validatedDeclarations() *Scopes {
return e.declarations return e.declarations
} }
@ -402,19 +255,6 @@ func overlappingIdentifierError(name string) errorMsg {
return errorMsg(fmt.Sprintf("overlapping identifier for name '%s'", name)) return errorMsg(fmt.Sprintf("overlapping identifier for name '%s'", name))
} }
func overlappingOverloadError(name string,
overloadID1 string, f1 *exprpb.Type,
overloadID2 string, f2 *exprpb.Type) errorMsg {
return errorMsg(fmt.Sprintf(
"overlapping overload for name '%s' (type '%s' with overloadId: '%s' "+
"cannot be distinguished from '%s' with overloadId: '%s')",
name,
FormatCheckedType(f1),
overloadID1,
FormatCheckedType(f2),
overloadID2))
}
func overlappingMacroError(name string, argCount int) errorMsg { func overlappingMacroError(name string, argCount int) errorMsg {
return errorMsg(fmt.Sprintf( return errorMsg(fmt.Sprintf(
"overlapping macro for name '%s' with %d args", name, argCount)) "overlapping macro for name '%s' with %d args", name, argCount))


@ -15,82 +15,78 @@
package checker package checker
import ( import (
"reflect"
"github.com/google/cel-go/common" "github.com/google/cel-go/common"
"github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/types"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
) )
// typeErrors is a specialization of Errors. // typeErrors is a specialization of Errors.
type typeErrors struct { type typeErrors struct {
*common.Errors errs *common.Errors
} }
func (e *typeErrors) undeclaredReference(l common.Location, container string, name string) { func (e *typeErrors) fieldTypeMismatch(id int64, l common.Location, name string, field, value *types.Type) {
e.ReportError(l, "undeclared reference to '%s' (in container '%s')", name, container) e.errs.ReportErrorAtID(id, l, "expected type of field '%s' is '%s' but provided type is '%s'",
name, FormatCELType(field), FormatCELType(value))
} }
func (e *typeErrors) typeDoesNotSupportFieldSelection(l common.Location, t *exprpb.Type) { func (e *typeErrors) incompatibleType(id int64, l common.Location, ex *exprpb.Expr, prev, next *types.Type) {
e.ReportError(l, "type '%s' does not support field selection", t) e.errs.ReportErrorAtID(id, l,
"incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next)
} }
func (e *typeErrors) undefinedField(l common.Location, field string) { func (e *typeErrors) noMatchingOverload(id int64, l common.Location, name string, args []*types.Type, isInstance bool) {
e.ReportError(l, "undefined field '%s'", field) signature := formatFunctionDeclType(nil, args, isInstance)
e.errs.ReportErrorAtID(id, l, "found no matching overload for '%s' applied to '%s'", name, signature)
} }
func (e *typeErrors) noMatchingOverload(l common.Location, name string, args []*exprpb.Type, isInstance bool) { func (e *typeErrors) notAComprehensionRange(id int64, l common.Location, t *types.Type) {
signature := formatFunction(nil, args, isInstance) e.errs.ReportErrorAtID(id, l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)",
e.ReportError(l, "found no matching overload for '%s' applied to '%s'", name, signature) FormatCELType(t))
} }
func (e *typeErrors) notAType(l common.Location, t *exprpb.Type) { func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field *exprpb.Expr) {
e.ReportError(l, "'%s(%v)' is not a type", FormatCheckedType(t), t) e.errs.ReportErrorAtID(id, l, "unsupported optional field selection: %v", field)
} }
func (e *typeErrors) notAMessageType(l common.Location, t *exprpb.Type) { func (e *typeErrors) notAType(id int64, l common.Location, typeName string) {
e.ReportError(l, "'%s' is not a message type", FormatCheckedType(t)) e.errs.ReportErrorAtID(id, l, "'%s' is not a type", typeName)
} }
func (e *typeErrors) fieldTypeMismatch(l common.Location, name string, field *exprpb.Type, value *exprpb.Type) { func (e *typeErrors) notAMessageType(id int64, l common.Location, typeName string) {
e.ReportError(l, "expected type of field '%s' is '%s' but provided type is '%s'", e.errs.ReportErrorAtID(id, l, "'%s' is not a message type", typeName)
name, FormatCheckedType(field), FormatCheckedType(value))
} }
func (e *typeErrors) unexpectedFailedResolution(l common.Location, typeName string) { func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex *exprpb.Expr, prev, next *ast.ReferenceInfo) {
e.ReportError(l, "[internal] unexpected failed resolution of '%s'", typeName) e.errs.ReportErrorAtID(id, l,
"reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next)
} }
func (e *typeErrors) notAComprehensionRange(l common.Location, t *exprpb.Type) { func (e *typeErrors) typeDoesNotSupportFieldSelection(id int64, l common.Location, t *types.Type) {
e.ReportError(l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)", e.errs.ReportErrorAtID(id, l, "type '%s' does not support field selection", FormatCELType(t))
FormatCheckedType(t))
} }
func (e *typeErrors) typeMismatch(l common.Location, expected *exprpb.Type, actual *exprpb.Type) { func (e *typeErrors) typeMismatch(id int64, l common.Location, expected, actual *types.Type) {
e.ReportError(l, "expected type '%s' but found '%s'", e.errs.ReportErrorAtID(id, l, "expected type '%s' but found '%s'",
FormatCheckedType(expected), FormatCheckedType(actual)) FormatCELType(expected), FormatCELType(actual))
} }
func formatFunction(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string { func (e *typeErrors) undefinedField(id int64, l common.Location, field string) {
result := "" e.errs.ReportErrorAtID(id, l, "undefined field '%s'", field)
if isInstance { }
target := argTypes[0]
argTypes = argTypes[1:] func (e *typeErrors) undeclaredReference(id int64, l common.Location, container string, name string) {
e.errs.ReportErrorAtID(id, l, "undeclared reference to '%s' (in container '%s')", name, container)
result += FormatCheckedType(target) }
result += "."
} func (e *typeErrors) unexpectedFailedResolution(id int64, l common.Location, typeName string) {
e.errs.ReportErrorAtID(id, l, "unexpected failed resolution of '%s'", typeName)
result += "(" }
for i, arg := range argTypes {
if i > 0 { func (e *typeErrors) unexpectedASTType(id int64, l common.Location, ex *exprpb.Expr) {
result += ", " e.errs.ReportErrorAtID(id, l, "unrecognized ast type: %v", reflect.TypeOf(ex))
}
result += FormatCheckedType(arg)
}
result += ")"
if resultType != nil {
result += " -> "
result += FormatCheckedType(resultType)
}
return result
} }

vendor/github.com/google/cel-go/checker/format.go generated vendored Normal file

@ -0,0 +1,216 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package checker
import (
"fmt"
"strings"
chkdecls "github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common/types"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
const (
kindUnknown = iota + 1
kindError
kindFunction
kindDyn
kindPrimitive
kindWellKnown
kindWrapper
kindNull
kindAbstract
kindType
kindList
kindMap
kindObject
kindTypeParam
)
// FormatCheckedType converts a type message into a string representation.
func FormatCheckedType(t *exprpb.Type) string {
switch kindOf(t) {
case kindDyn:
return "dyn"
case kindFunction:
return formatFunctionExprType(t.GetFunction().GetResultType(),
t.GetFunction().GetArgTypes(),
false)
case kindList:
return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().GetElemType()))
case kindObject:
return t.GetMessageType()
case kindMap:
return fmt.Sprintf("map(%s, %s)",
FormatCheckedType(t.GetMapType().GetKeyType()),
FormatCheckedType(t.GetMapType().GetValueType()))
case kindNull:
return "null"
case kindPrimitive:
switch t.GetPrimitive() {
case exprpb.Type_UINT64:
return "uint"
case exprpb.Type_INT64:
return "int"
}
return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ")
case kindType:
if t.GetType() == nil || t.GetType().GetTypeKind() == nil {
return "type"
}
return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType()))
case kindWellKnown:
switch t.GetWellKnown() {
case exprpb.Type_ANY:
return "any"
case exprpb.Type_DURATION:
return "duration"
case exprpb.Type_TIMESTAMP:
return "timestamp"
}
case kindWrapper:
return fmt.Sprintf("wrapper(%s)",
FormatCheckedType(chkdecls.NewPrimitiveType(t.GetWrapper())))
case kindError:
return "!error!"
case kindTypeParam:
return t.GetTypeParam()
case kindAbstract:
at := t.GetAbstractType()
params := at.GetParameterTypes()
paramStrs := make([]string, len(params))
for i, p := range params {
paramStrs[i] = FormatCheckedType(p)
}
return fmt.Sprintf("%s(%s)", at.GetName(), strings.Join(paramStrs, ", "))
}
return t.String()
}
type formatter func(any) string
// FormatCELType formats a types.Type value to a string representation.
//
// The type formatting is identical to FormatCheckedType.
func FormatCELType(t any) string {
dt := t.(*types.Type)
switch dt.Kind() {
case types.AnyKind:
return "any"
case types.DurationKind:
return "duration"
case types.ErrorKind:
return "!error!"
case types.NullTypeKind:
return "null"
case types.TimestampKind:
return "timestamp"
case types.TypeParamKind:
return dt.TypeName()
case types.OpaqueKind:
if dt.TypeName() == "function" {
// There is no explicit function type in the new types representation, so information like
// whether the function is a member function is absent.
return formatFunctionDeclType(dt.Parameters()[0], dt.Parameters()[1:], false)
}
case types.UnspecifiedKind:
return ""
}
if len(dt.Parameters()) == 0 {
return dt.DeclaredTypeName()
}
paramTypeNames := make([]string, 0, len(dt.Parameters()))
for _, p := range dt.Parameters() {
paramTypeNames = append(paramTypeNames, FormatCELType(p))
}
return fmt.Sprintf("%s(%s)", dt.TypeName(), strings.Join(paramTypeNames, ", "))
}
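FormatCELType is the *types.Type counterpart of FormatCheckedType. A short sketch of the notation it produces for a few composite types; the expected outputs in the comments follow from the switch above and are assumptions, not vendored test output:

```go
package formatexample

import (
	"fmt"

	"github.com/google/cel-go/checker"
	"github.com/google/cel-go/common/types"
)

// printFormattedTypes prints the rendered form of a few *types.Type values.
func printFormattedTypes() {
	fmt.Println(checker.FormatCELType(types.NewListType(types.StringType)))               // list(string)
	fmt.Println(checker.FormatCELType(types.NewMapType(types.StringType, types.DynType))) // map(string, dyn)
	fmt.Println(checker.FormatCELType(types.DurationType))                                // duration
}
```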
func formatExprType(t any) string {
if t == nil {
return ""
}
return FormatCheckedType(t.(*exprpb.Type))
}
func formatFunctionExprType(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string {
return formatFunctionInternal[*exprpb.Type](resultType, argTypes, isInstance, formatExprType)
}
func formatFunctionDeclType(resultType *types.Type, argTypes []*types.Type, isInstance bool) string {
return formatFunctionInternal[*types.Type](resultType, argTypes, isInstance, FormatCELType)
}
func formatFunctionInternal[T any](resultType T, argTypes []T, isInstance bool, format formatter) string {
result := ""
if isInstance {
target := argTypes[0]
argTypes = argTypes[1:]
result += format(target)
result += "."
}
result += "("
for i, arg := range argTypes {
if i > 0 {
result += ", "
}
result += format(arg)
}
result += ")"
rt := format(resultType)
if rt != "" {
result += " -> "
result += rt
}
return result
}
// kindOf returns the kind of the type as defined in the checked.proto.
func kindOf(t *exprpb.Type) int {
if t == nil || t.TypeKind == nil {
return kindUnknown
}
switch t.GetTypeKind().(type) {
case *exprpb.Type_Error:
return kindError
case *exprpb.Type_Function:
return kindFunction
case *exprpb.Type_Dyn:
return kindDyn
case *exprpb.Type_Primitive:
return kindPrimitive
case *exprpb.Type_WellKnown:
return kindWellKnown
case *exprpb.Type_Wrapper:
return kindWrapper
case *exprpb.Type_Null:
return kindNull
case *exprpb.Type_Type:
return kindType
case *exprpb.Type_ListType_:
return kindList
case *exprpb.Type_MapType_:
return kindMap
case *exprpb.Type_MessageType:
return kindObject
case *exprpb.Type_TypeParam:
return kindTypeParam
case *exprpb.Type_AbstractType_:
return kindAbstract
}
return kindUnknown
}


@ -15,25 +15,25 @@
package checker package checker
import ( import (
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" "github.com/google/cel-go/common/types"
) )
type mapping struct { type mapping struct {
mapping map[string]*exprpb.Type mapping map[string]*types.Type
} }
func newMapping() *mapping { func newMapping() *mapping {
return &mapping{ return &mapping{
mapping: make(map[string]*exprpb.Type), mapping: make(map[string]*types.Type),
} }
} }
func (m *mapping) add(from *exprpb.Type, to *exprpb.Type) { func (m *mapping) add(from, to *types.Type) {
m.mapping[typeKey(from)] = to m.mapping[FormatCELType(from)] = to
} }
func (m *mapping) find(from *exprpb.Type) (*exprpb.Type, bool) { func (m *mapping) find(from *types.Type) (*types.Type, bool) {
if r, found := m.mapping[typeKey(from)]; found { if r, found := m.mapping[FormatCELType(from)]; found {
return r, found return r, found
} }
return nil, false return nil, false


@ -14,12 +14,10 @@
package checker package checker
import "github.com/google/cel-go/checker/decls"
type options struct { type options struct {
crossTypeNumericComparisons bool crossTypeNumericComparisons bool
homogeneousAggregateLiterals bool homogeneousAggregateLiterals bool
validatedDeclarations *decls.Scopes validatedDeclarations *Scopes
} }
// Option is a functional option for configuring the type-checker // Option is a functional option for configuring the type-checker
@ -34,15 +32,6 @@ func CrossTypeNumericComparisons(enabled bool) Option {
} }
} }
// HomogeneousAggregateLiterals toggles support for constructing lists and maps whose elements all
// have the same type.
func HomogeneousAggregateLiterals(enabled bool) Option {
return func(opts *options) error {
opts.homogeneousAggregateLiterals = enabled
return nil
}
}
// ValidatedDeclarations provides a references to validated declarations which will be copied // ValidatedDeclarations provides a references to validated declarations which will be copied
// into new checker instances. // into new checker instances.
func ValidatedDeclarations(env *Env) Option { func ValidatedDeclarations(env *Env) Option {


@ -15,6 +15,8 @@
package checker package checker
import ( import (
"sort"
"github.com/google/cel-go/common/debug" "github.com/google/cel-go/common/debug"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
@ -47,6 +49,7 @@ func (a *semanticAdorner) GetMetadata(elem any) string {
if len(ref.GetOverloadId()) == 0 { if len(ref.GetOverloadId()) == 0 {
result += "^" + ref.Name result += "^" + ref.Name
} else { } else {
sort.Strings(ref.GetOverloadId())
for i, overload := range ref.GetOverloadId() { for i, overload := range ref.GetOverloadId() {
if i == 0 { if i == 0 {
result += "^" result += "^"


@ -12,9 +12,11 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package decls package checker
import exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" import (
"github.com/google/cel-go/common/decls"
)
// Scopes represents nested Decl sets where the Scopes value contains a Groups containing all // Scopes represents nested Decl sets where the Scopes value contains a Groups containing all
// identifiers in scope and an optional parent representing outer scopes. // identifiers in scope and an optional parent representing outer scopes.
@ -25,9 +27,9 @@ type Scopes struct {
scopes *Group scopes *Group
} }
// NewScopes creates a new, empty Scopes. // newScopes creates a new, empty Scopes.
// Some operations can't be safely performed until a Group is added with Push. // Some operations can't be safely performed until a Group is added with Push.
func NewScopes() *Scopes { func newScopes() *Scopes {
return &Scopes{ return &Scopes{
scopes: newGroup(), scopes: newGroup(),
} }
@ -35,7 +37,7 @@ func NewScopes() *Scopes {
// Copy creates a copy of the current Scopes values, including a copy of its parent if non-nil. // Copy creates a copy of the current Scopes values, including a copy of its parent if non-nil.
func (s *Scopes) Copy() *Scopes { func (s *Scopes) Copy() *Scopes {
cpy := NewScopes() cpy := newScopes()
if s == nil { if s == nil {
return cpy return cpy
} }
@ -66,14 +68,14 @@ func (s *Scopes) Pop() *Scopes {
// AddIdent adds the ident Decl in the current scope. // AddIdent adds the ident Decl in the current scope.
// Note: If the name collides with an existing identifier in the scope, the Decl is overwritten. // Note: If the name collides with an existing identifier in the scope, the Decl is overwritten.
func (s *Scopes) AddIdent(decl *exprpb.Decl) { func (s *Scopes) AddIdent(decl *decls.VariableDecl) {
s.scopes.idents[decl.Name] = decl s.scopes.idents[decl.Name()] = decl
} }
// FindIdent finds the first ident Decl with a matching name in Scopes, or nil if one cannot be // FindIdent finds the first ident Decl with a matching name in Scopes, or nil if one cannot be
// found. // found.
// Note: The search is performed from innermost to outermost. // Note: The search is performed from innermost to outermost.
func (s *Scopes) FindIdent(name string) *exprpb.Decl { func (s *Scopes) FindIdent(name string) *decls.VariableDecl {
if ident, found := s.scopes.idents[name]; found { if ident, found := s.scopes.idents[name]; found {
return ident return ident
} }
@ -86,7 +88,7 @@ func (s *Scopes) FindIdent(name string) *exprpb.Decl {
// FindIdentInScope finds the first ident Decl with a matching name in the current Scopes value, or // FindIdentInScope finds the first ident Decl with a matching name in the current Scopes value, or
// nil if one does not exist. // nil if one does not exist.
// Note: The search is only performed on the current scope and does not search outer scopes. // Note: The search is only performed on the current scope and does not search outer scopes.
func (s *Scopes) FindIdentInScope(name string) *exprpb.Decl { func (s *Scopes) FindIdentInScope(name string) *decls.VariableDecl {
if ident, found := s.scopes.idents[name]; found { if ident, found := s.scopes.idents[name]; found {
return ident return ident
} }
@ -95,14 +97,14 @@ func (s *Scopes) FindIdentInScope(name string) *exprpb.Decl {
// SetFunction adds the function Decl to the current scope. // SetFunction adds the function Decl to the current scope.
// Note: Any previous entry for a function in the current scope with the same name is overwritten. // Note: Any previous entry for a function in the current scope with the same name is overwritten.
func (s *Scopes) SetFunction(fn *exprpb.Decl) { func (s *Scopes) SetFunction(fn *decls.FunctionDecl) {
s.scopes.functions[fn.Name] = fn s.scopes.functions[fn.Name()] = fn
} }
// FindFunction finds the first function Decl with a matching name in Scopes. // FindFunction finds the first function Decl with a matching name in Scopes.
// The search is performed from innermost to outermost. // The search is performed from innermost to outermost.
// Returns nil if no such function in Scopes. // Returns nil if no such function in Scopes.
func (s *Scopes) FindFunction(name string) *exprpb.Decl { func (s *Scopes) FindFunction(name string) *decls.FunctionDecl {
if fn, found := s.scopes.functions[name]; found { if fn, found := s.scopes.functions[name]; found {
return fn return fn
} }
@ -116,16 +118,16 @@ func (s *Scopes) FindFunction(name string) *exprpb.Decl {
// Contains separate namespaces for identifier and function Decls. // Contains separate namespaces for identifier and function Decls.
// (Should be named "Scope" perhaps?) // (Should be named "Scope" perhaps?)
type Group struct { type Group struct {
idents map[string]*exprpb.Decl idents map[string]*decls.VariableDecl
functions map[string]*exprpb.Decl functions map[string]*decls.FunctionDecl
} }
// copy creates a new Group instance with a shallow copy of the variables and functions. // copy creates a new Group instance with a shallow copy of the variables and functions.
// If callers need to mutate the exprpb.Decl definitions for a Function, they should copy-on-write. // If callers need to mutate the exprpb.Decl definitions for a Function, they should copy-on-write.
func (g *Group) copy() *Group { func (g *Group) copy() *Group {
cpy := &Group{ cpy := &Group{
idents: make(map[string]*exprpb.Decl, len(g.idents)), idents: make(map[string]*decls.VariableDecl, len(g.idents)),
functions: make(map[string]*exprpb.Decl, len(g.functions)), functions: make(map[string]*decls.FunctionDecl, len(g.functions)),
} }
for n, id := range g.idents { for n, id := range g.idents {
cpy.idents[n] = id cpy.idents[n] = id
@ -139,7 +141,7 @@ func (g *Group) copy() *Group {
// newGroup creates a new Group with empty maps for identifiers and functions. // newGroup creates a new Group with empty maps for identifiers and functions.
func newGroup() *Group { func newGroup() *Group {
return &Group{ return &Group{
idents: make(map[string]*exprpb.Decl), idents: make(map[string]*decls.VariableDecl),
functions: make(map[string]*exprpb.Decl), functions: make(map[string]*decls.FunctionDecl),
} }
} }
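Scopes keeps a stack of Groups and resolves identifiers from the innermost group outward. The vendored type is now package-private, so the following is only a toy sketch of that lookup behaviour under assumed names, not the vendored API:

```go
package scopesexample

// scope is a toy stand-in for the checker's Group: one namespace level plus a
// link to the enclosing level.
type scope struct {
	idents map[string]string // name -> declared type, standing in for *decls.VariableDecl
	parent *scope
}

// push opens a nested scope whose lookups fall back to the parent, mirroring Scopes.Push.
func push(parent *scope) *scope {
	return &scope{idents: map[string]string{}, parent: parent}
}

// find searches from the innermost scope outward, mirroring Scopes.FindIdent.
func (s *scope) find(name string) (string, bool) {
	for cur := s; cur != nil; cur = cur.parent {
		if t, ok := cur.idents[name]; ok {
			return t, true
		}
	}
	return "", false
}
```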


@ -15,480 +15,21 @@
package checker package checker
import ( import (
"github.com/google/cel-go/checker/decls" "github.com/google/cel-go/common/stdlib"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/overloads"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
) )
var ( // StandardFunctions returns the Decls for all functions in the evaluator.
standardDeclarations []*exprpb.Decl //
) // Deprecated: prefer stdlib.FunctionExprDecls()
func StandardFunctions() []*exprpb.Decl {
func init() { return stdlib.FunctionExprDecls()
// Some shortcuts we use when building declarations.
paramA := decls.NewTypeParamType("A")
typeParamAList := []string{"A"}
listOfA := decls.NewListType(paramA)
paramB := decls.NewTypeParamType("B")
typeParamABList := []string{"A", "B"}
mapOfAB := decls.NewMapType(paramA, paramB)
var idents []*exprpb.Decl
for _, t := range []*exprpb.Type{
decls.Int, decls.Uint, decls.Bool,
decls.Double, decls.Bytes, decls.String} {
idents = append(idents,
decls.NewVar(FormatCheckedType(t), decls.NewTypeType(t)))
}
idents = append(idents,
decls.NewVar("list", decls.NewTypeType(listOfA)),
decls.NewVar("map", decls.NewTypeType(mapOfAB)),
decls.NewVar("null_type", decls.NewTypeType(decls.Null)),
decls.NewVar("type", decls.NewTypeType(decls.NewTypeType(nil))))
standardDeclarations = append(standardDeclarations, idents...)
standardDeclarations = append(standardDeclarations, []*exprpb.Decl{
// Booleans
decls.NewFunction(operators.Conditional,
decls.NewParameterizedOverload(overloads.Conditional,
[]*exprpb.Type{decls.Bool, paramA, paramA}, paramA,
typeParamAList)),
decls.NewFunction(operators.LogicalAnd,
decls.NewOverload(overloads.LogicalAnd,
[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool)),
decls.NewFunction(operators.LogicalOr,
decls.NewOverload(overloads.LogicalOr,
[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool)),
decls.NewFunction(operators.LogicalNot,
decls.NewOverload(overloads.LogicalNot,
[]*exprpb.Type{decls.Bool}, decls.Bool)),
decls.NewFunction(operators.NotStrictlyFalse,
decls.NewOverload(overloads.NotStrictlyFalse,
[]*exprpb.Type{decls.Bool}, decls.Bool)),
decls.NewFunction(operators.Equals,
decls.NewParameterizedOverload(overloads.Equals,
[]*exprpb.Type{paramA, paramA}, decls.Bool,
typeParamAList)),
decls.NewFunction(operators.NotEquals,
decls.NewParameterizedOverload(overloads.NotEquals,
[]*exprpb.Type{paramA, paramA}, decls.Bool,
typeParamAList)),
// Algebra.
decls.NewFunction(operators.Subtract,
decls.NewOverload(overloads.SubtractInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
decls.NewOverload(overloads.SubtractUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
decls.NewOverload(overloads.SubtractDouble,
[]*exprpb.Type{decls.Double, decls.Double}, decls.Double),
decls.NewOverload(overloads.SubtractTimestampTimestamp,
[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Duration),
decls.NewOverload(overloads.SubtractTimestampDuration,
[]*exprpb.Type{decls.Timestamp, decls.Duration}, decls.Timestamp),
decls.NewOverload(overloads.SubtractDurationDuration,
[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Duration)),
decls.NewFunction(operators.Multiply,
decls.NewOverload(overloads.MultiplyInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
decls.NewOverload(overloads.MultiplyUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
decls.NewOverload(overloads.MultiplyDouble,
[]*exprpb.Type{decls.Double, decls.Double}, decls.Double)),
decls.NewFunction(operators.Divide,
decls.NewOverload(overloads.DivideInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
decls.NewOverload(overloads.DivideUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
decls.NewOverload(overloads.DivideDouble,
[]*exprpb.Type{decls.Double, decls.Double}, decls.Double)),
decls.NewFunction(operators.Modulo,
decls.NewOverload(overloads.ModuloInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
decls.NewOverload(overloads.ModuloUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint)),
decls.NewFunction(operators.Add,
decls.NewOverload(overloads.AddInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
decls.NewOverload(overloads.AddUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
decls.NewOverload(overloads.AddDouble,
[]*exprpb.Type{decls.Double, decls.Double}, decls.Double),
decls.NewOverload(overloads.AddString,
[]*exprpb.Type{decls.String, decls.String}, decls.String),
decls.NewOverload(overloads.AddBytes,
[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bytes),
decls.NewParameterizedOverload(overloads.AddList,
[]*exprpb.Type{listOfA, listOfA}, listOfA,
typeParamAList),
decls.NewOverload(overloads.AddTimestampDuration,
[]*exprpb.Type{decls.Timestamp, decls.Duration}, decls.Timestamp),
decls.NewOverload(overloads.AddDurationTimestamp,
[]*exprpb.Type{decls.Duration, decls.Timestamp}, decls.Timestamp),
decls.NewOverload(overloads.AddDurationDuration,
[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Duration)),
decls.NewFunction(operators.Negate,
decls.NewOverload(overloads.NegateInt64,
[]*exprpb.Type{decls.Int}, decls.Int),
decls.NewOverload(overloads.NegateDouble,
[]*exprpb.Type{decls.Double}, decls.Double)),
// Index.
decls.NewFunction(operators.Index,
decls.NewParameterizedOverload(overloads.IndexList,
[]*exprpb.Type{listOfA, decls.Int}, paramA,
typeParamAList),
decls.NewParameterizedOverload(overloads.IndexMap,
[]*exprpb.Type{mapOfAB, paramA}, paramB,
typeParamABList)),
// Collections.
decls.NewFunction(overloads.Size,
decls.NewInstanceOverload(overloads.SizeStringInst,
[]*exprpb.Type{decls.String}, decls.Int),
decls.NewInstanceOverload(overloads.SizeBytesInst,
[]*exprpb.Type{decls.Bytes}, decls.Int),
decls.NewParameterizedInstanceOverload(overloads.SizeListInst,
[]*exprpb.Type{listOfA}, decls.Int, typeParamAList),
decls.NewParameterizedInstanceOverload(overloads.SizeMapInst,
[]*exprpb.Type{mapOfAB}, decls.Int, typeParamABList),
decls.NewOverload(overloads.SizeString,
[]*exprpb.Type{decls.String}, decls.Int),
decls.NewOverload(overloads.SizeBytes,
[]*exprpb.Type{decls.Bytes}, decls.Int),
decls.NewParameterizedOverload(overloads.SizeList,
[]*exprpb.Type{listOfA}, decls.Int, typeParamAList),
decls.NewParameterizedOverload(overloads.SizeMap,
[]*exprpb.Type{mapOfAB}, decls.Int, typeParamABList)),
decls.NewFunction(operators.In,
decls.NewParameterizedOverload(overloads.InList,
[]*exprpb.Type{paramA, listOfA}, decls.Bool,
typeParamAList),
decls.NewParameterizedOverload(overloads.InMap,
[]*exprpb.Type{paramA, mapOfAB}, decls.Bool,
typeParamABList)),
// Deprecated 'in()' function.
decls.NewFunction(overloads.DeprecatedIn,
decls.NewParameterizedOverload(overloads.InList,
[]*exprpb.Type{paramA, listOfA}, decls.Bool,
typeParamAList),
decls.NewParameterizedOverload(overloads.InMap,
[]*exprpb.Type{paramA, mapOfAB}, decls.Bool,
typeParamABList)),
// Conversions to type.
decls.NewFunction(overloads.TypeConvertType,
decls.NewParameterizedOverload(overloads.TypeConvertType,
[]*exprpb.Type{paramA}, decls.NewTypeType(paramA), typeParamAList)),
// Conversions to int.
decls.NewFunction(overloads.TypeConvertInt,
decls.NewOverload(overloads.IntToInt, []*exprpb.Type{decls.Int}, decls.Int),
decls.NewOverload(overloads.UintToInt, []*exprpb.Type{decls.Uint}, decls.Int),
decls.NewOverload(overloads.DoubleToInt, []*exprpb.Type{decls.Double}, decls.Int),
decls.NewOverload(overloads.StringToInt, []*exprpb.Type{decls.String}, decls.Int),
decls.NewOverload(overloads.TimestampToInt, []*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewOverload(overloads.DurationToInt, []*exprpb.Type{decls.Duration}, decls.Int)),
// Conversions to uint.
decls.NewFunction(overloads.TypeConvertUint,
decls.NewOverload(overloads.UintToUint, []*exprpb.Type{decls.Uint}, decls.Uint),
decls.NewOverload(overloads.IntToUint, []*exprpb.Type{decls.Int}, decls.Uint),
decls.NewOverload(overloads.DoubleToUint, []*exprpb.Type{decls.Double}, decls.Uint),
decls.NewOverload(overloads.StringToUint, []*exprpb.Type{decls.String}, decls.Uint)),
// Conversions to double.
decls.NewFunction(overloads.TypeConvertDouble,
decls.NewOverload(overloads.DoubleToDouble, []*exprpb.Type{decls.Double}, decls.Double),
decls.NewOverload(overloads.IntToDouble, []*exprpb.Type{decls.Int}, decls.Double),
decls.NewOverload(overloads.UintToDouble, []*exprpb.Type{decls.Uint}, decls.Double),
decls.NewOverload(overloads.StringToDouble, []*exprpb.Type{decls.String}, decls.Double)),
// Conversions to bool.
decls.NewFunction(overloads.TypeConvertBool,
decls.NewOverload(overloads.BoolToBool, []*exprpb.Type{decls.Bool}, decls.Bool),
decls.NewOverload(overloads.StringToBool, []*exprpb.Type{decls.String}, decls.Bool)),
// Conversions to string.
decls.NewFunction(overloads.TypeConvertString,
decls.NewOverload(overloads.StringToString, []*exprpb.Type{decls.String}, decls.String),
decls.NewOverload(overloads.BoolToString, []*exprpb.Type{decls.Bool}, decls.String),
decls.NewOverload(overloads.IntToString, []*exprpb.Type{decls.Int}, decls.String),
decls.NewOverload(overloads.UintToString, []*exprpb.Type{decls.Uint}, decls.String),
decls.NewOverload(overloads.DoubleToString, []*exprpb.Type{decls.Double}, decls.String),
decls.NewOverload(overloads.BytesToString, []*exprpb.Type{decls.Bytes}, decls.String),
decls.NewOverload(overloads.TimestampToString, []*exprpb.Type{decls.Timestamp}, decls.String),
decls.NewOverload(overloads.DurationToString, []*exprpb.Type{decls.Duration}, decls.String)),
// Conversions to bytes.
decls.NewFunction(overloads.TypeConvertBytes,
decls.NewOverload(overloads.BytesToBytes, []*exprpb.Type{decls.Bytes}, decls.Bytes),
decls.NewOverload(overloads.StringToBytes, []*exprpb.Type{decls.String}, decls.Bytes)),
// Conversions to timestamps.
decls.NewFunction(overloads.TypeConvertTimestamp,
decls.NewOverload(overloads.TimestampToTimestamp,
[]*exprpb.Type{decls.Timestamp}, decls.Timestamp),
decls.NewOverload(overloads.StringToTimestamp,
[]*exprpb.Type{decls.String}, decls.Timestamp),
decls.NewOverload(overloads.IntToTimestamp,
[]*exprpb.Type{decls.Int}, decls.Timestamp)),
// Conversions to durations.
decls.NewFunction(overloads.TypeConvertDuration,
decls.NewOverload(overloads.DurationToDuration,
[]*exprpb.Type{decls.Duration}, decls.Duration),
decls.NewOverload(overloads.StringToDuration,
[]*exprpb.Type{decls.String}, decls.Duration),
decls.NewOverload(overloads.IntToDuration,
[]*exprpb.Type{decls.Int}, decls.Duration)),
// Conversions to Dyn.
decls.NewFunction(overloads.TypeConvertDyn,
decls.NewParameterizedOverload(overloads.ToDyn,
[]*exprpb.Type{paramA}, decls.Dyn,
typeParamAList)),
// String functions.
decls.NewFunction(overloads.Contains,
decls.NewInstanceOverload(overloads.ContainsString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
decls.NewFunction(overloads.EndsWith,
decls.NewInstanceOverload(overloads.EndsWithString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
decls.NewFunction(overloads.Matches,
decls.NewOverload(overloads.Matches,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
decls.NewInstanceOverload(overloads.MatchesString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
decls.NewFunction(overloads.StartsWith,
decls.NewInstanceOverload(overloads.StartsWithString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
// Date/time functions.
decls.NewFunction(overloads.TimeGetFullYear,
decls.NewInstanceOverload(overloads.TimestampToYear,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToYearWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
decls.NewFunction(overloads.TimeGetMonth,
decls.NewInstanceOverload(overloads.TimestampToMonth,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToMonthWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
decls.NewFunction(overloads.TimeGetDayOfYear,
decls.NewInstanceOverload(overloads.TimestampToDayOfYear,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToDayOfYearWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
decls.NewFunction(overloads.TimeGetDayOfMonth,
decls.NewInstanceOverload(overloads.TimestampToDayOfMonthZeroBased,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
decls.NewFunction(overloads.TimeGetDate,
decls.NewInstanceOverload(overloads.TimestampToDayOfMonthOneBased,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToDayOfMonthOneBasedWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
decls.NewFunction(overloads.TimeGetDayOfWeek,
decls.NewInstanceOverload(overloads.TimestampToDayOfWeek,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToDayOfWeekWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
decls.NewFunction(overloads.TimeGetHours,
decls.NewInstanceOverload(overloads.TimestampToHours,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToHoursWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
decls.NewInstanceOverload(overloads.DurationToHours,
[]*exprpb.Type{decls.Duration}, decls.Int)),
decls.NewFunction(overloads.TimeGetMinutes,
decls.NewInstanceOverload(overloads.TimestampToMinutes,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToMinutesWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
decls.NewInstanceOverload(overloads.DurationToMinutes,
[]*exprpb.Type{decls.Duration}, decls.Int)),
decls.NewFunction(overloads.TimeGetSeconds,
decls.NewInstanceOverload(overloads.TimestampToSeconds,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToSecondsWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
decls.NewInstanceOverload(overloads.DurationToSeconds,
[]*exprpb.Type{decls.Duration}, decls.Int)),
decls.NewFunction(overloads.TimeGetMilliseconds,
decls.NewInstanceOverload(overloads.TimestampToMilliseconds,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToMillisecondsWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
decls.NewInstanceOverload(overloads.DurationToMilliseconds,
[]*exprpb.Type{decls.Duration}, decls.Int)),
// Relations.
decls.NewFunction(operators.Less,
decls.NewOverload(overloads.LessBool,
[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
decls.NewOverload(overloads.LessInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
decls.NewOverload(overloads.LessInt64Double,
[]*exprpb.Type{decls.Int, decls.Double}, decls.Bool),
decls.NewOverload(overloads.LessInt64Uint64,
[]*exprpb.Type{decls.Int, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.LessUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.LessUint64Double,
[]*exprpb.Type{decls.Uint, decls.Double}, decls.Bool),
decls.NewOverload(overloads.LessUint64Int64,
[]*exprpb.Type{decls.Uint, decls.Int}, decls.Bool),
decls.NewOverload(overloads.LessDouble,
[]*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
decls.NewOverload(overloads.LessDoubleInt64,
[]*exprpb.Type{decls.Double, decls.Int}, decls.Bool),
decls.NewOverload(overloads.LessDoubleUint64,
[]*exprpb.Type{decls.Double, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.LessString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
decls.NewOverload(overloads.LessBytes,
[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
decls.NewOverload(overloads.LessTimestamp,
[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
decls.NewOverload(overloads.LessDuration,
[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
decls.NewFunction(operators.LessEquals,
decls.NewOverload(overloads.LessEqualsBool,
[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
decls.NewOverload(overloads.LessEqualsInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
decls.NewOverload(overloads.LessEqualsInt64Double,
[]*exprpb.Type{decls.Int, decls.Double}, decls.Bool),
decls.NewOverload(overloads.LessEqualsInt64Uint64,
[]*exprpb.Type{decls.Int, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.LessEqualsUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.LessEqualsUint64Double,
[]*exprpb.Type{decls.Uint, decls.Double}, decls.Bool),
decls.NewOverload(overloads.LessEqualsUint64Int64,
[]*exprpb.Type{decls.Uint, decls.Int}, decls.Bool),
decls.NewOverload(overloads.LessEqualsDouble,
[]*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
decls.NewOverload(overloads.LessEqualsDoubleInt64,
[]*exprpb.Type{decls.Double, decls.Int}, decls.Bool),
decls.NewOverload(overloads.LessEqualsDoubleUint64,
[]*exprpb.Type{decls.Double, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.LessEqualsString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
decls.NewOverload(overloads.LessEqualsBytes,
[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
decls.NewOverload(overloads.LessEqualsTimestamp,
[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
decls.NewOverload(overloads.LessEqualsDuration,
[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
decls.NewFunction(operators.Greater,
decls.NewOverload(overloads.GreaterBool,
[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
decls.NewOverload(overloads.GreaterInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
decls.NewOverload(overloads.GreaterInt64Double,
[]*exprpb.Type{decls.Int, decls.Double}, decls.Bool),
decls.NewOverload(overloads.GreaterInt64Uint64,
[]*exprpb.Type{decls.Int, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.GreaterUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.GreaterUint64Double,
[]*exprpb.Type{decls.Uint, decls.Double}, decls.Bool),
decls.NewOverload(overloads.GreaterUint64Int64,
[]*exprpb.Type{decls.Uint, decls.Int}, decls.Bool),
decls.NewOverload(overloads.GreaterDouble,
[]*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
decls.NewOverload(overloads.GreaterDoubleInt64,
[]*exprpb.Type{decls.Double, decls.Int}, decls.Bool),
decls.NewOverload(overloads.GreaterDoubleUint64,
[]*exprpb.Type{decls.Double, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.GreaterString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
decls.NewOverload(overloads.GreaterBytes,
[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
decls.NewOverload(overloads.GreaterTimestamp,
[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
decls.NewOverload(overloads.GreaterDuration,
[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
decls.NewFunction(operators.GreaterEquals,
decls.NewOverload(overloads.GreaterEqualsBool,
[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsInt64Double,
[]*exprpb.Type{decls.Int, decls.Double}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsInt64Uint64,
[]*exprpb.Type{decls.Int, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsUint64Double,
[]*exprpb.Type{decls.Uint, decls.Double}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsUint64Int64,
[]*exprpb.Type{decls.Uint, decls.Int}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsDouble,
[]*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsDoubleInt64,
[]*exprpb.Type{decls.Double, decls.Int}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsDoubleUint64,
[]*exprpb.Type{decls.Double, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsBytes,
[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsTimestamp,
[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsDuration,
[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
}...)
} }
// StandardDeclarations returns the Decls for all functions and constants in the evaluator. // StandardTypes returns the set of type identifiers for standard library types.
func StandardDeclarations() []*exprpb.Decl { //
return standardDeclarations // Deprecated: prefer stdlib.TypeExprDecls()
func StandardTypes() []*exprpb.Decl {
return stdlib.TypeExprDecls()
} }
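The declarations above follow one pattern: each operator or function name is paired with one overload per argument-type combination via decls.NewFunction, decls.NewOverload, and decls.NewInstanceOverload. A minimal sketch of the same pattern for a hypothetical custom function (the function name and overload ids below are illustrative, not part of cel-go):
// Illustrative only: a custom declaration built with the same decls helpers.
// Assumes the usual imports:
//   "github.com/google/cel-go/checker/decls"
//   exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
func exampleCustomDecl() *exprpb.Decl {
	// shake256(string) -> bytes, declared both as a global and an instance overload.
	return decls.NewFunction("shake256",
		decls.NewOverload("shake256_string",
			[]*exprpb.Type{decls.String}, decls.Bytes),
		decls.NewInstanceOverload("string_shake256",
			[]*exprpb.Type{decls.String}, decls.Bytes))
}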

View File

@ -15,154 +15,54 @@
package checker package checker
import ( import (
"fmt" "github.com/google/cel-go/common/types"
"strings"
"github.com/google/cel-go/checker/decls"
"google.golang.org/protobuf/proto"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
) )
const (
kindUnknown = iota + 1
kindError
kindFunction
kindDyn
kindPrimitive
kindWellKnown
kindWrapper
kindNull
kindAbstract
kindType
kindList
kindMap
kindObject
kindTypeParam
)
// FormatCheckedType converts a type message into a string representation.
func FormatCheckedType(t *exprpb.Type) string {
switch kindOf(t) {
case kindDyn:
return "dyn"
case kindFunction:
return formatFunction(t.GetFunction().GetResultType(),
t.GetFunction().GetArgTypes(),
false)
case kindList:
return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().GetElemType()))
case kindObject:
return t.GetMessageType()
case kindMap:
return fmt.Sprintf("map(%s, %s)",
FormatCheckedType(t.GetMapType().GetKeyType()),
FormatCheckedType(t.GetMapType().GetValueType()))
case kindNull:
return "null"
case kindPrimitive:
switch t.GetPrimitive() {
case exprpb.Type_UINT64:
return "uint"
case exprpb.Type_INT64:
return "int"
}
return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ")
case kindType:
if t.GetType() == nil {
return "type"
}
return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType()))
case kindWellKnown:
switch t.GetWellKnown() {
case exprpb.Type_ANY:
return "any"
case exprpb.Type_DURATION:
return "duration"
case exprpb.Type_TIMESTAMP:
return "timestamp"
}
case kindWrapper:
return fmt.Sprintf("wrapper(%s)",
FormatCheckedType(decls.NewPrimitiveType(t.GetWrapper())))
case kindError:
return "!error!"
case kindTypeParam:
return t.GetTypeParam()
case kindAbstract:
at := t.GetAbstractType()
params := at.GetParameterTypes()
paramStrs := make([]string, len(params))
for i, p := range params {
paramStrs[i] = FormatCheckedType(p)
}
return fmt.Sprintf("%s(%s)", at.GetName(), strings.Join(paramStrs, ", "))
}
return t.String()
}
// isDyn returns true if the input t is either type DYN or a well-known ANY message. // isDyn returns true if the input t is either type DYN or a well-known ANY message.
func isDyn(t *exprpb.Type) bool { func isDyn(t *types.Type) bool {
// Note: object type values that are well-known and map to a DYN value in practice // Note: object type values that are well-known and map to a DYN value in practice
// are sanitized prior to being added to the environment. // are sanitized prior to being added to the environment.
switch kindOf(t) { switch t.Kind() {
case kindDyn: case types.DynKind, types.AnyKind:
return true return true
case kindWellKnown:
return t.GetWellKnown() == exprpb.Type_ANY
default: default:
return false return false
} }
} }
// isDynOrError returns true if the input is either an Error, DYN, or well-known ANY message. // isDynOrError returns true if the input is either an Error, DYN, or well-known ANY message.
func isDynOrError(t *exprpb.Type) bool { func isDynOrError(t *types.Type) bool {
return isError(t) || isDyn(t) return isError(t) || isDyn(t)
} }
func isError(t *exprpb.Type) bool { func isError(t *types.Type) bool {
return kindOf(t) == kindError return t.Kind() == types.ErrorKind
} }
func isOptional(t *exprpb.Type) bool { func isOptional(t *types.Type) bool {
if kindOf(t) == kindAbstract { if t.Kind() == types.OpaqueKind {
at := t.GetAbstractType() return t.TypeName() == "optional"
return at.GetName() == "optional"
} }
return false return false
} }
func maybeUnwrapOptional(t *exprpb.Type) (*exprpb.Type, bool) { func maybeUnwrapOptional(t *types.Type) (*types.Type, bool) {
if isOptional(t) { if isOptional(t) {
at := t.GetAbstractType() return t.Parameters()[0], true
return at.GetParameterTypes()[0], true
} }
return t, false return t, false
} }
func maybeUnwrapString(e *exprpb.Expr) (string, bool) {
switch e.GetExprKind().(type) {
case *exprpb.Expr_ConstExpr:
literal := e.GetConstExpr()
switch literal.GetConstantKind().(type) {
case *exprpb.Constant_StringValue:
return literal.GetStringValue(), true
}
}
return "", false
}
// isEqualOrLessSpecific checks whether one type is equal or less specific than the other one. // isEqualOrLessSpecific checks whether one type is equal or less specific than the other one.
// A type is less specific if it matches the other type using the DYN type. // A type is less specific if it matches the other type using the DYN type.
func isEqualOrLessSpecific(t1 *exprpb.Type, t2 *exprpb.Type) bool { func isEqualOrLessSpecific(t1, t2 *types.Type) bool {
kind1, kind2 := kindOf(t1), kindOf(t2) kind1, kind2 := t1.Kind(), t2.Kind()
// The first type is less specific. // The first type is less specific.
if isDyn(t1) || kind1 == kindTypeParam { if isDyn(t1) || kind1 == types.TypeParamKind {
return true return true
} }
// The first type is not less specific. // The first type is not less specific.
if isDyn(t2) || kind2 == kindTypeParam { if isDyn(t2) || kind2 == types.TypeParamKind {
return false return false
} }
// Types must be of the same kind to be equal. // Types must be of the same kind to be equal.
@ -173,38 +73,34 @@ func isEqualOrLessSpecific(t1 *exprpb.Type, t2 *exprpb.Type) bool {
// With limited exceptions for ANY and JSON values, the types must agree and be equivalent in // With limited exceptions for ANY and JSON values, the types must agree and be equivalent in
// order to return true. // order to return true.
switch kind1 { switch kind1 {
case kindAbstract: case types.OpaqueKind:
a1 := t1.GetAbstractType() if t1.TypeName() != t2.TypeName() ||
a2 := t2.GetAbstractType() len(t1.Parameters()) != len(t2.Parameters()) {
if a1.GetName() != a2.GetName() ||
len(a1.GetParameterTypes()) != len(a2.GetParameterTypes()) {
return false return false
} }
for i, p1 := range a1.GetParameterTypes() { for i, p1 := range t1.Parameters() {
if !isEqualOrLessSpecific(p1, a2.GetParameterTypes()[i]) { if !isEqualOrLessSpecific(p1, t2.Parameters()[i]) {
return false return false
} }
} }
return true return true
case kindList: case types.ListKind:
return isEqualOrLessSpecific(t1.GetListType().GetElemType(), t2.GetListType().GetElemType()) return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0])
case kindMap: case types.MapKind:
m1 := t1.GetMapType() return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0]) &&
m2 := t2.GetMapType() isEqualOrLessSpecific(t1.Parameters()[1], t2.Parameters()[1])
return isEqualOrLessSpecific(m1.GetKeyType(), m2.GetKeyType()) && case types.TypeKind:
isEqualOrLessSpecific(m1.GetValueType(), m2.GetValueType())
case kindType:
return true return true
default: default:
return proto.Equal(t1, t2) return t1.IsExactType(t2)
} }
} }
// internalIsAssignable returns true if t1 is assignable to t2. // internalIsAssignable returns true if t1 is assignable to t2.
func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool { func internalIsAssignable(m *mapping, t1, t2 *types.Type) bool {
// Process type parameters. // Process type parameters.
kind1, kind2 := kindOf(t1), kindOf(t2) kind1, kind2 := t1.Kind(), t2.Kind()
if kind2 == kindTypeParam { if kind2 == types.TypeParamKind {
// If t2 is a valid type substitution for t1, return true. // If t2 is a valid type substitution for t1, return true.
valid, t2HasSub := isValidTypeSubstitution(m, t1, t2) valid, t2HasSub := isValidTypeSubstitution(m, t1, t2)
if valid { if valid {
@ -217,7 +113,7 @@ func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool {
} }
// Otherwise, fall through to check whether t1 is a possible substitution for t2. // Otherwise, fall through to check whether t1 is a possible substitution for t2.
} }
if kind1 == kindTypeParam { if kind1 == types.TypeParamKind {
// Return whether t1 is a valid substitution for t2. If not, do no additional checks as the // Return whether t1 is a valid substitution for t2. If not, do no additional checks as the
// possible type substitutions have been searched in both directions. // possible type substitutions have been searched in both directions.
valid, _ := isValidTypeSubstitution(m, t2, t1) valid, _ := isValidTypeSubstitution(m, t2, t1)
@ -228,40 +124,25 @@ func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool {
if isDynOrError(t1) || isDynOrError(t2) { if isDynOrError(t1) || isDynOrError(t2) {
return true return true
} }
// Preserve the nullness checks of the legacy type-checker.
if kind1 == types.NullTypeKind {
return internalIsAssignableNull(t2)
}
if kind2 == types.NullTypeKind {
return internalIsAssignableNull(t1)
}
// Test for when the types do not need to agree, but are more specific than dyn. // Test for when the types do not need to agree, but are more specific than dyn.
switch kind1 { switch kind1 {
case kindNull: case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind,
return internalIsAssignableNull(t2) types.AnyKind, types.DurationKind, types.TimestampKind,
case kindPrimitive: types.StructKind:
return internalIsAssignablePrimitive(t1.GetPrimitive(), t2) return t1.IsAssignableType(t2)
case kindWrapper: case types.TypeKind:
return internalIsAssignable(m, decls.NewPrimitiveType(t1.GetWrapper()), t2) return kind2 == types.TypeKind
default: case types.OpaqueKind, types.ListKind, types.MapKind:
if kind1 != kind2 { return t1.Kind() == t2.Kind() && t1.TypeName() == t2.TypeName() &&
return false internalIsAssignableList(m, t1.Parameters(), t2.Parameters())
}
}
// Test for when the types must agree.
switch kind1 {
// ERROR, TYPE_PARAM, and DYN handled above.
case kindAbstract:
return internalIsAssignableAbstractType(m, t1.GetAbstractType(), t2.GetAbstractType())
case kindFunction:
return internalIsAssignableFunction(m, t1.GetFunction(), t2.GetFunction())
case kindList:
return internalIsAssignable(m, t1.GetListType().GetElemType(), t2.GetListType().GetElemType())
case kindMap:
return internalIsAssignableMap(m, t1.GetMapType(), t2.GetMapType())
case kindObject:
return t1.GetMessageType() == t2.GetMessageType()
case kindType:
// A type is a type is a type, any additional parameterization of the
// type cannot affect method resolution or assignability.
return true
case kindWellKnown:
return t1.GetWellKnown() == t2.GetWellKnown()
default: default:
return false return false
} }
@ -274,16 +155,16 @@ func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool {
// - t2 has a type substitution (t2sub) equal to t1 // - t2 has a type substitution (t2sub) equal to t1
// - t2 has a type substitution (t2sub) assignable to t1 // - t2 has a type substitution (t2sub) assignable to t1
// - t2 does not occur within t1. // - t2 does not occur within t1.
func isValidTypeSubstitution(m *mapping, t1, t2 *exprpb.Type) (valid, hasSub bool) { func isValidTypeSubstitution(m *mapping, t1, t2 *types.Type) (valid, hasSub bool) {
// Early return if the t1 and t2 are the same instance. // Early return if the t1 and t2 are the same instance.
kind1, kind2 := kindOf(t1), kindOf(t2) kind1, kind2 := t1.Kind(), t2.Kind()
if kind1 == kind2 && (t1 == t2 || proto.Equal(t1, t2)) { if kind1 == kind2 && t1.IsExactType(t2) {
return true, true return true, true
} }
if t2Sub, found := m.find(t2); found { if t2Sub, found := m.find(t2); found {
// Early return if t1 and t2Sub are the same instance as otherwise the mapping // Early return if t1 and t2Sub are the same instance as otherwise the mapping
// might mark a type as being a substitution for itself. // might mark a type as being a substitution for itself.
if kind1 == kindOf(t2Sub) && (t1 == t2Sub || proto.Equal(t1, t2Sub)) { if kind1 == t2Sub.Kind() && t1.IsExactType(t2Sub) {
return true, true return true, true
} }
// If the types are compatible, pick the more general type and return true // If the types are compatible, pick the more general type and return true
@ -305,28 +186,10 @@ func isValidTypeSubstitution(m *mapping, t1, t2 *exprpb.Type) (valid, hasSub boo
return false, false return false, false
} }
// internalIsAssignableAbstractType returns true if the abstract type names agree and all type
// parameters are assignable.
func internalIsAssignableAbstractType(m *mapping, a1 *exprpb.Type_AbstractType, a2 *exprpb.Type_AbstractType) bool {
return a1.GetName() == a2.GetName() &&
internalIsAssignableList(m, a1.GetParameterTypes(), a2.GetParameterTypes())
}
// internalIsAssignableFunction returns true if the function return type and arg types are
// assignable.
func internalIsAssignableFunction(m *mapping, f1 *exprpb.Type_FunctionType, f2 *exprpb.Type_FunctionType) bool {
f1ArgTypes := flattenFunctionTypes(f1)
f2ArgTypes := flattenFunctionTypes(f2)
if internalIsAssignableList(m, f1ArgTypes, f2ArgTypes) {
return true
}
return false
}
// internalIsAssignableList returns true if the element types at each index in the list are // internalIsAssignableList returns true if the element types at each index in the list are
// assignable from l1[i] to l2[i]. The list lengths must also agree for the lists to be // assignable from l1[i] to l2[i]. The list lengths must also agree for the lists to be
// assignable. // assignable.
func internalIsAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) bool { func internalIsAssignableList(m *mapping, l1, l2 []*types.Type) bool {
if len(l1) != len(l2) { if len(l1) != len(l2) {
return false return false
} }
@ -338,41 +201,22 @@ func internalIsAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type)
return true return true
} }
// internalIsAssignableMap returns true if map m1 may be assigned to map m2. // internalIsAssignableNull returns true if the type is nullable.
func internalIsAssignableMap(m *mapping, m1 *exprpb.Type_MapType, m2 *exprpb.Type_MapType) bool { func internalIsAssignableNull(t *types.Type) bool {
if internalIsAssignableList(m, return isLegacyNullable(t) || t.IsAssignableType(types.NullType)
[]*exprpb.Type{m1.GetKeyType(), m1.GetValueType()}, }
[]*exprpb.Type{m2.GetKeyType(), m2.GetValueType()}) {
// isLegacyNullable preserves the null-ness compatibility of the original type-checker implementation.
func isLegacyNullable(t *types.Type) bool {
switch t.Kind() {
case types.OpaqueKind, types.StructKind, types.AnyKind, types.DurationKind, types.TimestampKind:
return true return true
} }
return false return false
} }
// internalIsAssignableNull returns true if the type is nullable.
func internalIsAssignableNull(t *exprpb.Type) bool {
switch kindOf(t) {
case kindAbstract, kindObject, kindNull, kindWellKnown, kindWrapper:
return true
default:
return false
}
}
// internalIsAssignablePrimitive returns true if the target type is the same or if it is a wrapper
// for the primitive type.
func internalIsAssignablePrimitive(p exprpb.Type_PrimitiveType, target *exprpb.Type) bool {
switch kindOf(target) {
case kindPrimitive:
return p == target.GetPrimitive()
case kindWrapper:
return p == target.GetWrapper()
default:
return false
}
}
// isAssignable returns an updated type substitution mapping if t1 is assignable to t2. // isAssignable returns an updated type substitution mapping if t1 is assignable to t2.
func isAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) *mapping { func isAssignable(m *mapping, t1, t2 *types.Type) *mapping {
mCopy := m.copy() mCopy := m.copy()
if internalIsAssignable(mCopy, t1, t2) { if internalIsAssignable(mCopy, t1, t2) {
return mCopy return mCopy
@ -381,7 +225,7 @@ func isAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) *mapping {
} }
// isAssignableList returns an updated type substitution mapping if l1 is assignable to l2. // isAssignableList returns an updated type substitution mapping if l1 is assignable to l2.
func isAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) *mapping { func isAssignableList(m *mapping, l1, l2 []*types.Type) *mapping {
mCopy := m.copy() mCopy := m.copy()
if internalIsAssignableList(mCopy, l1, l2) { if internalIsAssignableList(mCopy, l1, l2) {
return mCopy return mCopy
@ -389,44 +233,8 @@ func isAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) *mapping
return nil return nil
} }
// kindOf returns the kind of the type as defined in the checked.proto.
func kindOf(t *exprpb.Type) int {
if t == nil || t.TypeKind == nil {
return kindUnknown
}
switch t.GetTypeKind().(type) {
case *exprpb.Type_Error:
return kindError
case *exprpb.Type_Function:
return kindFunction
case *exprpb.Type_Dyn:
return kindDyn
case *exprpb.Type_Primitive:
return kindPrimitive
case *exprpb.Type_WellKnown:
return kindWellKnown
case *exprpb.Type_Wrapper:
return kindWrapper
case *exprpb.Type_Null:
return kindNull
case *exprpb.Type_Type:
return kindType
case *exprpb.Type_ListType_:
return kindList
case *exprpb.Type_MapType_:
return kindMap
case *exprpb.Type_MessageType:
return kindObject
case *exprpb.Type_TypeParam:
return kindTypeParam
case *exprpb.Type_AbstractType_:
return kindAbstract
}
return kindUnknown
}
// mostGeneral returns the more general of two types which are known to unify. // mostGeneral returns the more general of two types which are known to unify.
func mostGeneral(t1 *exprpb.Type, t2 *exprpb.Type) *exprpb.Type { func mostGeneral(t1, t2 *types.Type) *types.Type {
if isEqualOrLessSpecific(t1, t2) { if isEqualOrLessSpecific(t1, t2) {
return t1 return t1
} }
@ -436,32 +244,25 @@ func mostGeneral(t1 *exprpb.Type, t2 *exprpb.Type) *exprpb.Type {
// notReferencedIn checks whether the type doesn't appear directly or transitively within the other // notReferencedIn checks whether the type doesn't appear directly or transitively within the other
// type. This is a standard requirement for type unification, commonly referred to as the "occurs // type. This is a standard requirement for type unification, commonly referred to as the "occurs
// check". // check".
func notReferencedIn(m *mapping, t *exprpb.Type, withinType *exprpb.Type) bool { func notReferencedIn(m *mapping, t, withinType *types.Type) bool {
if proto.Equal(t, withinType) { if t.IsExactType(withinType) {
return false return false
} }
withinKind := kindOf(withinType) withinKind := withinType.Kind()
switch withinKind { switch withinKind {
case kindTypeParam: case types.TypeParamKind:
wtSub, found := m.find(withinType) wtSub, found := m.find(withinType)
if !found { if !found {
return true return true
} }
return notReferencedIn(m, t, wtSub) return notReferencedIn(m, t, wtSub)
case kindAbstract: case types.OpaqueKind, types.ListKind, types.MapKind:
for _, pt := range withinType.GetAbstractType().GetParameterTypes() { for _, pt := range withinType.Parameters() {
if !notReferencedIn(m, t, pt) { if !notReferencedIn(m, t, pt) {
return false return false
} }
} }
return true return true
case kindList:
return notReferencedIn(m, t, withinType.GetListType().GetElemType())
case kindMap:
mt := withinType.GetMapType()
return notReferencedIn(m, t, mt.GetKeyType()) && notReferencedIn(m, t, mt.GetValueType())
case kindWrapper:
return notReferencedIn(m, t, decls.NewPrimitiveType(withinType.GetWrapper()))
default: default:
return true return true
} }
@ -469,39 +270,25 @@ func notReferencedIn(m *mapping, t *exprpb.Type, withinType *exprpb.Type) bool {
// substitute replaces all direct and indirect occurrences of bound type parameters. Unbound type // substitute replaces all direct and indirect occurrences of bound type parameters. Unbound type
// parameters are replaced by DYN if typeParamToDyn is true. // parameters are replaced by DYN if typeParamToDyn is true.
func substitute(m *mapping, t *exprpb.Type, typeParamToDyn bool) *exprpb.Type { func substitute(m *mapping, t *types.Type, typeParamToDyn bool) *types.Type {
if tSub, found := m.find(t); found { if tSub, found := m.find(t); found {
return substitute(m, tSub, typeParamToDyn) return substitute(m, tSub, typeParamToDyn)
} }
kind := kindOf(t) kind := t.Kind()
if typeParamToDyn && kind == kindTypeParam { if typeParamToDyn && kind == types.TypeParamKind {
return decls.Dyn return types.DynType
} }
switch kind { switch kind {
case kindAbstract: case types.OpaqueKind:
at := t.GetAbstractType() return types.NewOpaqueType(t.TypeName(), substituteParams(m, t.Parameters(), typeParamToDyn)...)
params := make([]*exprpb.Type, len(at.GetParameterTypes())) case types.ListKind:
for i, p := range at.GetParameterTypes() { return types.NewListType(substitute(m, t.Parameters()[0], typeParamToDyn))
params[i] = substitute(m, p, typeParamToDyn) case types.MapKind:
} return types.NewMapType(substitute(m, t.Parameters()[0], typeParamToDyn),
return decls.NewAbstractType(at.GetName(), params...) substitute(m, t.Parameters()[1], typeParamToDyn))
case kindFunction: case types.TypeKind:
fn := t.GetFunction() if len(t.Parameters()) > 0 {
rt := substitute(m, fn.ResultType, typeParamToDyn) return types.NewTypeTypeWithParam(substitute(m, t.Parameters()[0], typeParamToDyn))
args := make([]*exprpb.Type, len(fn.GetArgTypes()))
for i, a := range fn.ArgTypes {
args[i] = substitute(m, a, typeParamToDyn)
}
return decls.NewFunctionType(rt, args...)
case kindList:
return decls.NewListType(substitute(m, t.GetListType().GetElemType(), typeParamToDyn))
case kindMap:
mt := t.GetMapType()
return decls.NewMapType(substitute(m, mt.GetKeyType(), typeParamToDyn),
substitute(m, mt.GetValueType(), typeParamToDyn))
case kindType:
if t.GetType() != nil {
return decls.NewTypeType(substitute(m, t.GetType(), typeParamToDyn))
} }
return t return t
default: default:
@ -509,21 +296,14 @@ func substitute(m *mapping, t *exprpb.Type, typeParamToDyn bool) *exprpb.Type {
} }
} }
func typeKey(t *exprpb.Type) string { func substituteParams(m *mapping, typeParams []*types.Type, typeParamToDyn bool) []*types.Type {
return FormatCheckedType(t) subParams := make([]*types.Type, len(typeParams))
for i, tp := range typeParams {
subParams[i] = substitute(m, tp, typeParamToDyn)
}
return subParams
} }
// flattenFunctionTypes takes a function with arg types T1, T2, ..., TN and result type TR func newFunctionType(resultType *types.Type, argTypes ...*types.Type) *types.Type {
// and returns a slice containing {T1, T2, ..., TN, TR}. return types.NewOpaqueType("function", append([]*types.Type{resultType}, argTypes...)...)
func flattenFunctionTypes(f *exprpb.Type_FunctionType) []*exprpb.Type {
argTypes := f.GetArgTypes()
if len(argTypes) == 0 {
return []*exprpb.Type{f.GetResultType()}
}
flattend := make([]*exprpb.Type, len(argTypes)+1, len(argTypes)+1)
for i, at := range argTypes {
flattend[i] = at
}
flattend[len(argTypes)] = f.GetResultType()
return flattend
} }
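As a rough sketch of the encoding newFunctionType settles on (an opaque "function" type holding the result type first, then the argument types), a (int, int) -> bool function type would look like the following; the variable name is illustrative:
// Illustrative only: the encoding newFunctionType produces for (int, int) -> bool,
// i.e. newFunctionType(types.BoolType, types.IntType, types.IntType).
// The result type comes first, followed by the argument types.
var exampleFnType = types.NewOpaqueType("function",
	types.BoolType, types.IntType, types.IntType)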

52
vendor/github.com/google/cel-go/common/ast/BUILD.bazel generated vendored Normal file
View File

@ -0,0 +1,52 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
package(
default_visibility = [
"//cel:__subpackages__",
"//checker:__subpackages__",
"//common:__subpackages__",
"//interpreter:__subpackages__",
],
licenses = ["notice"], # Apache 2.0
)
go_library(
name = "go_default_library",
srcs = [
"ast.go",
"expr.go",
],
importpath = "github.com/google/cel-go/common/ast",
deps = [
"//common/types:go_default_library",
"//common/types/ref:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//types/known/structpb:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"ast_test.go",
"expr_test.go",
],
embed = [
":go_default_library",
],
deps = [
"//checker:go_default_library",
"//checker/decls:go_default_library",
"//common:go_default_library",
"//common/containers:go_default_library",
"//common/decls:go_default_library",
"//common/overloads:go_default_library",
"//common/stdlib:go_default_library",
"//common/types:go_default_library",
"//common/types/ref:go_default_library",
"//parser:go_default_library",
"//test/proto3pb:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

226
vendor/github.com/google/cel-go/common/ast/ast.go generated vendored Normal file
View File

@ -0,0 +1,226 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ast declares data structures useful for parsed and checked abstract syntax trees
package ast
import (
"fmt"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
structpb "google.golang.org/protobuf/types/known/structpb"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// CheckedAST contains a protobuf expression and source info along with CEL-native type and reference information.
type CheckedAST struct {
Expr *exprpb.Expr
SourceInfo *exprpb.SourceInfo
TypeMap map[int64]*types.Type
ReferenceMap map[int64]*ReferenceInfo
}
// CheckedASTToCheckedExpr converts a CheckedAST to a CheckedExpr protobuf.
func CheckedASTToCheckedExpr(ast *CheckedAST) (*exprpb.CheckedExpr, error) {
refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap))
for id, ref := range ast.ReferenceMap {
r, err := ReferenceInfoToReferenceExpr(ref)
if err != nil {
return nil, err
}
refMap[id] = r
}
typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap))
for id, typ := range ast.TypeMap {
t, err := types.TypeToExprType(typ)
if err != nil {
return nil, err
}
typeMap[id] = t
}
return &exprpb.CheckedExpr{
Expr: ast.Expr,
SourceInfo: ast.SourceInfo,
ReferenceMap: refMap,
TypeMap: typeMap,
}, nil
}
// CheckedExprToCheckedAST converts a CheckedExpr protobuf to a CheckedAST instance.
func CheckedExprToCheckedAST(checked *exprpb.CheckedExpr) (*CheckedAST, error) {
refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap()))
for id, ref := range checked.GetReferenceMap() {
r, err := ReferenceExprToReferenceInfo(ref)
if err != nil {
return nil, err
}
refMap[id] = r
}
typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap()))
for id, typ := range checked.GetTypeMap() {
t, err := types.ExprTypeToType(typ)
if err != nil {
return nil, err
}
typeMap[id] = t
}
return &CheckedAST{
Expr: checked.GetExpr(),
SourceInfo: checked.GetSourceInfo(),
ReferenceMap: refMap,
TypeMap: typeMap,
}, nil
}
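A minimal sketch of the round trip these two converters provide; the input CheckedExpr is assumed to come from an earlier parse-and-check step, and the helper name is illustrative:
// Illustrative only: convert a CheckedExpr proto to the CEL-native form and back.
func exampleRoundTrip(checked *exprpb.CheckedExpr) (*exprpb.CheckedExpr, error) {
	nativeAST, err := CheckedExprToCheckedAST(checked)
	if err != nil {
		return nil, err
	}
	// TypeMap and ReferenceMap are keyed by expression id.
	_ = nativeAST.TypeMap
	return CheckedASTToCheckedExpr(nativeAST)
}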
// ReferenceInfo contains a CEL native representation of an identifier reference which may refer to
// either a qualified identifier name, a set of overload ids, or a constant value from an enum.
type ReferenceInfo struct {
Name string
OverloadIDs []string
Value ref.Val
}
// NewIdentReference creates a ReferenceInfo instance for an identifier with an optional constant value.
func NewIdentReference(name string, value ref.Val) *ReferenceInfo {
return &ReferenceInfo{Name: name, Value: value}
}
// NewFunctionReference creates a ReferenceInfo instance for a set of function overloads.
func NewFunctionReference(overloads ...string) *ReferenceInfo {
info := &ReferenceInfo{}
for _, id := range overloads {
info.AddOverload(id)
}
return info
}
// AddOverload appends a function overload ID to the ReferenceInfo.
func (r *ReferenceInfo) AddOverload(overloadID string) {
for _, id := range r.OverloadIDs {
if id == overloadID {
return
}
}
r.OverloadIDs = append(r.OverloadIDs, overloadID)
}
// Equals returns whether two references are identical to each other.
func (r *ReferenceInfo) Equals(other *ReferenceInfo) bool {
if r.Name != other.Name {
return false
}
if len(r.OverloadIDs) != len(other.OverloadIDs) {
return false
}
if len(r.OverloadIDs) != 0 {
overloadMap := make(map[string]struct{}, len(r.OverloadIDs))
for _, id := range r.OverloadIDs {
overloadMap[id] = struct{}{}
}
for _, id := range other.OverloadIDs {
_, found := overloadMap[id]
if !found {
return false
}
}
}
if r.Value == nil && other.Value == nil {
return true
}
if r.Value == nil && other.Value != nil ||
r.Value != nil && other.Value == nil ||
r.Value.Equal(other.Value) != types.True {
return false
}
return true
}
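A small sketch of how AddOverload and Equals interact; the overload ids are illustrative strings:
// Illustrative only: overload sets compare equal regardless of insertion order,
// and AddOverload ignores duplicates.
func exampleReferenceEquality() bool {
	a := NewFunctionReference("size_string", "size_bytes")
	b := NewFunctionReference("size_bytes", "size_string")
	b.AddOverload("size_bytes") // no-op: already present
	return a.Equals(b) // true
}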
// ReferenceInfoToReferenceExpr converts a ReferenceInfo instance to a protobuf Reference suitable for serialization.
func ReferenceInfoToReferenceExpr(info *ReferenceInfo) (*exprpb.Reference, error) {
c, err := ValToConstant(info.Value)
if err != nil {
return nil, err
}
return &exprpb.Reference{
Name: info.Name,
OverloadId: info.OverloadIDs,
Value: c,
}, nil
}
// ReferenceExprToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance.
func ReferenceExprToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) {
v, err := ConstantToVal(ref.GetValue())
if err != nil {
return nil, err
}
return &ReferenceInfo{
Name: ref.GetName(),
OverloadIDs: ref.GetOverloadId(),
Value: v,
}, nil
}
// ValToConstant converts a CEL-native ref.Val to a protobuf Constant.
//
// Only simple scalar types are supported by this method.
func ValToConstant(v ref.Val) (*exprpb.Constant, error) {
if v == nil {
return nil, nil
}
switch v.Type() {
case types.BoolType:
return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil
case types.BytesType:
return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil
case types.DoubleType:
return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil
case types.IntType:
return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil
case types.NullType:
return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil
case types.StringType:
return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil
case types.UintType:
return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil
}
return nil, fmt.Errorf("unsupported constant kind: %v", v.Type())
}
// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val.
func ConstantToVal(c *exprpb.Constant) (ref.Val, error) {
if c == nil {
return nil, nil
}
switch c.GetConstantKind().(type) {
case *exprpb.Constant_BoolValue:
return types.Bool(c.GetBoolValue()), nil
case *exprpb.Constant_BytesValue:
return types.Bytes(c.GetBytesValue()), nil
case *exprpb.Constant_DoubleValue:
return types.Double(c.GetDoubleValue()), nil
case *exprpb.Constant_Int64Value:
return types.Int(c.GetInt64Value()), nil
case *exprpb.Constant_NullValue:
return types.NullValue, nil
case *exprpb.Constant_StringValue:
return types.String(c.GetStringValue()), nil
case *exprpb.Constant_Uint64Value:
return types.Uint(c.GetUint64Value()), nil
}
return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind())
}
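A minimal sketch of the scalar round trip through these two helpers; the function name is illustrative:
// Illustrative only: a string value survives the trip to a Constant proto and back.
func exampleConstantRoundTrip() (ref.Val, error) {
	c, err := ValToConstant(types.String("hello"))
	if err != nil {
		return nil, err
	}
	return ConstantToVal(c) // types.String("hello")
}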

709
vendor/github.com/google/cel-go/common/ast/expr.go generated vendored Normal file
View File

@ -0,0 +1,709 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import (
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// ExprKind represents the expression node kind.
type ExprKind int
const (
// UnspecifiedKind represents an unset expression with no specified properties.
UnspecifiedKind ExprKind = iota
// LiteralKind represents a primitive scalar literal.
LiteralKind
// IdentKind represents a simple variable, constant, or type identifier.
IdentKind
// SelectKind represents a field selection expression.
SelectKind
// CallKind represents a function call.
CallKind
// ListKind represents a list literal expression.
ListKind
// MapKind represents a map literal expression.
MapKind
// StructKind represents a struct literal expression.
StructKind
// ComprehensionKind represents a comprehension expression generated by a macro.
ComprehensionKind
)
// NavigateCheckedAST converts a CheckedAST to a NavigableExpr
func NavigateCheckedAST(ast *CheckedAST) NavigableExpr {
return newNavigableExpr(nil, ast.Expr, ast.TypeMap)
}
// ExprMatcher takes a NavigableExpr in and indicates whether the value is a match.
//
// This function type should be used with the `MatchDescendants` and `MatchSubset` calls.
type ExprMatcher func(NavigableExpr) bool
// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr
// is composed entirely of constant values, such as a simple literal or even a list or map literal.
func ConstantValueMatcher() ExprMatcher {
return matchIsConstantValue
}
// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches
// the specified `kind`.
func KindMatcher(kind ExprKind) ExprMatcher {
return func(e NavigableExpr) bool {
return e.Kind() == kind
}
}
// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose
// function name is equal to `funcName`.
func FunctionMatcher(funcName string) ExprMatcher {
return func(e NavigableExpr) bool {
if e.Kind() != CallKind {
return false
}
return e.AsCall().FunctionName() == funcName
}
}
// AllMatcher returns true for all descendants of a NavigableExpr, effectively flattening them into a list.
//
// Such a result would work well with subsequent MatchSubset calls.
func AllMatcher() ExprMatcher {
return func(NavigableExpr) bool {
return true
}
}
// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values of the
// descendants which match.
func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr {
return matchListInternal([]NavigableExpr{expr}, matcher, true)
}
// MatchSubset applies an ExprMatcher to a list of NavigableExpr values and their descendants, producing a
// subset of NavigableExpr values which match.
func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr {
visit := make([]NavigableExpr, len(exprs))
copy(visit, exprs)
return matchListInternal(visit, matcher, false)
}
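A minimal sketch combining NavigateCheckedAST with the matchers above to collect every call to a given function; the helper name is illustrative:
// Illustrative only: find all calls to funcName anywhere in a checked AST.
func exampleFindCalls(checked *CheckedAST, funcName string) []NavigableExpr {
	root := NavigateCheckedAST(checked)
	return MatchDescendants(root, FunctionMatcher(funcName))
}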
func matchListInternal(visit []NavigableExpr, matcher ExprMatcher, visitDescendants bool) []NavigableExpr {
var matched []NavigableExpr
for len(visit) != 0 {
e := visit[0]
if matcher(e) {
matched = append(matched, e)
}
if visitDescendants {
visit = append(visit[1:], e.Children()...)
} else {
visit = visit[1:]
}
}
return matched
}
func matchIsConstantValue(e NavigableExpr) bool {
if e.Kind() == LiteralKind {
return true
}
if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind {
for _, child := range e.Children() {
if !matchIsConstantValue(child) {
return false
}
}
return true
}
return false
}
// NavigableExpr represents the base navigable expression value.
//
// Depending on the `Kind()` value, the NavigableExpr may be converted to a concrete expression type
// as indicated by the `As<Kind>` methods.
//
// NavigableExpr values and their concrete expression types should be nil-safe. Conversion of an expr
// to the wrong kind should produce a nil value.
type NavigableExpr interface {
// ID of the expression as it appears in the AST
ID() int64
// Kind of the expression node. See ExprKind for the valid enum values.
Kind() ExprKind
// Type of the expression node.
Type() *types.Type
// Parent returns the parent expression node, if one exists.
Parent() (NavigableExpr, bool)
// Children returns a list of child expression nodes.
Children() []NavigableExpr
// ToExpr adapts this NavigableExpr to a protobuf representation.
ToExpr() *exprpb.Expr
// AsCall adapts the expr into a NavigableCallExpr
//
// The Kind() must be equal to a CallKind for the conversion to be well-defined.
AsCall() NavigableCallExpr
// AsComprehension adapts the expr into a NavigableComprehensionExpr.
//
// The Kind() must be equal to a ComprehensionKind for the conversion to be well-defined.
AsComprehension() NavigableComprehensionExpr
// AsIdent adapts the expr into an identifier string.
//
// The Kind() must be equal to an IdentKind for the conversion to be well-defined.
AsIdent() string
// AsLiteral adapts the expr into a constant ref.Val.
//
// The Kind() must be equal to a LiteralKind for the conversion to be well-defined.
AsLiteral() ref.Val
// AsList adapts the expr into a NavigableListExpr.
//
// The Kind() must be equal to a ListKind for the conversion to be well-defined.
AsList() NavigableListExpr
// AsMap adapts the expr into a NavigableMapExpr.
//
// The Kind() must be equal to a MapKind for the conversion to be well-defined.
AsMap() NavigableMapExpr
// AsSelect adapts the expr into a NavigableSelectExpr.
//
// The Kind() must be equal to a SelectKind for the conversion to be well-defined.
AsSelect() NavigableSelectExpr
// AsStruct adapts the expr into a NavigableStructExpr.
//
// The Kind() must be equal to a StructKind for the conversion to be well-defined.
AsStruct() NavigableStructExpr
// marker interface method
isNavigable()
}
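A short sketch of the intended usage: inspect Kind() first, then convert with the matching As<Kind> method; the helper name is illustrative:
// Illustrative only: describe a node based on its kind. The As<Kind> conversions
// are only well-defined when Kind() matches, as documented above.
func exampleDescribe(e NavigableExpr) string {
	switch e.Kind() {
	case CallKind:
		return "call to " + e.AsCall().FunctionName()
	case IdentKind:
		return "ident " + e.AsIdent()
	case LiteralKind:
		return "literal of type " + e.Type().TypeName()
	default:
		return "other"
	}
}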
// NavigableCallExpr defines an interface for inspecting a function call and its arguments.
type NavigableCallExpr interface {
// FunctionName returns the name of the function.
FunctionName() string
// Target returns the target of the expression if one is present.
Target() NavigableExpr
// Args returns the list of call arguments, excluding the target.
Args() []NavigableExpr
// ReturnType returns the result type of the call.
ReturnType() *types.Type
// marker interface method
isNavigable()
}
// NavigableListExpr defines an interface for inspecting a list literal expression.
type NavigableListExpr interface {
// Elements returns the list elements as navigable expressions.
Elements() []NavigableExpr
// OptionalIndices returns the list of optional indices in the list literal.
OptionalIndices() []int32
// Size returns the number of elements in the list.
Size() int
// marker interface method
isNavigable()
}
// NavigableSelectExpr defines an interface for inspecting a select expression.
type NavigableSelectExpr interface {
// Operand returns the selection operand expression.
Operand() NavigableExpr
// FieldName returns the field name being selected from the operand.
FieldName() string
// IsTestOnly indicates whether the select expression is a presence test generated by a macro.
IsTestOnly() bool
// marker interface method
isNavigable()
}
// NavigableMapExpr defines an interface for inspecting a map expression.
type NavigableMapExpr interface {
// Entries returns the map key value pairs as NavigableEntry values.
Entries() []NavigableEntry
// Size returns the number of entries in the map.
Size() int
// marker interface method
isNavigable()
}
// NavigableEntry defines an interface for inspecting a map entry.
type NavigableEntry interface {
// Key returns the map entry key expression.
Key() NavigableExpr
// Value returns the map entry value expression.
Value() NavigableExpr
// IsOptional returns whether the entry is optional.
IsOptional() bool
// marker interface method
isNavigable()
}
// NavigableStructExpr defines an interface for inspecting a struct and its field initializers.
type NavigableStructExpr interface {
// TypeName returns the struct type name.
TypeName() string
// Fields returns the set of field initializers in the struct expression as NavigableField values.
Fields() []NavigableField
// marker interface method
isNavigable()
}
// NavigableField defines an interface for inspecting a struct field initialization.
type NavigableField interface {
// FieldName returns the name of the field.
FieldName() string
// Value returns the field initialization expression.
Value() NavigableExpr
// IsOptional returns whether the field is optional.
IsOptional() bool
// marker interface method
isNavigable()
}
// NavigableComprehensionExpr defines an interface for inspecting a comprehension expression.
type NavigableComprehensionExpr interface {
// IterRange returns the iteration range expression.
IterRange() NavigableExpr
// IterVar returns the iteration variable name.
IterVar() string
// AccuVar returns the accumulation variable name.
AccuVar() string
// AccuInit returns the accumulation variable initialization expression.
AccuInit() NavigableExpr
// LoopCondition returns the loop condition expression.
LoopCondition() NavigableExpr
// LoopStep returns the loop step expression.
LoopStep() NavigableExpr
// Result returns the comprehension result expression.
Result() NavigableExpr
// marker interface method
isNavigable()
}
func newNavigableExpr(parent NavigableExpr, expr *exprpb.Expr, typeMap map[int64]*types.Type) NavigableExpr {
kind, factory := kindOf(expr)
nav := &navigableExprImpl{
parent: parent,
kind: kind,
expr: expr,
typeMap: typeMap,
createChildren: factory,
}
return nav
}
type navigableExprImpl struct {
parent NavigableExpr
kind ExprKind
expr *exprpb.Expr
typeMap map[int64]*types.Type
createChildren childFactory
}
func (nav *navigableExprImpl) ID() int64 {
return nav.ToExpr().GetId()
}
func (nav *navigableExprImpl) Kind() ExprKind {
return nav.kind
}
func (nav *navigableExprImpl) Type() *types.Type {
if t, found := nav.typeMap[nav.ID()]; found {
return t
}
return types.DynType
}
func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) {
if nav.parent != nil {
return nav.parent, true
}
return nil, false
}
func (nav *navigableExprImpl) Children() []NavigableExpr {
return nav.createChildren(nav)
}
func (nav *navigableExprImpl) ToExpr() *exprpb.Expr {
return nav.expr
}
func (nav *navigableExprImpl) AsCall() NavigableCallExpr {
return navigableCallImpl{navigableExprImpl: nav}
}
func (nav *navigableExprImpl) AsComprehension() NavigableComprehensionExpr {
return navigableComprehensionImpl{navigableExprImpl: nav}
}
func (nav *navigableExprImpl) AsIdent() string {
return nav.ToExpr().GetIdentExpr().GetName()
}
func (nav *navigableExprImpl) AsLiteral() ref.Val {
if nav.Kind() != LiteralKind {
return nil
}
val, err := ConstantToVal(nav.ToExpr().GetConstExpr())
if err != nil {
panic(err)
}
return val
}
func (nav *navigableExprImpl) AsList() NavigableListExpr {
return navigableListImpl{navigableExprImpl: nav}
}
func (nav *navigableExprImpl) AsMap() NavigableMapExpr {
return navigableMapImpl{navigableExprImpl: nav}
}
func (nav *navigableExprImpl) AsSelect() NavigableSelectExpr {
return navigableSelectImpl{navigableExprImpl: nav}
}
func (nav *navigableExprImpl) AsStruct() NavigableStructExpr {
return navigableStructImpl{navigableExprImpl: nav}
}
func (nav *navigableExprImpl) createChild(e *exprpb.Expr) NavigableExpr {
return newNavigableExpr(nav, e, nav.typeMap)
}
func (nav *navigableExprImpl) isNavigable() {}
type navigableCallImpl struct {
*navigableExprImpl
}
func (call navigableCallImpl) FunctionName() string {
return call.ToExpr().GetCallExpr().GetFunction()
}
func (call navigableCallImpl) Target() NavigableExpr {
t := call.ToExpr().GetCallExpr().GetTarget()
if t != nil {
return call.createChild(t)
}
return nil
}
func (call navigableCallImpl) Args() []NavigableExpr {
args := call.ToExpr().GetCallExpr().GetArgs()
navArgs := make([]NavigableExpr, len(args))
for i, a := range args {
navArgs[i] = call.createChild(a)
}
return navArgs
}
func (call navigableCallImpl) ReturnType() *types.Type {
return call.Type()
}
type navigableComprehensionImpl struct {
*navigableExprImpl
}
func (comp navigableComprehensionImpl) IterRange() NavigableExpr {
return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetIterRange())
}
func (comp navigableComprehensionImpl) IterVar() string {
return comp.ToExpr().GetComprehensionExpr().GetIterVar()
}
func (comp navigableComprehensionImpl) AccuVar() string {
return comp.ToExpr().GetComprehensionExpr().GetAccuVar()
}
func (comp navigableComprehensionImpl) AccuInit() NavigableExpr {
return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetAccuInit())
}
func (comp navigableComprehensionImpl) LoopCondition() NavigableExpr {
return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopCondition())
}
func (comp navigableComprehensionImpl) LoopStep() NavigableExpr {
return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopStep())
}
func (comp navigableComprehensionImpl) Result() NavigableExpr {
return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetResult())
}
type navigableListImpl struct {
*navigableExprImpl
}
func (l navigableListImpl) Elements() []NavigableExpr {
return l.Children()
}
func (l navigableListImpl) OptionalIndices() []int32 {
return l.ToExpr().GetListExpr().GetOptionalIndices()
}
func (l navigableListImpl) Size() int {
return len(l.ToExpr().GetListExpr().GetElements())
}
type navigableMapImpl struct {
*navigableExprImpl
}
func (m navigableMapImpl) Entries() []NavigableEntry {
mapExpr := m.ToExpr().GetStructExpr()
entries := make([]NavigableEntry, len(mapExpr.GetEntries()))
for i, e := range mapExpr.GetEntries() {
entries[i] = navigableEntryImpl{
key: m.createChild(e.GetMapKey()),
val: m.createChild(e.GetValue()),
isOpt: e.GetOptionalEntry(),
}
}
return entries
}
func (m navigableMapImpl) Size() int {
return len(m.ToExpr().GetStructExpr().GetEntries())
}
type navigableEntryImpl struct {
key NavigableExpr
val NavigableExpr
isOpt bool
}
func (e navigableEntryImpl) Key() NavigableExpr {
return e.key
}
func (e navigableEntryImpl) Value() NavigableExpr {
return e.val
}
func (e navigableEntryImpl) IsOptional() bool {
return e.isOpt
}
func (e navigableEntryImpl) isNavigable() {}
type navigableSelectImpl struct {
*navigableExprImpl
}
func (sel navigableSelectImpl) FieldName() string {
return sel.ToExpr().GetSelectExpr().GetField()
}
func (sel navigableSelectImpl) IsTestOnly() bool {
return sel.ToExpr().GetSelectExpr().GetTestOnly()
}
func (sel navigableSelectImpl) Operand() NavigableExpr {
return sel.createChild(sel.ToExpr().GetSelectExpr().GetOperand())
}
type navigableStructImpl struct {
*navigableExprImpl
}
func (s navigableStructImpl) TypeName() string {
return s.ToExpr().GetStructExpr().GetMessageName()
}
func (s navigableStructImpl) Fields() []NavigableField {
fieldInits := s.ToExpr().GetStructExpr().GetEntries()
fields := make([]NavigableField, len(fieldInits))
for i, f := range fieldInits {
fields[i] = navigableFieldImpl{
name: f.GetFieldKey(),
val: s.createChild(f.GetValue()),
isOpt: f.GetOptionalEntry(),
}
}
return fields
}
type navigableFieldImpl struct {
name string
val NavigableExpr
isOpt bool
}
func (f navigableFieldImpl) FieldName() string {
return f.name
}
func (f navigableFieldImpl) Value() NavigableExpr {
return f.val
}
func (f navigableFieldImpl) IsOptional() bool {
return f.isOpt
}
func (f navigableFieldImpl) isNavigable() {}
func kindOf(expr *exprpb.Expr) (ExprKind, childFactory) {
switch expr.GetExprKind().(type) {
case *exprpb.Expr_ConstExpr:
return LiteralKind, noopFactory
case *exprpb.Expr_IdentExpr:
return IdentKind, noopFactory
case *exprpb.Expr_SelectExpr:
return SelectKind, selectFactory
case *exprpb.Expr_CallExpr:
return CallKind, callArgFactory
case *exprpb.Expr_ListExpr:
return ListKind, listElemFactory
case *exprpb.Expr_StructExpr:
if expr.GetStructExpr().GetMessageName() != "" {
return StructKind, structEntryFactory
}
return MapKind, mapEntryFactory
case *exprpb.Expr_ComprehensionExpr:
return ComprehensionKind, comprehensionFactory
default:
return UnspecifiedKind, noopFactory
}
}
type childFactory func(*navigableExprImpl) []NavigableExpr
func noopFactory(*navigableExprImpl) []NavigableExpr {
return nil
}
func selectFactory(nav *navigableExprImpl) []NavigableExpr {
return []NavigableExpr{
nav.createChild(nav.ToExpr().GetSelectExpr().GetOperand()),
}
}
func callArgFactory(nav *navigableExprImpl) []NavigableExpr {
call := nav.ToExpr().GetCallExpr()
argCount := len(call.GetArgs())
if call.GetTarget() != nil {
argCount++
}
navExprs := make([]NavigableExpr, argCount)
i := 0
if call.GetTarget() != nil {
navExprs[i] = nav.createChild(call.GetTarget())
i++
}
for _, arg := range call.GetArgs() {
navExprs[i] = nav.createChild(arg)
i++
}
return navExprs
}
func listElemFactory(nav *navigableExprImpl) []NavigableExpr {
l := nav.ToExpr().GetListExpr()
navExprs := make([]NavigableExpr, len(l.GetElements()))
for i, e := range l.GetElements() {
navExprs[i] = nav.createChild(e)
}
return navExprs
}
func structEntryFactory(nav *navigableExprImpl) []NavigableExpr {
s := nav.ToExpr().GetStructExpr()
entries := make([]NavigableExpr, len(s.GetEntries()))
for i, e := range s.GetEntries() {
entries[i] = nav.createChild(e.GetValue())
}
return entries
}
func mapEntryFactory(nav *navigableExprImpl) []NavigableExpr {
s := nav.ToExpr().GetStructExpr()
entries := make([]NavigableExpr, len(s.GetEntries())*2)
j := 0
for _, e := range s.GetEntries() {
entries[j] = nav.createChild(e.GetMapKey())
entries[j+1] = nav.createChild(e.GetValue())
j += 2
}
return entries
}
func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr {
compre := nav.ToExpr().GetComprehensionExpr()
return []NavigableExpr{
nav.createChild(compre.GetIterRange()),
nav.createChild(compre.GetAccuInit()),
nav.createChild(compre.GetLoopCondition()),
nav.createChild(compre.GetLoopStep()),
nav.createChild(compre.GetResult()),
}
}
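// Illustrative sketch, not part of the vendored file: one way to consume the
// navigable AST above is a plain recursive walk over Children(). The helper
// below (a hypothetical name) counts nodes of a given kind and assumes it
// lives in this same package so the unexported implementation types resolve.
func countNodesOfKind(node NavigableExpr, kind ExprKind) int {
	count := 0
	if node.Kind() == kind {
		count++
	}
	for _, child := range node.Children() {
		count += countNodesOfKind(child, kind)
	}
	return count
}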

View File

@ -0,0 +1,39 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
package(
default_visibility = ["//visibility:public"],
licenses = ["notice"], # Apache 2.0
)
go_library(
name = "go_default_library",
srcs = [
"decls.go",
],
importpath = "github.com/google/cel-go/common/decls",
deps = [
"//checker/decls:go_default_library",
"//common/functions:go_default_library",
"//common/types:go_default_library",
"//common/types/ref:go_default_library",
"//common/types/traits:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"decls_test.go",
],
embed = [":go_default_library"],
deps = [
"//checker/decls:go_default_library",
"//common/overloads:go_default_library",
"//common/types:go_default_library",
"//common/types/ref:go_default_library",
"//common/types/traits:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

844
vendor/github.com/google/cel-go/common/decls/decls.go generated vendored Normal file
View File

@ -0,0 +1,844 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package decls contains function and variable declaration structs and helper methods.
package decls
import (
"fmt"
"strings"
chkdecls "github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common/functions"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// NewFunction creates a new function declaration with a set of function options to configure overloads
// and function definitions (implementations).
//
// Functions are checked for name collisions and singleton redefinition.
func NewFunction(name string, opts ...FunctionOpt) (*FunctionDecl, error) {
fn := &FunctionDecl{
name: name,
overloads: map[string]*OverloadDecl{},
overloadOrdinals: []string{},
}
var err error
for _, opt := range opts {
fn, err = opt(fn)
if err != nil {
return nil, err
}
}
if len(fn.overloads) == 0 {
return nil, fmt.Errorf("function %s must have at least one overload", name)
}
return fn, nil
}
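// Illustrative sketch, not part of the vendored file: declaring a function
// with NewFunction plus the Overload and UnaryBinding options defined later in
// this file. The 'shout' name and overload id are hypothetical.
func exampleShoutFunction() (*FunctionDecl, error) {
	return NewFunction("shout",
		Overload("shout_string",
			[]*types.Type{types.StringType}, types.StringType,
			UnaryBinding(func(arg ref.Val) ref.Val {
				s, ok := arg.(types.String)
				if !ok {
					return MaybeNoSuchOverload("shout", arg)
				}
				return types.String(strings.ToUpper(string(s)))
			})))
}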
// FunctionDecl defines a function name, overload set, and optionally a singleton definition for all
// overload instances.
type FunctionDecl struct {
name string
// overloads associated with the function name.
overloads map[string]*OverloadDecl
// singleton implementation of the function for all overloads.
//
// If this option is set, an error will occur if any overloads specify a per-overload implementation
// or if another function with the same name attempts to redefine the singleton.
singleton *functions.Overload
// disableTypeGuards is a performance optimization to disable detailed runtime type checks which could
// add overhead on common operations. Setting this option true leaves error checks and argument checks
// intact.
disableTypeGuards bool
// state indicates that the binding should be provided as a declaration, as a runtime binding, or both.
state declarationState
// overloadOrdinals indicates the order in which the overload was declared.
overloadOrdinals []string
}
type declarationState int
const (
declarationStateUnset declarationState = iota
declarationDisabled
declarationEnabled
)
// Name returns the function name in human-readable terms, e.g. 'contains' of 'math.least'
func (f *FunctionDecl) Name() string {
if f == nil {
return ""
}
return f.name
}
// IsDeclarationDisabled indicates that the function implementation should be added to the dispatcher, but the
// declaration should not be exposed for use in expressions.
func (f *FunctionDecl) IsDeclarationDisabled() bool {
return f.state == declarationDisabled
}
// Merge combines an existing function declaration with another.
//
// If a function is extended, by say adding new overloads to an existing function, then it is merged with the
// prior definition of the function at which point its overloads must not collide with pre-existing overloads
// and its bindings (singleton, or per-overload) must not conflict with previous definitions either.
func (f *FunctionDecl) Merge(other *FunctionDecl) (*FunctionDecl, error) {
if f == other {
return f, nil
}
if f.Name() != other.Name() {
return nil, fmt.Errorf("cannot merge unrelated functions. %s and %s", f.Name(), other.Name())
}
merged := &FunctionDecl{
name: f.Name(),
overloads: make(map[string]*OverloadDecl, len(f.overloads)),
singleton: f.singleton,
overloadOrdinals: make([]string, len(f.overloads)),
// if one function is expecting type-guards and the other is not, then they
// must not be disabled.
disableTypeGuards: f.disableTypeGuards && other.disableTypeGuards,
// default to the current functions declaration state.
state: f.state,
}
// If the other state indicates that the declaration should be explicitly enabled or
// disabled, then update the merged state with the most recent value.
if other.state != declarationStateUnset {
merged.state = other.state
}
// baseline copy of the overloads and their ordinals
copy(merged.overloadOrdinals, f.overloadOrdinals)
for oID, o := range f.overloads {
merged.overloads[oID] = o
}
// overloads and their ordinals from the other declaration are appended next
for _, oID := range other.overloadOrdinals {
o := other.overloads[oID]
err := merged.AddOverload(o)
if err != nil {
return nil, fmt.Errorf("function declaration merge failed: %v", err)
}
}
if other.singleton != nil {
if merged.singleton != nil && merged.singleton != other.singleton {
return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
}
merged.singleton = other.singleton
}
return merged, nil
}
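// Illustrative sketch, not part of the vendored file: extending an existing
// declaration with an additional overload by merging two FunctionDecl values.
// The overload id is hypothetical; Merge rejects colliding signatures and
// conflicting singleton bindings as described above.
func exampleExtendFunction(base *FunctionDecl) (*FunctionDecl, error) {
	extension, err := NewFunction(base.Name(),
		Overload("shout_bytes", []*types.Type{types.BytesType}, types.StringType))
	if err != nil {
		return nil, err
	}
	return base.Merge(extension)
}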
// AddOverload ensures that the new overload does not collide with an existing overload signature;
// however, if the function signatures are identical, the implementation may be rewritten as it's
// difficult to compare functions by object identity.
func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error {
if f == nil {
return fmt.Errorf("nil function cannot add overload: %s", overload.ID())
}
for oID, o := range f.overloads {
if oID != overload.ID() && o.SignatureOverlaps(overload) {
return fmt.Errorf("overload signature collision in function %s: %s collides with %s", f.Name(), oID, overload.ID())
}
if oID == overload.ID() {
if o.SignatureEquals(overload) && o.IsNonStrict() == overload.IsNonStrict() {
// Allow redefinition of an overload implementation so long as the signatures match.
f.overloads[oID] = overload
return nil
}
return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.Name(), oID)
}
}
f.overloadOrdinals = append(f.overloadOrdinals, overload.ID())
f.overloads[overload.ID()] = overload
return nil
}
// OverloadDecls returns the overload declarations in the order in which they were declared.
func (f *FunctionDecl) OverloadDecls() []*OverloadDecl {
if f == nil {
return []*OverloadDecl{}
}
overloads := make([]*OverloadDecl, 0, len(f.overloads))
for _, oID := range f.overloadOrdinals {
overloads = append(overloads, f.overloads[oID])
}
return overloads
}
// Bindings produces a set of function bindings, if any are defined.
func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) {
if f == nil {
return []*functions.Overload{}, nil
}
overloads := []*functions.Overload{}
nonStrict := false
for _, oID := range f.overloadOrdinals {
o := f.overloads[oID]
if o.hasBinding() {
overload := &functions.Overload{
Operator: o.ID(),
Unary: o.guardedUnaryOp(f.Name(), f.disableTypeGuards),
Binary: o.guardedBinaryOp(f.Name(), f.disableTypeGuards),
Function: o.guardedFunctionOp(f.Name(), f.disableTypeGuards),
OperandTrait: o.OperandTrait(),
NonStrict: o.IsNonStrict(),
}
overloads = append(overloads, overload)
nonStrict = nonStrict || o.IsNonStrict()
}
}
if f.singleton != nil {
if len(overloads) != 0 {
return nil, fmt.Errorf("singleton function incompatible with specialized overloads: %s", f.Name())
}
overloads = []*functions.Overload{
{
Operator: f.Name(),
Unary: f.singleton.Unary,
Binary: f.singleton.Binary,
Function: f.singleton.Function,
OperandTrait: f.singleton.OperandTrait,
},
}
// fall-through to return single overload case.
}
if len(overloads) == 0 {
return overloads, nil
}
// Single overload. Replicate an entry for it using the function name as well.
if len(overloads) == 1 {
if overloads[0].Operator == f.Name() {
return overloads, nil
}
return append(overloads, &functions.Overload{
Operator: f.Name(),
Unary: overloads[0].Unary,
Binary: overloads[0].Binary,
Function: overloads[0].Function,
NonStrict: overloads[0].NonStrict,
OperandTrait: overloads[0].OperandTrait,
}), nil
}
// All of the defined overloads are wrapped into a top-level function which
// performs dynamic dispatch to the proper overload based on the argument types.
bindings := append([]*functions.Overload{}, overloads...)
funcDispatch := func(args ...ref.Val) ref.Val {
for _, oID := range f.overloadOrdinals {
o := f.overloads[oID]
// During dynamic dispatch over multiple functions, signature agreement checks
// are preserved in order to assist with the function resolution step.
switch len(args) {
case 1:
if o.unaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) {
return o.unaryOp(args[0])
}
case 2:
if o.binaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) {
return o.binaryOp(args[0], args[1])
}
}
if o.functionOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) {
return o.functionOp(args...)
}
// eventually this will fall through to the noSuchOverload below.
}
return MaybeNoSuchOverload(f.Name(), args...)
}
function := &functions.Overload{
Operator: f.Name(),
Function: funcDispatch,
NonStrict: nonStrict,
}
return append(bindings, function), nil
}
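// Illustrative sketch, not part of the vendored file: materializing the
// runtime bindings for a declaration and invoking the entry registered under
// the function name. A real dispatcher would index bindings by Operator rather
// than scan a slice.
func exampleInvoke(fn *FunctionDecl, args ...ref.Val) (ref.Val, error) {
	bindings, err := fn.Bindings()
	if err != nil {
		return nil, err
	}
	for _, b := range bindings {
		if b.Operator == fn.Name() && b.Function != nil {
			return b.Function(args...), nil
		}
	}
	return MaybeNoSuchOverload(fn.Name(), args...), nil
}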
// MaybeNoSuchOverload determines whether to propagate an error if one is provided as an argument, or
// to return an unknown set, or to produce a new error for a missing function signature.
func MaybeNoSuchOverload(funcName string, args ...ref.Val) ref.Val {
argTypes := make([]string, len(args))
var unk *types.Unknown = nil
for i, arg := range args {
if types.IsError(arg) {
return arg
}
if types.IsUnknown(arg) {
unk = types.MergeUnknowns(arg.(*types.Unknown), unk)
}
argTypes[i] = arg.Type().TypeName()
}
if unk != nil {
return unk
}
signature := strings.Join(argTypes, ", ")
return types.NewErr("no such overload: %s(%s)", funcName, signature)
}
// FunctionOpt defines a functional option for mutating a function declaration.
type FunctionOpt func(*FunctionDecl) (*FunctionDecl, error)
// DisableTypeGuards disables automatically generated function invocation guards on direct overload calls.
// Type guards remain on during dynamic dispatch for parsed-only expressions.
func DisableTypeGuards(value bool) FunctionOpt {
return func(fn *FunctionDecl) (*FunctionDecl, error) {
fn.disableTypeGuards = value
return fn, nil
}
}
// DisableDeclaration indicates that the function declaration should be disabled, but the runtime function
// binding should be provided. Marking a function as runtime-only is a safe way to manage deprecations
// of function declarations while still preserving the runtime behavior for previously compiled expressions.
func DisableDeclaration(value bool) FunctionOpt {
return func(fn *FunctionDecl) (*FunctionDecl, error) {
if value {
fn.state = declarationDisabled
} else {
fn.state = declarationEnabled
}
return fn, nil
}
}
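// Illustrative sketch, not part of the vendored file: deprecating the
// declaration of a hypothetical 'legacy_in' function while keeping its runtime
// binding, the same pattern the standard library uses for the old '_in_'
// operator further below.
func exampleDeprecatedFunction() (*FunctionDecl, error) {
	return NewFunction("legacy_in",
		DisableDeclaration(true), // runtime-only: no longer declared for new expressions
		Overload("legacy_in_list",
			[]*types.Type{types.DynType, types.NewListType(types.DynType)}, types.BoolType))
}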
// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads.
//
// Note, this approach works well if the operand is expected to have a specific trait which it implements,
// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt {
trait := 0
for _, t := range traits {
trait = trait | t
}
return func(f *FunctionDecl) (*FunctionDecl, error) {
if f.singleton != nil {
return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
}
f.singleton = &functions.Overload{
Operator: f.Name(),
Unary: fn,
OperandTrait: trait,
}
return f, nil
}
}
// SingletonBinaryBinding creates a singleton function definition to be used with all function overloads.
//
// Note, this approach works well if the operand is expected to have a specific trait which it implements,
// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt {
trait := 0
for _, t := range traits {
trait = trait | t
}
return func(f *FunctionDecl) (*FunctionDecl, error) {
if f.singleton != nil {
return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
}
f.singleton = &functions.Overload{
Operator: f.Name(),
Binary: fn,
OperandTrait: trait,
}
return f, nil
}
}
// SingletonFunctionBinding creates a singleton function definition to be used with all function overloads.
//
// Note, this approach works well if the operand is expected to have a specific trait which it implements,
// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt {
trait := 0
for _, t := range traits {
trait = trait | t
}
return func(f *FunctionDecl) (*FunctionDecl, error) {
if f.singleton != nil {
return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
}
f.singleton = &functions.Overload{
Operator: f.Name(),
Function: fn,
OperandTrait: trait,
}
return f, nil
}
}
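// Illustrative sketch, not part of the vendored file: a singleton binary
// binding shared by every overload of a hypothetical 'max' function. The
// traits package is not imported by this file, so the example assumes
// "github.com/google/cel-go/common/types/traits" is added to the imports.
func exampleMaxFunction() (*FunctionDecl, error) {
	return NewFunction("max",
		Overload("max_int", []*types.Type{types.IntType, types.IntType}, types.IntType),
		Overload("max_double", []*types.Type{types.DoubleType, types.DoubleType}, types.DoubleType),
		SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
			if lhs.(traits.Comparer).Compare(rhs) == types.IntNegOne {
				return rhs
			}
			return lhs
		}, traits.ComparerType))
}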
// Overload defines a new global overload with an overload id, argument types, and result type. Through the
// use of OverloadOpt options, the overload may also be configured with a binding, an operand trait, and to
// be non-strict.
//
// Note: function bindings should be commonly configured with Overload instances whereas operand traits and
// strict-ness should be rare occurrences.
func Overload(overloadID string,
args []*types.Type, resultType *types.Type,
opts ...OverloadOpt) FunctionOpt {
return newOverload(overloadID, false, args, resultType, opts...)
}
// MemberOverload defines a new receiver-style overload (or member function) with an overload id, argument types,
// and result type. Through the use of OverloadOpt options, the overload may also be configured with a binding,
// an operand trait, and to be non-strict.
//
// Note: function bindings should be commonly configured with Overload instances whereas operand traits and
// strict-ness should be rare occurrences.
func MemberOverload(overloadID string,
args []*types.Type, resultType *types.Type,
opts ...OverloadOpt) FunctionOpt {
return newOverload(overloadID, true, args, resultType, opts...)
}
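// Illustrative sketch, not part of the vendored file: a receiver-style
// 'contains' function, i.e. 'str'.contains('sub'), declared with
// MemberOverload and a BinaryBinding. The overload id is hypothetical.
func exampleContainsFunction() (*FunctionDecl, error) {
	return NewFunction("contains",
		MemberOverload("string_contains_string",
			[]*types.Type{types.StringType, types.StringType}, types.BoolType,
			BinaryBinding(func(str, substr ref.Val) ref.Val {
				return types.Bool(strings.Contains(
					string(str.(types.String)), string(substr.(types.String))))
			})))
}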
func newOverload(overloadID string,
memberFunction bool, args []*types.Type, resultType *types.Type,
opts ...OverloadOpt) FunctionOpt {
return func(f *FunctionDecl) (*FunctionDecl, error) {
overload, err := newOverloadInternal(overloadID, memberFunction, args, resultType, opts...)
if err != nil {
return nil, err
}
err = f.AddOverload(overload)
if err != nil {
return nil, err
}
return f, nil
}
}
func newOverloadInternal(overloadID string,
memberFunction bool, args []*types.Type, resultType *types.Type,
opts ...OverloadOpt) (*OverloadDecl, error) {
overload := &OverloadDecl{
id: overloadID,
argTypes: args,
resultType: resultType,
isMemberFunction: memberFunction,
}
var err error
for _, opt := range opts {
overload, err = opt(overload)
if err != nil {
return nil, err
}
}
return overload, nil
}
// OverloadDecl contains the definition of a single overload id with a specific signature, and an optional
// implementation.
type OverloadDecl struct {
id string
argTypes []*types.Type
resultType *types.Type
isMemberFunction bool
// nonStrict indicates that the function will accept error and unknown arguments as inputs.
nonStrict bool
// operandTrait indicates whether the member argument should have a specific type-trait.
//
// This is useful for creating overloads which operate on a type-interface rather than a concrete type.
operandTrait int
// Function implementation options. Optional, but encouraged.
// unaryOp is a function binding that takes a single argument.
unaryOp functions.UnaryOp
// binaryOp is a function binding that takes two arguments.
binaryOp functions.BinaryOp
// functionOp is a catch-all for zero-arity and three-plus arity functions.
functionOp functions.FunctionOp
}
// ID mirrors the overload signature and provides a unique id which may be referenced within the type-checker
// and interpreter to optimize performance.
//
// The ID format is usually one of two styles:
// global: <functionName>_<argType>_<argTypeN>
// member: <memberType>_<functionName>_<argType>_<argTypeN>
func (o *OverloadDecl) ID() string {
if o == nil {
return ""
}
return o.id
}
// ArgTypes contains the set of argument types expected by the overload.
//
// For member functions ArgTypes[0] represents the member operand type.
func (o *OverloadDecl) ArgTypes() []*types.Type {
if o == nil {
return emptyArgs
}
return o.argTypes
}
// IsMemberFunction indicates whether the overload is a member function
func (o *OverloadDecl) IsMemberFunction() bool {
if o == nil {
return false
}
return o.isMemberFunction
}
// IsNonStrict returns whether the overload accepts errors and unknown values as arguments.
func (o *OverloadDecl) IsNonStrict() bool {
if o == nil {
return false
}
return o.nonStrict
}
// OperandTrait returns the trait mask of the first operand to the overload call, e.g.
// `traits.Indexer`
func (o *OverloadDecl) OperandTrait() int {
if o == nil {
return 0
}
return o.operandTrait
}
// ResultType indicates the output type from calling the function.
func (o *OverloadDecl) ResultType() *types.Type {
if o == nil {
// *types.Type is nil-safe
return nil
}
return o.resultType
}
// TypeParams returns the type parameter names associated with the overload.
func (o *OverloadDecl) TypeParams() []string {
typeParams := map[string]struct{}{}
collectParamNames(typeParams, o.ResultType())
for _, arg := range o.ArgTypes() {
collectParamNames(typeParams, arg)
}
params := make([]string, 0, len(typeParams))
for param := range typeParams {
params = append(params, param)
}
return params
}
// SignatureEquals determines whether the incoming overload declaration signature is equal to the current signature.
//
// Result type, operand trait, and strict-ness are not considered as part of signature equality.
func (o *OverloadDecl) SignatureEquals(other *OverloadDecl) bool {
if o == other {
return true
}
if o.ID() != other.ID() || o.IsMemberFunction() != other.IsMemberFunction() || len(o.ArgTypes()) != len(other.ArgTypes()) {
return false
}
for i, at := range o.ArgTypes() {
oat := other.ArgTypes()[i]
if !at.IsEquivalentType(oat) {
return false
}
}
return o.ResultType().IsEquivalentType(other.ResultType())
}
// SignatureOverlaps indicates whether two functions have non-equal, but overlapping function signatures.
//
// For example, list(dyn) collides with list(string) since the 'dyn' type can contain a 'string' type.
func (o *OverloadDecl) SignatureOverlaps(other *OverloadDecl) bool {
if o.IsMemberFunction() != other.IsMemberFunction() || len(o.ArgTypes()) != len(other.ArgTypes()) {
return false
}
argsOverlap := true
for i, argType := range o.ArgTypes() {
otherArgType := other.ArgTypes()[i]
argsOverlap = argsOverlap &&
(argType.IsAssignableType(otherArgType) ||
otherArgType.IsAssignableType(argType))
}
return argsOverlap
}
// hasBinding indicates whether the overload already has a definition.
func (o *OverloadDecl) hasBinding() bool {
return o != nil && (o.unaryOp != nil || o.binaryOp != nil || o.functionOp != nil)
}
// guardedUnaryOp creates an invocation guard around the provided unary operator, if one is defined.
func (o *OverloadDecl) guardedUnaryOp(funcName string, disableTypeGuards bool) functions.UnaryOp {
if o.unaryOp == nil {
return nil
}
return func(arg ref.Val) ref.Val {
if !o.matchesRuntimeUnarySignature(disableTypeGuards, arg) {
return MaybeNoSuchOverload(funcName, arg)
}
return o.unaryOp(arg)
}
}
// guardedBinaryOp creates an invocation guard around the provided binary operator, if one is defined.
func (o *OverloadDecl) guardedBinaryOp(funcName string, disableTypeGuards bool) functions.BinaryOp {
if o.binaryOp == nil {
return nil
}
return func(arg1, arg2 ref.Val) ref.Val {
if !o.matchesRuntimeBinarySignature(disableTypeGuards, arg1, arg2) {
return MaybeNoSuchOverload(funcName, arg1, arg2)
}
return o.binaryOp(arg1, arg2)
}
}
// guardedFunctionOp creates an invocation guard around the provided variadic function binding, if one is provided.
func (o *OverloadDecl) guardedFunctionOp(funcName string, disableTypeGuards bool) functions.FunctionOp {
if o.functionOp == nil {
return nil
}
return func(args ...ref.Val) ref.Val {
if !o.matchesRuntimeSignature(disableTypeGuards, args...) {
return MaybeNoSuchOverload(funcName, args...)
}
return o.functionOp(args...)
}
}
// matchesRuntimeUnarySignature indicates whether the argument type is runtime assignable to the overload's expected argument.
func (o *OverloadDecl) matchesRuntimeUnarySignature(disableTypeGuards bool, arg ref.Val) bool {
return matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[0], arg) &&
matchOperandTrait(o.OperandTrait(), arg)
}
// matchesRuntimeBinarySignature indicates whether the argument types are runtime assignable to the overload's expected arguments.
func (o *OverloadDecl) matchesRuntimeBinarySignature(disableTypeGuards bool, arg1, arg2 ref.Val) bool {
return matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[0], arg1) &&
matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[1], arg2) &&
matchOperandTrait(o.OperandTrait(), arg1)
}
// matchesRuntimeSignature indicates whether the argument types are runtime assignable to the overload's expected arguments.
func (o *OverloadDecl) matchesRuntimeSignature(disableTypeGuards bool, args ...ref.Val) bool {
if len(args) != len(o.ArgTypes()) {
return false
}
if len(args) == 0 {
return true
}
for i, arg := range args {
if !matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[i], arg) {
return false
}
}
return matchOperandTrait(o.OperandTrait(), args[0])
}
func matchRuntimeArgType(nonStrict, disableTypeGuards bool, argType *types.Type, arg ref.Val) bool {
if nonStrict && (disableTypeGuards || types.IsUnknownOrError(arg)) {
return true
}
if types.IsUnknownOrError(arg) {
return false
}
return disableTypeGuards || argType.IsAssignableRuntimeType(arg)
}
func matchOperandTrait(trait int, arg ref.Val) bool {
return trait == 0 || arg.Type().HasTrait(trait) || types.IsUnknownOrError(arg)
}
// OverloadOpt is a functional option for configuring a function overload.
type OverloadOpt func(*OverloadDecl) (*OverloadDecl, error)
// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime
// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
func UnaryBinding(binding functions.UnaryOp) OverloadOpt {
return func(o *OverloadDecl) (*OverloadDecl, error) {
if o.hasBinding() {
return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
}
if len(o.ArgTypes()) != 1 {
return nil, fmt.Errorf("unary function bound to non-unary overload: %s", o.ID())
}
o.unaryOp = binding
return o, nil
}
}
// BinaryBinding provides the implementation of a binary overload. The provided function is protected by a runtime
// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
func BinaryBinding(binding functions.BinaryOp) OverloadOpt {
return func(o *OverloadDecl) (*OverloadDecl, error) {
if o.hasBinding() {
return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
}
if len(o.ArgTypes()) != 2 {
return nil, fmt.Errorf("binary function bound to non-binary overload: %s", o.ID())
}
o.binaryOp = binding
return o, nil
}
}
// FunctionBinding provides the implementation of a variadic overload. The provided function is protected by a runtime
// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
func FunctionBinding(binding functions.FunctionOp) OverloadOpt {
return func(o *OverloadDecl) (*OverloadDecl, error) {
if o.hasBinding() {
return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
}
o.functionOp = binding
return o, nil
}
}
// OverloadIsNonStrict enables the function to be called with error and unknown argument values.
//
// Note: do not use this option unless absolutely necessary as it should be an uncommon feature.
func OverloadIsNonStrict() OverloadOpt {
return func(o *OverloadDecl) (*OverloadDecl, error) {
o.nonStrict = true
return o, nil
}
}
// OverloadOperandTrait configures a set of traits which the first argument to the overload must implement in order to be
// successfully invoked.
func OverloadOperandTrait(trait int) OverloadOpt {
return func(o *OverloadDecl) (*OverloadDecl, error) {
o.operandTrait = trait
return o, nil
}
}
// NewConstant creates a new constant declaration.
func NewConstant(name string, t *types.Type, v ref.Val) *VariableDecl {
return &VariableDecl{name: name, varType: t, value: v}
}
// NewVariable creates a new variable declaration.
func NewVariable(name string, t *types.Type) *VariableDecl {
return &VariableDecl{name: name, varType: t}
}
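// Illustrative sketch, not part of the vendored file: declaring a typed
// variable and a typed constant. The names 'request.size' and 'pi' are
// hypothetical.
func exampleVariableDecls() []*VariableDecl {
	return []*VariableDecl{
		NewVariable("request.size", types.IntType),
		NewConstant("pi", types.DoubleType, types.Double(3.14159)),
	}
}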
// VariableDecl defines a variable declaration which may optionally have a constant value.
type VariableDecl struct {
name string
varType *types.Type
value ref.Val
}
// Name returns the fully-qualified variable name
func (v *VariableDecl) Name() string {
if v == nil {
return ""
}
return v.name
}
// Type returns the types.Type value associated with the variable.
func (v *VariableDecl) Type() *types.Type {
if v == nil {
// types.Type is nil-safe
return nil
}
return v.varType
}
// Value returns the constant value associated with the declaration.
func (v *VariableDecl) Value() ref.Val {
if v == nil {
return nil
}
return v.value
}
// DeclarationIsEquivalent returns true if one variable declaration has the same name and same type as the input.
func (v *VariableDecl) DeclarationIsEquivalent(other *VariableDecl) bool {
if v == other {
return true
}
return v.Name() == other.Name() && v.Type().IsEquivalentType(other.Type())
}
// VariableDeclToExprDecl converts a go-native variable declaration into a protobuf-type variable declaration.
func VariableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) {
varType, err := types.TypeToExprType(v.Type())
if err != nil {
return nil, err
}
return chkdecls.NewVar(v.Name(), varType), nil
}
// TypeVariable creates a new type identifier for use within a types.Provider
func TypeVariable(t *types.Type) *VariableDecl {
return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t))
}
// FunctionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration.
func FunctionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) {
overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(f.overloads))
for i, oID := range f.overloadOrdinals {
o := f.overloads[oID]
paramNames := map[string]struct{}{}
argTypes := make([]*exprpb.Type, len(o.ArgTypes()))
for j, a := range o.ArgTypes() {
collectParamNames(paramNames, a)
at, err := types.TypeToExprType(a)
if err != nil {
return nil, err
}
argTypes[j] = at
}
collectParamNames(paramNames, o.ResultType())
resultType, err := types.TypeToExprType(o.ResultType())
if err != nil {
return nil, err
}
if len(paramNames) == 0 {
if o.IsMemberFunction() {
overloads[i] = chkdecls.NewInstanceOverload(oID, argTypes, resultType)
} else {
overloads[i] = chkdecls.NewOverload(oID, argTypes, resultType)
}
} else {
params := []string{}
for pn := range paramNames {
params = append(params, pn)
}
if o.IsMemberFunction() {
overloads[i] = chkdecls.NewParameterizedInstanceOverload(oID, argTypes, resultType, params)
} else {
overloads[i] = chkdecls.NewParameterizedOverload(oID, argTypes, resultType, params)
}
}
}
return chkdecls.NewFunction(f.Name(), overloads...), nil
}
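// Illustrative sketch, not part of the vendored file: converting go-native
// declarations into the protobuf decls consumed by older checker APIs, using
// the two conversion helpers above.
func exampleToExprDecls(v *VariableDecl, f *FunctionDecl) ([]*exprpb.Decl, error) {
	varDecl, err := VariableDeclToExprDecl(v)
	if err != nil {
		return nil, err
	}
	fnDecl, err := FunctionDeclToExprDecl(f)
	if err != nil {
		return nil, err
	}
	return []*exprpb.Decl{varDecl, fnDecl}, nil
}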
func collectParamNames(paramNames map[string]struct{}, arg *types.Type) {
if arg.Kind() == types.TypeParamKind {
paramNames[arg.TypeName()] = struct{}{}
}
for _, param := range arg.Parameters() {
collectParamNames(paramNames, param)
}
}
var (
emptyArgs = []*types.Type{}
)

View File

@ -22,10 +22,16 @@ import (
"golang.org/x/text/width" "golang.org/x/text/width"
) )
// Error type which references a location within source and a message. // NewError creates an error associated with an expression id with the given message at the given location.
func NewError(id int64, message string, location Location) *Error {
return &Error{Message: message, Location: location, ExprID: id}
}
// Error type which references an expression id, a location within source, and a message.
type Error struct { type Error struct {
Location Location Location Location
Message string Message string
ExprID int64
} }
const ( const (

View File

@ -22,7 +22,7 @@ import (
// Errors type which contains a list of errors observed during parsing. // Errors type which contains a list of errors observed during parsing.
type Errors struct { type Errors struct {
errors []Error errors []*Error
source Source source Source
numErrors int numErrors int
maxErrorsToReport int maxErrorsToReport int
@ -31,7 +31,7 @@ type Errors struct {
// NewErrors creates a new instance of the Errors type. // NewErrors creates a new instance of the Errors type.
func NewErrors(source Source) *Errors { func NewErrors(source Source) *Errors {
return &Errors{ return &Errors{
errors: []Error{}, errors: []*Error{},
source: source, source: source,
maxErrorsToReport: 100, maxErrorsToReport: 100,
} }
@ -39,11 +39,17 @@ func NewErrors(source Source) *Errors {
// ReportError records an error at a source location. // ReportError records an error at a source location.
func (e *Errors) ReportError(l Location, format string, args ...any) { func (e *Errors) ReportError(l Location, format string, args ...any) {
e.ReportErrorAtID(0, l, format, args...)
}
// ReportErrorAtID records an error at a source location and expression id.
func (e *Errors) ReportErrorAtID(id int64, l Location, format string, args ...any) {
e.numErrors++ e.numErrors++
if e.numErrors > e.maxErrorsToReport { if e.numErrors > e.maxErrorsToReport {
return return
} }
err := Error{ err := &Error{
ExprID: id,
Location: l, Location: l,
Message: fmt.Sprintf(format, args...), Message: fmt.Sprintf(format, args...),
} }
@ -51,12 +57,12 @@ func (e *Errors) ReportError(l Location, format string, args ...any) {
} }
// GetErrors returns the list of observed errors. // GetErrors returns the list of observed errors.
func (e *Errors) GetErrors() []Error { func (e *Errors) GetErrors() []*Error {
return e.errors[:] return e.errors[:]
} }
// Append creates a new Errors object with the current and input errors. // Append creates a new Errors object with the current and input errors.
func (e *Errors) Append(errs []Error) *Errors { func (e *Errors) Append(errs []*Error) *Errors {
return &Errors{ return &Errors{
errors: append(e.errors, errs...), errors: append(e.errors, errs...),
source: e.source, source: e.source,

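// Illustrative sketch, not part of the vendored file: reporting an error tied
// to an expression id with the ReportErrorAtID method added in the diff above.
// NoLocation is assumed to be a Location value defined elsewhere in this
// package; the message and id are hypothetical.
func exampleReport(src Source, exprID int64) []*Error {
	errs := NewErrors(src)
	errs.ReportErrorAtID(exprID, NoLocation, "unsupported expression id %d", exprID)
	return errs.GetErrors()
}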
View File

@ -0,0 +1,17 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
package(
default_visibility = ["//visibility:public"],
licenses = ["notice"], # Apache 2.0
)
go_library(
name = "go_default_library",
srcs = [
"functions.go",
],
importpath = "github.com/google/cel-go/common/functions",
deps = [
"//common/types/ref:go_default_library",
],
)

View File

@ -0,0 +1,61 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package functions defines the standard builtin functions supported by the interpreter
package functions
import "github.com/google/cel-go/common/types/ref"
// Overload defines a named overload of a function, indicating an operand trait
// which must be present on the first argument to the overload as well as one
// of either a unary, binary, or function implementation.
//
// The majority of operators within the expression language are unary or binary
// and the specializations simplify the call contract for implementers of
// types with operator overloads. Any added complexity is assumed to be handled
// by the generic FunctionOp.
type Overload struct {
// Operator name as written in an expression or defined within
// operators.go.
Operator string
// Operand trait used to dispatch the call. The zero-value indicates a
// global function overload or that one of the Unary / Binary / Function
// definitions should be used to execute the call.
OperandTrait int
// Unary defines the overload with a UnaryOp implementation. May be nil.
Unary UnaryOp
// Binary defines the overload with a BinaryOp implementation. May be nil.
Binary BinaryOp
// Function defines the overload with a FunctionOp implementation. May be
// nil.
Function FunctionOp
// NonStrict specifies whether the Overload will tolerate arguments that
// are types.Err or types.Unknown.
NonStrict bool
}
// UnaryOp is a function that takes a single value and produces an output.
type UnaryOp func(value ref.Val) ref.Val
// BinaryOp is a function that takes two values and produces an output.
type BinaryOp func(lhs ref.Val, rhs ref.Val) ref.Val
// FunctionOp is a function with accepts zero or more arguments and produces
// a value or error as a result.
type FunctionOp func(values ...ref.Val) ref.Val
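// Illustrative sketch, not part of the vendored file: the smallest possible
// Overload value, a unary identity function registered under a hypothetical
// operator name with no operand trait required.
var exampleIdentityOverload = &Overload{
	Operator:     "identity",
	Unary:        func(value ref.Val) ref.Val { return value },
	OperandTrait: 0, // zero means a global overload; no trait check on the operand
}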

View File

@ -64,7 +64,6 @@ type sourceImpl struct {
runes.Buffer runes.Buffer
description string description string
lineOffsets []int32 lineOffsets []int32
idOffsets map[int64]int32
} }
var _ runes.Buffer = &sourceImpl{} var _ runes.Buffer = &sourceImpl{}
@ -92,7 +91,6 @@ func NewStringSource(contents string, description string) Source {
Buffer: runes.NewBuffer(contents), Buffer: runes.NewBuffer(contents),
description: description, description: description,
lineOffsets: offsets, lineOffsets: offsets,
idOffsets: map[int64]int32{},
} }
} }
@ -102,7 +100,6 @@ func NewInfoSource(info *exprpb.SourceInfo) Source {
Buffer: runes.NewBuffer(""), Buffer: runes.NewBuffer(""),
description: info.GetLocation(), description: info.GetLocation(),
lineOffsets: info.GetLineOffsets(), lineOffsets: info.GetLineOffsets(),
idOffsets: info.GetPositions(),
} }
} }

View File

@ -0,0 +1,25 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
package(
default_visibility = ["//visibility:public"],
licenses = ["notice"], # Apache 2.0
)
go_library(
name = "go_default_library",
srcs = [
"standard.go",
],
importpath = "github.com/google/cel-go/common/stdlib",
deps = [
"//checker/decls:go_default_library",
"//common/decls:go_default_library",
"//common/functions:go_default_library",
"//common/operators:go_default_library",
"//common/overloads:go_default_library",
"//common/types:go_default_library",
"//common/types/ref:go_default_library",
"//common/types/traits:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
],
)

View File

@ -0,0 +1,661 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package stdlib contains all of the standard library function declarations and definitions for CEL.
package stdlib
import (
"github.com/google/cel-go/common/decls"
"github.com/google/cel-go/common/functions"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
var (
stdFunctions []*decls.FunctionDecl
stdFnDecls []*exprpb.Decl
stdTypes []*decls.VariableDecl
stdTypeDecls []*exprpb.Decl
)
func init() {
paramA := types.NewTypeParamType("A")
paramB := types.NewTypeParamType("B")
listOfA := types.NewListType(paramA)
mapOfAB := types.NewMapType(paramA, paramB)
stdTypes = []*decls.VariableDecl{
decls.TypeVariable(types.BoolType),
decls.TypeVariable(types.BytesType),
decls.TypeVariable(types.DoubleType),
decls.TypeVariable(types.DurationType),
decls.TypeVariable(types.IntType),
decls.TypeVariable(listOfA),
decls.TypeVariable(mapOfAB),
decls.TypeVariable(types.NullType),
decls.TypeVariable(types.StringType),
decls.TypeVariable(types.TimestampType),
decls.TypeVariable(types.TypeType),
decls.TypeVariable(types.UintType),
}
stdTypeDecls = make([]*exprpb.Decl, 0, len(stdTypes))
for _, stdType := range stdTypes {
typeVar, err := decls.VariableDeclToExprDecl(stdType)
if err != nil {
panic(err)
}
stdTypeDecls = append(stdTypeDecls, typeVar)
}
stdFunctions = []*decls.FunctionDecl{
// Logical operators. Special-cased within the interpreter.
// Note, the singleton binding prevents extensions from overriding the operator behavior.
function(operators.Conditional,
decls.Overload(overloads.Conditional, argTypes(types.BoolType, paramA, paramA), paramA,
decls.OverloadIsNonStrict()),
decls.SingletonFunctionBinding(noFunctionOverrides)),
function(operators.LogicalAnd,
decls.Overload(overloads.LogicalAnd, argTypes(types.BoolType, types.BoolType), types.BoolType,
decls.OverloadIsNonStrict()),
decls.SingletonBinaryBinding(noBinaryOverrides)),
function(operators.LogicalOr,
decls.Overload(overloads.LogicalOr, argTypes(types.BoolType, types.BoolType), types.BoolType,
decls.OverloadIsNonStrict()),
decls.SingletonBinaryBinding(noBinaryOverrides)),
function(operators.LogicalNot,
decls.Overload(overloads.LogicalNot, argTypes(types.BoolType), types.BoolType),
decls.SingletonUnaryBinding(func(val ref.Val) ref.Val {
b, ok := val.(types.Bool)
if !ok {
return types.MaybeNoSuchOverloadErr(val)
}
return b.Negate()
})),
// Comprehension short-circuiting related function
function(operators.NotStrictlyFalse,
decls.Overload(overloads.NotStrictlyFalse, argTypes(types.BoolType), types.BoolType,
decls.OverloadIsNonStrict(),
decls.UnaryBinding(notStrictlyFalse))),
// Deprecated: __not_strictly_false__
function(operators.OldNotStrictlyFalse,
decls.DisableDeclaration(true), // safe deprecation
decls.Overload(operators.OldNotStrictlyFalse, argTypes(types.BoolType), types.BoolType,
decls.OverloadIsNonStrict(),
decls.UnaryBinding(notStrictlyFalse))),
// Equality / inequality. Special-cased in the interpreter
function(operators.Equals,
decls.Overload(overloads.Equals, argTypes(paramA, paramA), types.BoolType),
decls.SingletonBinaryBinding(noBinaryOverrides)),
function(operators.NotEquals,
decls.Overload(overloads.NotEquals, argTypes(paramA, paramA), types.BoolType),
decls.SingletonBinaryBinding(noBinaryOverrides)),
// Mathematical operators
function(operators.Add,
decls.Overload(overloads.AddBytes,
argTypes(types.BytesType, types.BytesType), types.BytesType),
decls.Overload(overloads.AddDouble,
argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
decls.Overload(overloads.AddDurationDuration,
argTypes(types.DurationType, types.DurationType), types.DurationType),
decls.Overload(overloads.AddDurationTimestamp,
argTypes(types.DurationType, types.TimestampType), types.TimestampType),
decls.Overload(overloads.AddTimestampDuration,
argTypes(types.TimestampType, types.DurationType), types.TimestampType),
decls.Overload(overloads.AddInt64,
argTypes(types.IntType, types.IntType), types.IntType),
decls.Overload(overloads.AddList,
argTypes(listOfA, listOfA), listOfA),
decls.Overload(overloads.AddString,
argTypes(types.StringType, types.StringType), types.StringType),
decls.Overload(overloads.AddUint64,
argTypes(types.UintType, types.UintType), types.UintType),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
return lhs.(traits.Adder).Add(rhs)
}, traits.AdderType)),
function(operators.Divide,
decls.Overload(overloads.DivideDouble,
argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
decls.Overload(overloads.DivideInt64,
argTypes(types.IntType, types.IntType), types.IntType),
decls.Overload(overloads.DivideUint64,
argTypes(types.UintType, types.UintType), types.UintType),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
return lhs.(traits.Divider).Divide(rhs)
}, traits.DividerType)),
function(operators.Modulo,
decls.Overload(overloads.ModuloInt64,
argTypes(types.IntType, types.IntType), types.IntType),
decls.Overload(overloads.ModuloUint64,
argTypes(types.UintType, types.UintType), types.UintType),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
return lhs.(traits.Modder).Modulo(rhs)
}, traits.ModderType)),
function(operators.Multiply,
decls.Overload(overloads.MultiplyDouble,
argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
decls.Overload(overloads.MultiplyInt64,
argTypes(types.IntType, types.IntType), types.IntType),
decls.Overload(overloads.MultiplyUint64,
argTypes(types.UintType, types.UintType), types.UintType),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
return lhs.(traits.Multiplier).Multiply(rhs)
}, traits.MultiplierType)),
function(operators.Negate,
decls.Overload(overloads.NegateDouble, argTypes(types.DoubleType), types.DoubleType),
decls.Overload(overloads.NegateInt64, argTypes(types.IntType), types.IntType),
decls.SingletonUnaryBinding(func(val ref.Val) ref.Val {
if types.IsBool(val) {
return types.MaybeNoSuchOverloadErr(val)
}
return val.(traits.Negater).Negate()
}, traits.NegatorType)),
function(operators.Subtract,
decls.Overload(overloads.SubtractDouble,
argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
decls.Overload(overloads.SubtractDurationDuration,
argTypes(types.DurationType, types.DurationType), types.DurationType),
decls.Overload(overloads.SubtractInt64,
argTypes(types.IntType, types.IntType), types.IntType),
decls.Overload(overloads.SubtractTimestampDuration,
argTypes(types.TimestampType, types.DurationType), types.TimestampType),
decls.Overload(overloads.SubtractTimestampTimestamp,
argTypes(types.TimestampType, types.TimestampType), types.DurationType),
decls.Overload(overloads.SubtractUint64,
argTypes(types.UintType, types.UintType), types.UintType),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
return lhs.(traits.Subtractor).Subtract(rhs)
}, traits.SubtractorType)),
// Relations operators
function(operators.Less,
decls.Overload(overloads.LessBool,
argTypes(types.BoolType, types.BoolType), types.BoolType),
decls.Overload(overloads.LessInt64,
argTypes(types.IntType, types.IntType), types.BoolType),
decls.Overload(overloads.LessInt64Double,
argTypes(types.IntType, types.DoubleType), types.BoolType),
decls.Overload(overloads.LessInt64Uint64,
argTypes(types.IntType, types.UintType), types.BoolType),
decls.Overload(overloads.LessUint64,
argTypes(types.UintType, types.UintType), types.BoolType),
decls.Overload(overloads.LessUint64Double,
argTypes(types.UintType, types.DoubleType), types.BoolType),
decls.Overload(overloads.LessUint64Int64,
argTypes(types.UintType, types.IntType), types.BoolType),
decls.Overload(overloads.LessDouble,
argTypes(types.DoubleType, types.DoubleType), types.BoolType),
decls.Overload(overloads.LessDoubleInt64,
argTypes(types.DoubleType, types.IntType), types.BoolType),
decls.Overload(overloads.LessDoubleUint64,
argTypes(types.DoubleType, types.UintType), types.BoolType),
decls.Overload(overloads.LessString,
argTypes(types.StringType, types.StringType), types.BoolType),
decls.Overload(overloads.LessBytes,
argTypes(types.BytesType, types.BytesType), types.BoolType),
decls.Overload(overloads.LessTimestamp,
argTypes(types.TimestampType, types.TimestampType), types.BoolType),
decls.Overload(overloads.LessDuration,
argTypes(types.DurationType, types.DurationType), types.BoolType),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
cmp := lhs.(traits.Comparer).Compare(rhs)
if cmp == types.IntNegOne {
return types.True
}
if cmp == types.IntOne || cmp == types.IntZero {
return types.False
}
return cmp
}, traits.ComparerType)),
function(operators.LessEquals,
decls.Overload(overloads.LessEqualsBool,
argTypes(types.BoolType, types.BoolType), types.BoolType),
decls.Overload(overloads.LessEqualsInt64,
argTypes(types.IntType, types.IntType), types.BoolType),
decls.Overload(overloads.LessEqualsInt64Double,
argTypes(types.IntType, types.DoubleType), types.BoolType),
decls.Overload(overloads.LessEqualsInt64Uint64,
argTypes(types.IntType, types.UintType), types.BoolType),
decls.Overload(overloads.LessEqualsUint64,
argTypes(types.UintType, types.UintType), types.BoolType),
decls.Overload(overloads.LessEqualsUint64Double,
argTypes(types.UintType, types.DoubleType), types.BoolType),
decls.Overload(overloads.LessEqualsUint64Int64,
argTypes(types.UintType, types.IntType), types.BoolType),
decls.Overload(overloads.LessEqualsDouble,
argTypes(types.DoubleType, types.DoubleType), types.BoolType),
decls.Overload(overloads.LessEqualsDoubleInt64,
argTypes(types.DoubleType, types.IntType), types.BoolType),
decls.Overload(overloads.LessEqualsDoubleUint64,
argTypes(types.DoubleType, types.UintType), types.BoolType),
decls.Overload(overloads.LessEqualsString,
argTypes(types.StringType, types.StringType), types.BoolType),
decls.Overload(overloads.LessEqualsBytes,
argTypes(types.BytesType, types.BytesType), types.BoolType),
decls.Overload(overloads.LessEqualsTimestamp,
argTypes(types.TimestampType, types.TimestampType), types.BoolType),
decls.Overload(overloads.LessEqualsDuration,
argTypes(types.DurationType, types.DurationType), types.BoolType),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
cmp := lhs.(traits.Comparer).Compare(rhs)
if cmp == types.IntNegOne || cmp == types.IntZero {
return types.True
}
if cmp == types.IntOne {
return types.False
}
return cmp
}, traits.ComparerType)),
function(operators.Greater,
decls.Overload(overloads.GreaterBool,
argTypes(types.BoolType, types.BoolType), types.BoolType),
decls.Overload(overloads.GreaterInt64,
argTypes(types.IntType, types.IntType), types.BoolType),
decls.Overload(overloads.GreaterInt64Double,
argTypes(types.IntType, types.DoubleType), types.BoolType),
decls.Overload(overloads.GreaterInt64Uint64,
argTypes(types.IntType, types.UintType), types.BoolType),
decls.Overload(overloads.GreaterUint64,
argTypes(types.UintType, types.UintType), types.BoolType),
decls.Overload(overloads.GreaterUint64Double,
argTypes(types.UintType, types.DoubleType), types.BoolType),
decls.Overload(overloads.GreaterUint64Int64,
argTypes(types.UintType, types.IntType), types.BoolType),
decls.Overload(overloads.GreaterDouble,
argTypes(types.DoubleType, types.DoubleType), types.BoolType),
decls.Overload(overloads.GreaterDoubleInt64,
argTypes(types.DoubleType, types.IntType), types.BoolType),
decls.Overload(overloads.GreaterDoubleUint64,
argTypes(types.DoubleType, types.UintType), types.BoolType),
decls.Overload(overloads.GreaterString,
argTypes(types.StringType, types.StringType), types.BoolType),
decls.Overload(overloads.GreaterBytes,
argTypes(types.BytesType, types.BytesType), types.BoolType),
decls.Overload(overloads.GreaterTimestamp,
argTypes(types.TimestampType, types.TimestampType), types.BoolType),
decls.Overload(overloads.GreaterDuration,
argTypes(types.DurationType, types.DurationType), types.BoolType),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
cmp := lhs.(traits.Comparer).Compare(rhs)
if cmp == types.IntOne {
return types.True
}
if cmp == types.IntNegOne || cmp == types.IntZero {
return types.False
}
return cmp
}, traits.ComparerType)),
function(operators.GreaterEquals,
decls.Overload(overloads.GreaterEqualsBool,
argTypes(types.BoolType, types.BoolType), types.BoolType),
decls.Overload(overloads.GreaterEqualsInt64,
argTypes(types.IntType, types.IntType), types.BoolType),
decls.Overload(overloads.GreaterEqualsInt64Double,
argTypes(types.IntType, types.DoubleType), types.BoolType),
decls.Overload(overloads.GreaterEqualsInt64Uint64,
argTypes(types.IntType, types.UintType), types.BoolType),
decls.Overload(overloads.GreaterEqualsUint64,
argTypes(types.UintType, types.UintType), types.BoolType),
decls.Overload(overloads.GreaterEqualsUint64Double,
argTypes(types.UintType, types.DoubleType), types.BoolType),
decls.Overload(overloads.GreaterEqualsUint64Int64,
argTypes(types.UintType, types.IntType), types.BoolType),
decls.Overload(overloads.GreaterEqualsDouble,
argTypes(types.DoubleType, types.DoubleType), types.BoolType),
decls.Overload(overloads.GreaterEqualsDoubleInt64,
argTypes(types.DoubleType, types.IntType), types.BoolType),
decls.Overload(overloads.GreaterEqualsDoubleUint64,
argTypes(types.DoubleType, types.UintType), types.BoolType),
decls.Overload(overloads.GreaterEqualsString,
argTypes(types.StringType, types.StringType), types.BoolType),
decls.Overload(overloads.GreaterEqualsBytes,
argTypes(types.BytesType, types.BytesType), types.BoolType),
decls.Overload(overloads.GreaterEqualsTimestamp,
argTypes(types.TimestampType, types.TimestampType), types.BoolType),
decls.Overload(overloads.GreaterEqualsDuration,
argTypes(types.DurationType, types.DurationType), types.BoolType),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
cmp := lhs.(traits.Comparer).Compare(rhs)
if cmp == types.IntOne || cmp == types.IntZero {
return types.True
}
if cmp == types.IntNegOne {
return types.False
}
return cmp
}, traits.ComparerType)),
// Indexing
function(operators.Index,
decls.Overload(overloads.IndexList, argTypes(listOfA, types.IntType), paramA),
decls.Overload(overloads.IndexMap, argTypes(mapOfAB, paramA), paramB),
decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
return lhs.(traits.Indexer).Get(rhs)
}, traits.IndexerType)),
// Collections operators
function(operators.In,
decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType),
decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType),
decls.SingletonBinaryBinding(inAggregate)),
function(operators.OldIn,
decls.DisableDeclaration(true), // safe deprecation
decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType),
decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType),
decls.SingletonBinaryBinding(inAggregate)),
function(overloads.DeprecatedIn,
decls.DisableDeclaration(true), // safe deprecation
decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType),
decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType),
decls.SingletonBinaryBinding(inAggregate)),
function(overloads.Size,
decls.Overload(overloads.SizeBytes, argTypes(types.BytesType), types.IntType),
decls.MemberOverload(overloads.SizeBytesInst, argTypes(types.BytesType), types.IntType),
decls.Overload(overloads.SizeList, argTypes(listOfA), types.IntType),
decls.MemberOverload(overloads.SizeListInst, argTypes(listOfA), types.IntType),
decls.Overload(overloads.SizeMap, argTypes(mapOfAB), types.IntType),
decls.MemberOverload(overloads.SizeMapInst, argTypes(mapOfAB), types.IntType),
decls.Overload(overloads.SizeString, argTypes(types.StringType), types.IntType),
decls.MemberOverload(overloads.SizeStringInst, argTypes(types.StringType), types.IntType),
decls.SingletonUnaryBinding(func(val ref.Val) ref.Val {
return val.(traits.Sizer).Size()
}, traits.SizerType)),
// Type conversions
function(overloads.TypeConvertType,
decls.Overload(overloads.TypeConvertType, argTypes(paramA), types.NewTypeTypeWithParam(paramA)),
decls.SingletonUnaryBinding(convertToType(types.TypeType))),
// Bool conversions
function(overloads.TypeConvertBool,
decls.Overload(overloads.BoolToBool, argTypes(types.BoolType), types.BoolType,
decls.UnaryBinding(identity)),
decls.Overload(overloads.StringToBool, argTypes(types.StringType), types.BoolType,
decls.UnaryBinding(convertToType(types.BoolType)))),
// Bytes conversions
function(overloads.TypeConvertBytes,
decls.Overload(overloads.BytesToBytes, argTypes(types.BytesType), types.BytesType,
decls.UnaryBinding(identity)),
decls.Overload(overloads.StringToBytes, argTypes(types.StringType), types.BytesType,
decls.UnaryBinding(convertToType(types.BytesType)))),
// Double conversions
function(overloads.TypeConvertDouble,
decls.Overload(overloads.DoubleToDouble, argTypes(types.DoubleType), types.DoubleType,
decls.UnaryBinding(identity)),
decls.Overload(overloads.IntToDouble, argTypes(types.IntType), types.DoubleType,
decls.UnaryBinding(convertToType(types.DoubleType))),
decls.Overload(overloads.StringToDouble, argTypes(types.StringType), types.DoubleType,
decls.UnaryBinding(convertToType(types.DoubleType))),
decls.Overload(overloads.UintToDouble, argTypes(types.UintType), types.DoubleType,
decls.UnaryBinding(convertToType(types.DoubleType)))),
// Duration conversions
function(overloads.TypeConvertDuration,
decls.Overload(overloads.DurationToDuration, argTypes(types.DurationType), types.DurationType,
decls.UnaryBinding(identity)),
decls.Overload(overloads.IntToDuration, argTypes(types.IntType), types.DurationType,
decls.UnaryBinding(convertToType(types.DurationType))),
decls.Overload(overloads.StringToDuration, argTypes(types.StringType), types.DurationType,
decls.UnaryBinding(convertToType(types.DurationType)))),
// Dyn conversions
function(overloads.TypeConvertDyn,
decls.Overload(overloads.ToDyn, argTypes(paramA), types.DynType),
decls.SingletonUnaryBinding(identity)),
// Int conversions
function(overloads.TypeConvertInt,
decls.Overload(overloads.IntToInt, argTypes(types.IntType), types.IntType,
decls.UnaryBinding(identity)),
decls.Overload(overloads.DoubleToInt, argTypes(types.DoubleType), types.IntType,
decls.UnaryBinding(convertToType(types.IntType))),
decls.Overload(overloads.DurationToInt, argTypes(types.DurationType), types.IntType,
decls.UnaryBinding(convertToType(types.IntType))),
decls.Overload(overloads.StringToInt, argTypes(types.StringType), types.IntType,
decls.UnaryBinding(convertToType(types.IntType))),
decls.Overload(overloads.TimestampToInt, argTypes(types.TimestampType), types.IntType,
decls.UnaryBinding(convertToType(types.IntType))),
decls.Overload(overloads.UintToInt, argTypes(types.UintType), types.IntType,
decls.UnaryBinding(convertToType(types.IntType))),
),
// String conversions
function(overloads.TypeConvertString,
decls.Overload(overloads.StringToString, argTypes(types.StringType), types.StringType,
decls.UnaryBinding(identity)),
decls.Overload(overloads.BoolToString, argTypes(types.BoolType), types.StringType,
decls.UnaryBinding(convertToType(types.StringType))),
decls.Overload(overloads.BytesToString, argTypes(types.BytesType), types.StringType,
decls.UnaryBinding(convertToType(types.StringType))),
decls.Overload(overloads.DoubleToString, argTypes(types.DoubleType), types.StringType,
decls.UnaryBinding(convertToType(types.StringType))),
decls.Overload(overloads.DurationToString, argTypes(types.DurationType), types.StringType,
decls.UnaryBinding(convertToType(types.StringType))),
decls.Overload(overloads.IntToString, argTypes(types.IntType), types.StringType,
decls.UnaryBinding(convertToType(types.StringType))),
decls.Overload(overloads.TimestampToString, argTypes(types.TimestampType), types.StringType,
decls.UnaryBinding(convertToType(types.StringType))),
decls.Overload(overloads.UintToString, argTypes(types.UintType), types.StringType,
decls.UnaryBinding(convertToType(types.StringType)))),
// Timestamp conversions
function(overloads.TypeConvertTimestamp,
decls.Overload(overloads.TimestampToTimestamp, argTypes(types.TimestampType), types.TimestampType,
decls.UnaryBinding(identity)),
decls.Overload(overloads.IntToTimestamp, argTypes(types.IntType), types.TimestampType,
decls.UnaryBinding(convertToType(types.TimestampType))),
decls.Overload(overloads.StringToTimestamp, argTypes(types.StringType), types.TimestampType,
decls.UnaryBinding(convertToType(types.TimestampType)))),
// Uint conversions
function(overloads.TypeConvertUint,
decls.Overload(overloads.UintToUint, argTypes(types.UintType), types.UintType,
decls.UnaryBinding(identity)),
decls.Overload(overloads.DoubleToUint, argTypes(types.DoubleType), types.UintType,
decls.UnaryBinding(convertToType(types.UintType))),
decls.Overload(overloads.IntToUint, argTypes(types.IntType), types.UintType,
decls.UnaryBinding(convertToType(types.UintType))),
decls.Overload(overloads.StringToUint, argTypes(types.StringType), types.UintType,
decls.UnaryBinding(convertToType(types.UintType)))),
// String functions
function(overloads.Contains,
decls.MemberOverload(overloads.ContainsString,
argTypes(types.StringType, types.StringType), types.BoolType,
decls.BinaryBinding(types.StringContains)),
decls.DisableTypeGuards(true)),
function(overloads.EndsWith,
decls.MemberOverload(overloads.EndsWithString,
argTypes(types.StringType, types.StringType), types.BoolType,
decls.BinaryBinding(types.StringEndsWith)),
decls.DisableTypeGuards(true)),
function(overloads.StartsWith,
decls.MemberOverload(overloads.StartsWithString,
argTypes(types.StringType, types.StringType), types.BoolType,
decls.BinaryBinding(types.StringStartsWith)),
decls.DisableTypeGuards(true)),
function(overloads.Matches,
decls.Overload(overloads.Matches, argTypes(types.StringType, types.StringType), types.BoolType),
decls.MemberOverload(overloads.MatchesString,
argTypes(types.StringType, types.StringType), types.BoolType),
decls.SingletonBinaryBinding(func(str, pat ref.Val) ref.Val {
return str.(traits.Matcher).Match(pat)
}, traits.MatcherType)),
// Timestamp / duration functions
function(overloads.TimeGetFullYear,
decls.MemberOverload(overloads.TimestampToYear,
argTypes(types.TimestampType), types.IntType),
decls.MemberOverload(overloads.TimestampToYearWithTz,
argTypes(types.TimestampType, types.StringType), types.IntType)),
function(overloads.TimeGetMonth,
decls.MemberOverload(overloads.TimestampToMonth,
argTypes(types.TimestampType), types.IntType),
decls.MemberOverload(overloads.TimestampToMonthWithTz,
argTypes(types.TimestampType, types.StringType), types.IntType)),
function(overloads.TimeGetDayOfYear,
decls.MemberOverload(overloads.TimestampToDayOfYear,
argTypes(types.TimestampType), types.IntType),
decls.MemberOverload(overloads.TimestampToDayOfYearWithTz,
argTypes(types.TimestampType, types.StringType), types.IntType)),
function(overloads.TimeGetDayOfMonth,
decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBased,
argTypes(types.TimestampType), types.IntType),
decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz,
argTypes(types.TimestampType, types.StringType), types.IntType)),
function(overloads.TimeGetDate,
decls.MemberOverload(overloads.TimestampToDayOfMonthOneBased,
argTypes(types.TimestampType), types.IntType),
decls.MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz,
argTypes(types.TimestampType, types.StringType), types.IntType)),
function(overloads.TimeGetDayOfWeek,
decls.MemberOverload(overloads.TimestampToDayOfWeek,
argTypes(types.TimestampType), types.IntType),
decls.MemberOverload(overloads.TimestampToDayOfWeekWithTz,
argTypes(types.TimestampType, types.StringType), types.IntType)),
function(overloads.TimeGetHours,
decls.MemberOverload(overloads.TimestampToHours,
argTypes(types.TimestampType), types.IntType),
decls.MemberOverload(overloads.TimestampToHoursWithTz,
argTypes(types.TimestampType, types.StringType), types.IntType),
decls.MemberOverload(overloads.DurationToHours,
argTypes(types.DurationType), types.IntType)),
function(overloads.TimeGetMinutes,
decls.MemberOverload(overloads.TimestampToMinutes,
argTypes(types.TimestampType), types.IntType),
decls.MemberOverload(overloads.TimestampToMinutesWithTz,
argTypes(types.TimestampType, types.StringType), types.IntType),
decls.MemberOverload(overloads.DurationToMinutes,
argTypes(types.DurationType), types.IntType)),
function(overloads.TimeGetSeconds,
decls.MemberOverload(overloads.TimestampToSeconds,
argTypes(types.TimestampType), types.IntType),
decls.MemberOverload(overloads.TimestampToSecondsWithTz,
argTypes(types.TimestampType, types.StringType), types.IntType),
decls.MemberOverload(overloads.DurationToSeconds,
argTypes(types.DurationType), types.IntType)),
function(overloads.TimeGetMilliseconds,
decls.MemberOverload(overloads.TimestampToMilliseconds,
argTypes(types.TimestampType), types.IntType),
decls.MemberOverload(overloads.TimestampToMillisecondsWithTz,
argTypes(types.TimestampType, types.StringType), types.IntType),
decls.MemberOverload(overloads.DurationToMilliseconds,
argTypes(types.DurationType), types.IntType)),
}
stdFnDecls = make([]*exprpb.Decl, 0, len(stdFunctions))
for _, fn := range stdFunctions {
if fn.IsDeclarationDisabled() {
continue
}
ed, err := decls.FunctionDeclToExprDecl(fn)
if err != nil {
panic(err)
}
stdFnDecls = append(stdFnDecls, ed)
}
}
// Functions returns the set of standard library function declarations and definitions for CEL.
func Functions() []*decls.FunctionDecl {
return stdFunctions
}
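For orientation, these declarations are what an environment built on the CEL standard library ends up exposing to expressions. A rough end-to-end sketch, assuming the top-level cel package API, which is not part of this file:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// cel.NewEnv() wires in the standard declarations returned by Functions():
	// comparisons, size(), type conversions, timestamp/duration getters, and so on.
	env, err := cel.NewEnv()
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`size("hello") >= 5 && duration("90m").getHours() == 1`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // expected: true
}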
// FunctionExprDecls returns the legacy style protobuf-typed declarations for all functions and overloads
// in the CEL standard environment.
//
// Deprecated: use Functions
func FunctionExprDecls() []*exprpb.Decl {
return stdFnDecls
}
// Types returns the set of standard library types for CEL.
func Types() []*decls.VariableDecl {
return stdTypes
}
// TypeExprDecls returns the legacy style protobuf-typed declarations for all types in the CEL
// standard environment.
//
// Deprecated: use Types
func TypeExprDecls() []*exprpb.Decl {
return stdTypeDecls
}
func notStrictlyFalse(value ref.Val) ref.Val {
if types.IsBool(value) {
return value
}
return types.True
}
func inAggregate(lhs ref.Val, rhs ref.Val) ref.Val {
if rhs.Type().HasTrait(traits.ContainerType) {
return rhs.(traits.Container).Contains(lhs)
}
return types.ValOrErr(rhs, "no such overload")
}
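inAggregate simply defers to the right-hand side's traits.Container implementation, so any container value participates in the `in` operator and anything else yields a no-such-overload error value. An illustrative, package-internal snippet (hypothetical; assumes fmt is imported and uses the list helpers from common/types):

func inAggregateSketch() {
	lst := types.NewStringList(types.DefaultTypeAdapter, []string{"a", "b"})
	fmt.Println(inAggregate(types.String("a"), lst)) // true
	fmt.Println(inAggregate(types.String("z"), lst)) // false
	// A non-container right-hand side falls through to an error value.
	fmt.Println(inAggregate(types.String("a"), types.Int(1)))
}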
func function(name string, opts ...decls.FunctionOpt) *decls.FunctionDecl {
fn, err := decls.NewFunction(name, opts...)
if err != nil {
panic(err)
}
return fn
}
func argTypes(args ...*types.Type) []*types.Type {
return args
}
func noBinaryOverrides(rhs, lhs ref.Val) ref.Val {
return types.NoSuchOverloadErr()
}
func noFunctionOverrides(args ...ref.Val) ref.Val {
return types.NoSuchOverloadErr()
}
func identity(val ref.Val) ref.Val {
return val
}
func convertToType(t ref.Type) functions.UnaryOp {
return func(val ref.Val) ref.Val {
return val.ConvertToType(t)
}
}
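convertToType is only a thin adapter from a target type to a functions.UnaryOp; the conversions themselves live in each value's ConvertToType method, and failures come back as error values rather than panics. A small package-internal sketch of the observable behaviour (assumes fmt is imported; the exact results are illustrative):

func convertSketch() {
	toInt := convertToType(types.IntType)
	fmt.Println(toInt(types.String("42")))                    // 42
	fmt.Println(toInt(types.Double(3.0)))                     // 3
	fmt.Println(types.Int(7).ConvertToType(types.StringType)) // 7
	fmt.Println(toInt(types.String("not a number")))          // error value, not a panic
}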


@ -27,20 +27,20 @@ go_library(
"provider.go",
"string.go",
"timestamp.go",
- "type.go",
+ "types.go",
"uint.go",
"unknown.go",
"util.go",
],
importpath = "github.com/google/cel-go/common/types",
deps = [
+ "//checker/decls:go_default_library",
"//common/overloads:go_default_library",
"//common/types/pb:go_default_library",
"//common/types/ref:go_default_library",
"//common/types/traits:go_default_library",
"@com_github_stoewer_go_strcase//:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
- "@org_golang_google_genproto_googleapis_rpc//status:go_default_library",
"@org_golang_google_protobuf//encoding/protojson:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
@ -71,8 +71,9 @@ go_test(
"provider_test.go",
"string_test.go",
"timestamp_test.go",
- "type_test.go",
+ "types_test.go",
"uint_test.go",
+ "unknown_test.go",
"util_test.go",
],
embed = [":go_default_library"],


@ -20,7 +20,6 @@ import (
"strconv"
"github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
anypb "google.golang.org/protobuf/types/known/anypb"
structpb "google.golang.org/protobuf/types/known/structpb"
@ -31,11 +30,6 @@ import (
type Bool bool
var (
- // BoolType singleton.
- BoolType = NewTypeValue("bool",
- traits.ComparerType,
- traits.NegatorType)
// boolWrapperType golang reflected type for protobuf bool wrapper type.
boolWrapperType = reflect.TypeOf(&wrapperspb.BoolValue{})
)


@ -22,7 +22,6 @@ import (
"unicode/utf8"
"github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
anypb "google.golang.org/protobuf/types/known/anypb"
structpb "google.golang.org/protobuf/types/known/structpb"
@ -34,12 +33,6 @@ import (
type Bytes []byte
var (
- // BytesType singleton.
- BytesType = NewTypeValue("bytes",
- traits.AdderType,
- traits.ComparerType,
- traits.SizerType)
// byteWrapperType golang reflected type for protobuf bytes wrapper type.
byteWrapperType = reflect.TypeOf(&wrapperspb.BytesValue{})
)


@ -20,7 +20,6 @@ import (
"reflect"
"github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
anypb "google.golang.org/protobuf/types/known/anypb"
structpb "google.golang.org/protobuf/types/known/structpb"
@ -32,15 +31,6 @@ import (
type Double float64
var (
- // DoubleType singleton.
- DoubleType = NewTypeValue("double",
- traits.AdderType,
- traits.ComparerType,
- traits.DividerType,
- traits.MultiplierType,
- traits.NegatorType,
- traits.SubtractorType)
// doubleWrapperType reflected type for protobuf double wrapper type.
doubleWrapperType = reflect.TypeOf(&wrapperspb.DoubleValue{})


@ -22,7 +22,6 @@ import (
"github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
anypb "google.golang.org/protobuf/types/known/anypb"
dpb "google.golang.org/protobuf/types/known/durationpb"
@ -41,13 +40,14 @@ func durationOf(d time.Duration) Duration {
}
var (
- // DurationType singleton.
- DurationType = NewTypeValue("google.protobuf.Duration",
- traits.AdderType,
- traits.ComparerType,
- traits.NegatorType,
- traits.ReceiverType,
- traits.SubtractorType)
+ durationValueType = reflect.TypeOf(&dpb.Duration{})
+ durationZeroArgOverloads = map[string]func(ref.Val) ref.Val{
+ overloads.TimeGetHours: DurationGetHours,
+ overloads.TimeGetMinutes: DurationGetMinutes,
+ overloads.TimeGetSeconds: DurationGetSeconds,
+ overloads.TimeGetMilliseconds: DurationGetMilliseconds,
+ }
)
// Add implements traits.Adder.Add.
@ -156,7 +156,7 @@ func (d Duration) Negate() ref.Val {
func (d Duration) Receive(function string, overload string, args []ref.Val) ref.Val {
if len(args) == 0 {
if f, found := durationZeroArgOverloads[function]; found {
- return f(d.Duration)
+ return f(d)
}
}
return NoSuchOverloadErr()
@ -185,20 +185,38 @@ func (d Duration) Value() any {
return d.Duration
}
- var (
- durationValueType = reflect.TypeOf(&dpb.Duration{})
- durationZeroArgOverloads = map[string]func(time.Duration) ref.Val{
- overloads.TimeGetHours: func(dur time.Duration) ref.Val {
- return Int(dur.Hours())
- },
- overloads.TimeGetMinutes: func(dur time.Duration) ref.Val {
- return Int(dur.Minutes())
- },
- overloads.TimeGetSeconds: func(dur time.Duration) ref.Val {
- return Int(dur.Seconds())
- },
- overloads.TimeGetMilliseconds: func(dur time.Duration) ref.Val {
- return Int(dur.Milliseconds())
- }}
- )
+ // DurationGetHours returns the duration in hours.
+ func DurationGetHours(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Hours())
+ }
+ // DurationGetMinutes returns duration in minutes.
+ func DurationGetMinutes(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Minutes())
+ }
+ // DurationGetSeconds returns duration in seconds.
+ func DurationGetSeconds(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Seconds())
+ }
+ // DurationGetMilliseconds returns duration in milliseconds.
+ func DurationGetMilliseconds(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Milliseconds())
+ }
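Replacing the per-overload closures with exported DurationGet* helpers makes the zero-argument duration getters callable (and testable) outside the Receive dispatch path. A usage sketch, relying on types.Duration embedding time.Duration as the Receive method shown here implies (assumes fmt and time are imported):

d := types.Duration{Duration: 90 * time.Minute}
fmt.Println(types.DurationGetHours(d))        // 1 (1.5h truncated to an int)
fmt.Println(types.DurationGetMinutes(d))      // 90
fmt.Println(types.DurationGetSeconds(d))      // 5400
fmt.Println(types.DurationGetMilliseconds(d)) // 5400000
fmt.Println(types.DurationGetHours(types.Int(3))) // error value: not a Duration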


@ -35,7 +35,7 @@ type Err struct {
var (
// ErrType singleton.
- ErrType = NewTypeValue("error")
+ ErrType = NewOpaqueType("error")
// errDivideByZero is an error indicating a division by zero of an integer value.
errDivideByZero = errors.New("division by zero")
@ -129,6 +129,11 @@ func (e *Err) Is(target error) bool {
return e.error.Error() == target.Error()
}
+ // Unwrap implements errors.Unwrap.
+ func (e *Err) Unwrap() error {
+ return e.error
+ }
// IsError returns whether the input element ref.Type or ref.Val is equal to
// the ErrType singleton.
func IsError(val ref.Val) bool {


@ -22,7 +22,6 @@ import (
"time"
"github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
anypb "google.golang.org/protobuf/types/known/anypb"
structpb "google.golang.org/protobuf/types/known/structpb"
@ -41,16 +40,6 @@ const (
)
var (
- // IntType singleton.
- IntType = NewTypeValue("int",
- traits.AdderType,
- traits.ComparerType,
- traits.DividerType,
- traits.ModderType,
- traits.MultiplierType,
- traits.NegatorType,
- traits.SubtractorType)
// int32WrapperType reflected type for protobuf int32 wrapper type.
int32WrapperType = reflect.TypeOf(&wrapperspb.Int32Value{})


@ -24,7 +24,7 @@ import (
var (
// IteratorType singleton.
- IteratorType = NewTypeValue("iterator", traits.IteratorType)
+ IteratorType = NewObjectType("iterator", traits.IteratorType)
)
// baseIterator is the basis for list, map, and object iterators.


@ -29,25 +29,15 @@ import (
structpb "google.golang.org/protobuf/types/known/structpb" structpb "google.golang.org/protobuf/types/known/structpb"
) )
var (
// ListType singleton.
ListType = NewTypeValue("list",
traits.AdderType,
traits.ContainerType,
traits.IndexerType,
traits.IterableType,
traits.SizerType)
)
// NewDynamicList returns a traits.Lister with heterogenous elements. // NewDynamicList returns a traits.Lister with heterogenous elements.
// value should be an array of "native" types, i.e. any type that // value should be an array of "native" types, i.e. any type that
// NativeToValue() can convert to a ref.Val. // NativeToValue() can convert to a ref.Val.
func NewDynamicList(adapter ref.TypeAdapter, value any) traits.Lister { func NewDynamicList(adapter Adapter, value any) traits.Lister {
refValue := reflect.ValueOf(value) refValue := reflect.ValueOf(value)
return &baseList{ return &baseList{
TypeAdapter: adapter, Adapter: adapter,
value: value, value: value,
size: refValue.Len(), size: refValue.Len(),
get: func(i int) any { get: func(i int) any {
return refValue.Index(i).Interface() return refValue.Index(i).Interface()
}, },
@ -55,56 +45,56 @@ func NewDynamicList(adapter ref.TypeAdapter, value any) traits.Lister {
} }
// NewStringList returns a traits.Lister containing only strings. // NewStringList returns a traits.Lister containing only strings.
func NewStringList(adapter ref.TypeAdapter, elems []string) traits.Lister { func NewStringList(adapter Adapter, elems []string) traits.Lister {
return &baseList{ return &baseList{
TypeAdapter: adapter, Adapter: adapter,
value: elems, value: elems,
size: len(elems), size: len(elems),
get: func(i int) any { return elems[i] }, get: func(i int) any { return elems[i] },
} }
} }
// NewRefValList returns a traits.Lister with ref.Val elements. // NewRefValList returns a traits.Lister with ref.Val elements.
// //
// This type specialization is used with list literals within CEL expressions. // This type specialization is used with list literals within CEL expressions.
func NewRefValList(adapter ref.TypeAdapter, elems []ref.Val) traits.Lister { func NewRefValList(adapter Adapter, elems []ref.Val) traits.Lister {
return &baseList{ return &baseList{
TypeAdapter: adapter, Adapter: adapter,
value: elems, value: elems,
size: len(elems), size: len(elems),
get: func(i int) any { return elems[i] }, get: func(i int) any { return elems[i] },
} }
} }
// NewProtoList returns a traits.Lister based on a pb.List instance. // NewProtoList returns a traits.Lister based on a pb.List instance.
func NewProtoList(adapter ref.TypeAdapter, list protoreflect.List) traits.Lister { func NewProtoList(adapter Adapter, list protoreflect.List) traits.Lister {
return &baseList{ return &baseList{
TypeAdapter: adapter, Adapter: adapter,
value: list, value: list,
size: list.Len(), size: list.Len(),
get: func(i int) any { return list.Get(i).Interface() }, get: func(i int) any { return list.Get(i).Interface() },
} }
} }
// NewJSONList returns a traits.Lister based on structpb.ListValue instance. // NewJSONList returns a traits.Lister based on structpb.ListValue instance.
func NewJSONList(adapter ref.TypeAdapter, l *structpb.ListValue) traits.Lister { func NewJSONList(adapter Adapter, l *structpb.ListValue) traits.Lister {
vals := l.GetValues() vals := l.GetValues()
return &baseList{ return &baseList{
TypeAdapter: adapter, Adapter: adapter,
value: l, value: l,
size: len(vals), size: len(vals),
get: func(i int) any { return vals[i] }, get: func(i int) any { return vals[i] },
} }
} }
// NewMutableList creates a new mutable list whose internal state can be modified. // NewMutableList creates a new mutable list whose internal state can be modified.
func NewMutableList(adapter ref.TypeAdapter) traits.MutableLister { func NewMutableList(adapter Adapter) traits.MutableLister {
var mutableValues []ref.Val var mutableValues []ref.Val
l := &mutableList{ l := &mutableList{
baseList: &baseList{ baseList: &baseList{
TypeAdapter: adapter, Adapter: adapter,
value: mutableValues, value: mutableValues,
size: 0, size: 0,
}, },
mutableValues: mutableValues, mutableValues: mutableValues,
} }
@ -116,9 +106,9 @@ func NewMutableList(adapter ref.TypeAdapter) traits.MutableLister {
// baseList points to a list containing elements of any type. // baseList points to a list containing elements of any type.
// The `value` is an array of native values, and refValue is its reflection object. // The `value` is an array of native values, and refValue is its reflection object.
// The `ref.TypeAdapter` enables native type to CEL type conversions. // The `Adapter` enables native type to CEL type conversions.
type baseList struct { type baseList struct {
ref.TypeAdapter Adapter
value any value any
// size indicates the number of elements within the list. // size indicates the number of elements within the list.
@ -143,9 +133,9 @@ func (l *baseList) Add(other ref.Val) ref.Val {
return l return l
} }
return &concatList{ return &concatList{
TypeAdapter: l.TypeAdapter, Adapter: l.Adapter,
prevList: l, prevList: l,
nextList: otherList} nextList: otherList}
} }
// Contains implements the traits.Container interface method. // Contains implements the traits.Container interface method.
@ -322,13 +312,13 @@ func (l *mutableList) Add(other ref.Val) ref.Val {
func (l *mutableList) ToImmutableList() traits.Lister { func (l *mutableList) ToImmutableList() traits.Lister {
// The reference to internal state is guaranteed to be safe as this call is only performed // The reference to internal state is guaranteed to be safe as this call is only performed
// when mutations have been completed. // when mutations have been completed.
return NewRefValList(l.TypeAdapter, l.mutableValues) return NewRefValList(l.Adapter, l.mutableValues)
} }
// concatList combines two list implementations together into a view. // concatList combines two list implementations together into a view.
// The `ref.TypeAdapter` enables native type to CEL type conversions. // The `Adapter` enables native type to CEL type conversions.
type concatList struct { type concatList struct {
ref.TypeAdapter Adapter
value any value any
prevList traits.Lister prevList traits.Lister
nextList traits.Lister nextList traits.Lister
@ -347,9 +337,9 @@ func (l *concatList) Add(other ref.Val) ref.Val {
return l return l
} }
return &concatList{ return &concatList{
TypeAdapter: l.TypeAdapter, Adapter: l.Adapter,
prevList: l, prevList: l,
nextList: otherList} nextList: otherList}
} }
// Contains implements the traits.Container interface method. // Contains implements the traits.Container interface method.
@ -376,7 +366,7 @@ func (l *concatList) Contains(elem ref.Val) ref.Val {
// ConvertToNative implements the ref.Val interface method. // ConvertToNative implements the ref.Val interface method.
func (l *concatList) ConvertToNative(typeDesc reflect.Type) (any, error) { func (l *concatList) ConvertToNative(typeDesc reflect.Type) (any, error) {
combined := NewDynamicList(l.TypeAdapter, l.Value().([]any)) combined := NewDynamicList(l.Adapter, l.Value().([]any))
return combined.ConvertToNative(typeDesc) return combined.ConvertToNative(typeDesc)
} }
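The switch from the embedded ref.TypeAdapter to the Adapter alias is mechanical and the constructors keep their shapes, so callers only see the parameter type change. A short sketch of the list helpers with the default adapter (illustrative, not code from this commit; assumes fmt and the traits package are imported):

l := types.NewStringList(types.DefaultTypeAdapter, []string{"red", "green"})
fmt.Println(l.Size())                        // 2
fmt.Println(l.Contains(types.String("red"))) // true
fmt.Println(l.Get(types.Int(1)))             // green

// Add returns the lazy concatList described here rather than copying elements.
combined := l.Add(types.NewStringList(types.DefaultTypeAdapter, []string{"blue"}))
fmt.Println(combined.(traits.Lister).Size()) // 3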


@ -32,10 +32,10 @@ import (
) )
// NewDynamicMap returns a traits.Mapper value with dynamic key, value pairs. // NewDynamicMap returns a traits.Mapper value with dynamic key, value pairs.
func NewDynamicMap(adapter ref.TypeAdapter, value any) traits.Mapper { func NewDynamicMap(adapter Adapter, value any) traits.Mapper {
refValue := reflect.ValueOf(value) refValue := reflect.ValueOf(value)
return &baseMap{ return &baseMap{
TypeAdapter: adapter, Adapter: adapter,
mapAccessor: newReflectMapAccessor(adapter, refValue), mapAccessor: newReflectMapAccessor(adapter, refValue),
value: value, value: value,
size: refValue.Len(), size: refValue.Len(),
@ -46,10 +46,10 @@ func NewDynamicMap(adapter ref.TypeAdapter, value any) traits.Mapper {
// encoded in protocol buffer form. // encoded in protocol buffer form.
// //
// The `adapter` argument provides type adaptation capabilities from proto to CEL. // The `adapter` argument provides type adaptation capabilities from proto to CEL.
func NewJSONStruct(adapter ref.TypeAdapter, value *structpb.Struct) traits.Mapper { func NewJSONStruct(adapter Adapter, value *structpb.Struct) traits.Mapper {
fields := value.GetFields() fields := value.GetFields()
return &baseMap{ return &baseMap{
TypeAdapter: adapter, Adapter: adapter,
mapAccessor: newJSONStructAccessor(adapter, fields), mapAccessor: newJSONStructAccessor(adapter, fields),
value: value, value: value,
size: len(fields), size: len(fields),
@ -57,9 +57,9 @@ func NewJSONStruct(adapter ref.TypeAdapter, value *structpb.Struct) traits.Mappe
} }
// NewRefValMap returns a specialized traits.Mapper with CEL valued keys and values. // NewRefValMap returns a specialized traits.Mapper with CEL valued keys and values.
func NewRefValMap(adapter ref.TypeAdapter, value map[ref.Val]ref.Val) traits.Mapper { func NewRefValMap(adapter Adapter, value map[ref.Val]ref.Val) traits.Mapper {
return &baseMap{ return &baseMap{
TypeAdapter: adapter, Adapter: adapter,
mapAccessor: newRefValMapAccessor(value), mapAccessor: newRefValMapAccessor(value),
value: value, value: value,
size: len(value), size: len(value),
@ -67,9 +67,9 @@ func NewRefValMap(adapter ref.TypeAdapter, value map[ref.Val]ref.Val) traits.Map
} }
// NewStringInterfaceMap returns a specialized traits.Mapper with string keys and interface values. // NewStringInterfaceMap returns a specialized traits.Mapper with string keys and interface values.
func NewStringInterfaceMap(adapter ref.TypeAdapter, value map[string]any) traits.Mapper { func NewStringInterfaceMap(adapter Adapter, value map[string]any) traits.Mapper {
return &baseMap{ return &baseMap{
TypeAdapter: adapter, Adapter: adapter,
mapAccessor: newStringIfaceMapAccessor(adapter, value), mapAccessor: newStringIfaceMapAccessor(adapter, value),
value: value, value: value,
size: len(value), size: len(value),
@ -77,9 +77,9 @@ func NewStringInterfaceMap(adapter ref.TypeAdapter, value map[string]any) traits
} }
// NewStringStringMap returns a specialized traits.Mapper with string keys and values. // NewStringStringMap returns a specialized traits.Mapper with string keys and values.
func NewStringStringMap(adapter ref.TypeAdapter, value map[string]string) traits.Mapper { func NewStringStringMap(adapter Adapter, value map[string]string) traits.Mapper {
return &baseMap{ return &baseMap{
TypeAdapter: adapter, Adapter: adapter,
mapAccessor: newStringMapAccessor(value), mapAccessor: newStringMapAccessor(value),
value: value, value: value,
size: len(value), size: len(value),
@ -87,22 +87,13 @@ func NewStringStringMap(adapter ref.TypeAdapter, value map[string]string) traits
} }
// NewProtoMap returns a specialized traits.Mapper for handling protobuf map values. // NewProtoMap returns a specialized traits.Mapper for handling protobuf map values.
func NewProtoMap(adapter ref.TypeAdapter, value *pb.Map) traits.Mapper { func NewProtoMap(adapter Adapter, value *pb.Map) traits.Mapper {
return &protoMap{ return &protoMap{
TypeAdapter: adapter, Adapter: adapter,
value: value, value: value,
} }
} }
var (
// MapType singleton.
MapType = NewTypeValue("map",
traits.ContainerType,
traits.IndexerType,
traits.IterableType,
traits.SizerType)
)
// mapAccessor is a private interface for finding values within a map and iterating over the keys. // mapAccessor is a private interface for finding values within a map and iterating over the keys.
// This interface implements portions of the API surface area required by the traits.Mapper // This interface implements portions of the API surface area required by the traits.Mapper
// interface. // interface.
@ -121,7 +112,7 @@ type mapAccessor interface {
// Since CEL is side-effect free, the base map represents an immutable object. // Since CEL is side-effect free, the base map represents an immutable object.
type baseMap struct { type baseMap struct {
// TypeAdapter used to convert keys and values accessed within the map. // TypeAdapter used to convert keys and values accessed within the map.
ref.TypeAdapter Adapter
// mapAccessor interface implementation used to find and iterate over map keys. // mapAccessor interface implementation used to find and iterate over map keys.
mapAccessor mapAccessor
@ -316,15 +307,15 @@ func (m *baseMap) Value() any {
return m.value return m.value
} }
func newJSONStructAccessor(adapter ref.TypeAdapter, st map[string]*structpb.Value) mapAccessor { func newJSONStructAccessor(adapter Adapter, st map[string]*structpb.Value) mapAccessor {
return &jsonStructAccessor{ return &jsonStructAccessor{
TypeAdapter: adapter, Adapter: adapter,
st: st, st: st,
} }
} }
type jsonStructAccessor struct { type jsonStructAccessor struct {
ref.TypeAdapter Adapter
st map[string]*structpb.Value st map[string]*structpb.Value
} }
@ -359,17 +350,17 @@ func (a *jsonStructAccessor) Iterator() traits.Iterator {
} }
} }
func newReflectMapAccessor(adapter ref.TypeAdapter, value reflect.Value) mapAccessor { func newReflectMapAccessor(adapter Adapter, value reflect.Value) mapAccessor {
keyType := value.Type().Key() keyType := value.Type().Key()
return &reflectMapAccessor{ return &reflectMapAccessor{
TypeAdapter: adapter, Adapter: adapter,
refValue: value, refValue: value,
keyType: keyType, keyType: keyType,
} }
} }
type reflectMapAccessor struct { type reflectMapAccessor struct {
ref.TypeAdapter Adapter
refValue reflect.Value refValue reflect.Value
keyType reflect.Type keyType reflect.Type
} }
@ -427,9 +418,9 @@ func (m *reflectMapAccessor) findInternal(key ref.Val) (ref.Val, bool) {
// Iterator creates a Golang reflection based traits.Iterator. // Iterator creates a Golang reflection based traits.Iterator.
func (m *reflectMapAccessor) Iterator() traits.Iterator { func (m *reflectMapAccessor) Iterator() traits.Iterator {
return &mapIterator{ return &mapIterator{
TypeAdapter: m.TypeAdapter, Adapter: m.Adapter,
mapKeys: m.refValue.MapRange(), mapKeys: m.refValue.MapRange(),
len: m.refValue.Len(), len: m.refValue.Len(),
} }
} }
@ -480,9 +471,9 @@ func (a *refValMapAccessor) Find(key ref.Val) (ref.Val, bool) {
// Iterator produces a new traits.Iterator which iterates over the map keys via Golang reflection. // Iterator produces a new traits.Iterator which iterates over the map keys via Golang reflection.
func (a *refValMapAccessor) Iterator() traits.Iterator { func (a *refValMapAccessor) Iterator() traits.Iterator {
return &mapIterator{ return &mapIterator{
TypeAdapter: DefaultTypeAdapter, Adapter: DefaultTypeAdapter,
mapKeys: reflect.ValueOf(a.mapVal).MapRange(), mapKeys: reflect.ValueOf(a.mapVal).MapRange(),
len: len(a.mapVal), len: len(a.mapVal),
} }
} }
@ -524,15 +515,15 @@ func (a *stringMapAccessor) Iterator() traits.Iterator {
} }
} }
func newStringIfaceMapAccessor(adapter ref.TypeAdapter, mapVal map[string]any) mapAccessor { func newStringIfaceMapAccessor(adapter Adapter, mapVal map[string]any) mapAccessor {
return &stringIfaceMapAccessor{ return &stringIfaceMapAccessor{
TypeAdapter: adapter, Adapter: adapter,
mapVal: mapVal, mapVal: mapVal,
} }
} }
type stringIfaceMapAccessor struct { type stringIfaceMapAccessor struct {
ref.TypeAdapter Adapter
mapVal map[string]any mapVal map[string]any
} }
@ -569,7 +560,7 @@ func (a *stringIfaceMapAccessor) Iterator() traits.Iterator {
// protoMap is a specialized, separate implementation of the traits.Mapper interfaces tailored to // protoMap is a specialized, separate implementation of the traits.Mapper interfaces tailored to
// accessing protoreflect.Map values. // accessing protoreflect.Map values.
type protoMap struct { type protoMap struct {
ref.TypeAdapter Adapter
value *pb.Map value *pb.Map
} }
@ -772,9 +763,9 @@ func (m *protoMap) Iterator() traits.Iterator {
return true return true
}) })
return &protoMapIterator{ return &protoMapIterator{
TypeAdapter: m.TypeAdapter, Adapter: m.Adapter,
mapKeys: mapKeys, mapKeys: mapKeys,
len: m.value.Len(), len: m.value.Len(),
} }
} }
@ -795,7 +786,7 @@ func (m *protoMap) Value() any {
type mapIterator struct { type mapIterator struct {
*baseIterator *baseIterator
ref.TypeAdapter Adapter
mapKeys *reflect.MapIter mapKeys *reflect.MapIter
cursor int cursor int
len int len int
@ -818,7 +809,7 @@ func (it *mapIterator) Next() ref.Val {
type protoMapIterator struct { type protoMapIterator struct {
*baseIterator *baseIterator
ref.TypeAdapter Adapter
mapKeys []protoreflect.MapKey mapKeys []protoreflect.MapKey
cursor int cursor int
len int len int
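The same Adapter rename applies to the map constructors; lookup behaviour is unchanged. A brief sketch with the default adapter (illustrative only; assumes fmt is imported):

m := types.NewStringStringMap(types.DefaultTypeAdapter, map[string]string{"env": "prod"})
fmt.Println(m.Size()) // 1
if v, found := m.Find(types.String("env")); found {
	fmt.Println(v) // prod
}
fmt.Println(m.Contains(types.String("region"))) // false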


@ -30,8 +30,6 @@ import (
type Null structpb.NullValue
var (
- // NullType singleton.
- NullType = NewTypeValue("null_type")
// NullValue singleton.
NullValue = Null(structpb.NullValue_NULL_VALUE)


@ -29,10 +29,10 @@ import (
)
type protoObj struct {
- ref.TypeAdapter
+ Adapter
value proto.Message
typeDesc *pb.TypeDescription
- typeValue *TypeValue
+ typeValue ref.Val
}
// NewObject returns an object based on a proto.Message value which handles
@ -42,15 +42,15 @@ type protoObj struct {
// Note: the type value is pulled from the list of registered types within the
// type provider. If the proto type is not registered within the type provider,
// then this will result in an error within the type adapter / provider.
- func NewObject(adapter ref.TypeAdapter,
+ func NewObject(adapter Adapter,
typeDesc *pb.TypeDescription,
- typeValue *TypeValue,
+ typeValue ref.Val,
value proto.Message) ref.Val {
return &protoObj{
- TypeAdapter: adapter,
+ Adapter: adapter,
value: value,
typeDesc: typeDesc,
typeValue: typeValue}
}
func (o *protoObj) ConvertToNative(typeDesc reflect.Type) (any, error) {
@ -157,7 +157,7 @@ func (o *protoObj) Get(index ref.Val) ref.Val {
}
func (o *protoObj) Type() ref.Type {
- return o.typeValue
+ return o.typeValue.(ref.Type)
}
func (o *protoObj) Value() any {


@ -24,7 +24,7 @@ import (
var (
// OptionalType indicates the runtime type of an optional value.
- OptionalType = NewTypeValue("optional")
+ OptionalType = NewOpaqueType("optional")
// OptionalNone is a sentinel value which is used to indicate an empty optional value.
OptionalNone = &Optional{}


@ -285,7 +285,7 @@ func (fd *FieldDescription) GetFrom(target any) (any, error) {
// IsEnum returns true if the field type refers to an enum value.
func (fd *FieldDescription) IsEnum() bool {
- return fd.desc.Kind() == protoreflect.EnumKind
+ return fd.ProtoKind() == protoreflect.EnumKind
}
// IsMap returns true if the field is of map type.
@ -295,7 +295,7 @@ func (fd *FieldDescription) IsMap() bool {
// IsMessage returns true if the field is of message type.
func (fd *FieldDescription) IsMessage() bool {
- kind := fd.desc.Kind()
+ kind := fd.ProtoKind()
return kind == protoreflect.MessageKind || kind == protoreflect.GroupKind
}
@ -326,6 +326,11 @@ func (fd *FieldDescription) Name() string {
return string(fd.desc.Name())
}
+ // ProtoKind returns the protobuf reflected kind of the field.
+ func (fd *FieldDescription) ProtoKind() protoreflect.Kind {
+ return fd.desc.Kind()
+ }
// ReflectType returns the Golang reflect.Type for this field.
func (fd *FieldDescription) ReflectType() reflect.Type {
return fd.reflectType
@ -345,17 +350,17 @@ func (fd *FieldDescription) Zero() proto.Message {
}
func (fd *FieldDescription) typeDefToType() *exprpb.Type {
- if fd.desc.Kind() == protoreflect.MessageKind || fd.desc.Kind() == protoreflect.GroupKind {
+ if fd.IsMessage() {
msgType := string(fd.desc.Message().FullName())
if wk, found := CheckedWellKnowns[msgType]; found {
return wk
}
return checkedMessageType(msgType)
}
- if fd.desc.Kind() == protoreflect.EnumKind {
+ if fd.IsEnum() {
return checkedInt
}
- return CheckedPrimitives[fd.desc.Kind()]
+ return CheckedPrimitives[fd.ProtoKind()]
}
// Map wraps the protoreflect.Map object with a key and value FieldDescription for use in
@ -463,13 +468,13 @@ func unwrapDynamic(desc description, refMsg protoreflect.Message) (any, bool, er
unwrappedAny := &anypb.Any{}
err := Merge(unwrappedAny, msg)
if err != nil {
- return nil, false, err
+ return nil, false, fmt.Errorf("unwrap dynamic field failed: %v", err)
}
dynMsg, err := unwrappedAny.UnmarshalNew()
if err != nil {
// Allow the error to move further up the stack as it should result in an type
// conversion error if the caller does not recover it somehow.
- return nil, false, err
+ return nil, false, fmt.Errorf("unmarshal dynamic any failed: %v", err)
}
// Attempt to unwrap the dynamic type, otherwise return the dynamic message.
unwrapped, nested, err := unwrapDynamic(desc, dynMsg.ProtoReflect())
@ -560,8 +565,10 @@ func zeroValueOf(msg proto.Message) proto.Message {
}
var (
+ jsonValueTypeURL = "types.googleapis.com/google.protobuf.Value"
zeroValueMap = map[string]proto.Message{
- "google.protobuf.Any": &anypb.Any{},
+ "google.protobuf.Any": &anypb.Any{TypeUrl: jsonValueTypeURL},
"google.protobuf.Duration": &dpb.Duration{},
"google.protobuf.ListValue": &structpb.ListValue{},
"google.protobuf.Struct": &structpb.Struct{},


@ -33,17 +33,64 @@ import (
tpb "google.golang.org/protobuf/types/known/timestamppb" tpb "google.golang.org/protobuf/types/known/timestamppb"
) )
type protoTypeRegistry struct { // Adapter converts native Go values of varying type and complexity to equivalent CEL values.
revTypeMap map[string]ref.Type type Adapter = ref.TypeAdapter
// Provider specifies functions for creating new object instances and for resolving
// enum values by name.
type Provider interface {
// EnumValue returns the numeric value of the given enum value name.
EnumValue(enumName string) ref.Val
// FindIdent takes a qualified identifier name and returns a ref.Val if one exists.
FindIdent(identName string) (ref.Val, bool)
// FindStructType returns the Type give a qualified type name.
//
// For historical reasons, only struct types are expected to be returned through this
// method, and the type values are expected to be wrapped in a TypeType instance using
// TypeTypeWithParam(<structType>).
//
// Returns false if not found.
FindStructType(structType string) (*Type, bool)
// FieldStructFieldType returns the field type for a checked type value. Returns
// false if the field could not be found.
FindStructFieldType(structType, fieldName string) (*FieldType, bool)
// NewValue creates a new type value from a qualified name and map of field
// name to value.
//
// Note, for each value, the Val.ConvertToNative function will be invoked
// to convert the Val to the field's native type. If an error occurs during
// conversion, the NewValue will be a types.Err.
NewValue(structType string, fields map[string]ref.Val) ref.Val
}
// FieldType represents a field's type value and whether that field supports presence detection.
type FieldType struct {
// Type of the field as a CEL native type value.
Type *Type
// IsSet indicates whether the field is set on an input object.
IsSet ref.FieldTester
// GetFrom retrieves the field value on the input object, if set.
GetFrom ref.FieldGetter
}
// Registry provides type information for a set of registered types.
type Registry struct {
revTypeMap map[string]*Type
pbdb *pb.Db pbdb *pb.Db
} }
// NewRegistry accepts a list of proto message instances and returns a type // NewRegistry accepts a list of proto message instances and returns a type
// provider which can create new instances of the provided message or any // provider which can create new instances of the provided message or any
// message that proto depends upon in its FileDescriptor. // message that proto depends upon in its FileDescriptor.
func NewRegistry(types ...proto.Message) (ref.TypeRegistry, error) { func NewRegistry(types ...proto.Message) (*Registry, error) {
p := &protoTypeRegistry{ p := &Registry{
revTypeMap: make(map[string]ref.Type), revTypeMap: make(map[string]*Type),
pbdb: pb.NewDb(), pbdb: pb.NewDb(),
} }
err := p.RegisterType( err := p.RegisterType(
@ -79,18 +126,17 @@ func NewRegistry(types ...proto.Message) (ref.TypeRegistry, error) {
} }
// NewEmptyRegistry returns a registry which is completely unconfigured. // NewEmptyRegistry returns a registry which is completely unconfigured.
func NewEmptyRegistry() ref.TypeRegistry { func NewEmptyRegistry() *Registry {
return &protoTypeRegistry{ return &Registry{
revTypeMap: make(map[string]ref.Type), revTypeMap: make(map[string]*Type),
pbdb: pb.NewDb(), pbdb: pb.NewDb(),
} }
} }
// Copy implements the ref.TypeRegistry interface method which copies the current state of the // Copy copies the current state of the registry into its own memory space.
// registry into its own memory space. func (p *Registry) Copy() *Registry {
func (p *protoTypeRegistry) Copy() ref.TypeRegistry { copy := &Registry{
copy := &protoTypeRegistry{ revTypeMap: make(map[string]*Type),
revTypeMap: make(map[string]ref.Type),
pbdb: p.pbdb.Copy(), pbdb: p.pbdb.Copy(),
} }
for k, v := range p.revTypeMap { for k, v := range p.revTypeMap {
@ -99,7 +145,8 @@ func (p *protoTypeRegistry) Copy() ref.TypeRegistry {
return copy return copy
} }
func (p *protoTypeRegistry) EnumValue(enumName string) ref.Val { // EnumValue returns the numeric value of the given enum value name.
func (p *Registry) EnumValue(enumName string) ref.Val {
enumVal, found := p.pbdb.DescribeEnum(enumName) enumVal, found := p.pbdb.DescribeEnum(enumName)
if !found { if !found {
return NewErr("unknown enum name '%s'", enumName) return NewErr("unknown enum name '%s'", enumName)
@ -107,9 +154,12 @@ func (p *protoTypeRegistry) EnumValue(enumName string) ref.Val {
return Int(enumVal.Value()) return Int(enumVal.Value())
} }
func (p *protoTypeRegistry) FindFieldType(messageType string, // FieldFieldType returns the field type for a checked type value. Returns false if
fieldName string) (*ref.FieldType, bool) { // the field could not be found.
msgType, found := p.pbdb.DescribeType(messageType) //
// Deprecated: use FindStructFieldType
func (p *Registry) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) {
msgType, found := p.pbdb.DescribeType(structType)
if !found { if !found {
return nil, false return nil, false
} }
@ -118,15 +168,32 @@ func (p *protoTypeRegistry) FindFieldType(messageType string,
return nil, false return nil, false
} }
return &ref.FieldType{ return &ref.FieldType{
Type: field.CheckedType(), Type: field.CheckedType(),
IsSet: field.IsSet, IsSet: field.IsSet,
GetFrom: field.GetFrom}, GetFrom: field.GetFrom}, true
true
} }
func (p *protoTypeRegistry) FindIdent(identName string) (ref.Val, bool) { // FieldStructFieldType returns the field type for a checked type value. Returns
// false if the field could not be found.
func (p *Registry) FindStructFieldType(structType, fieldName string) (*FieldType, bool) {
msgType, found := p.pbdb.DescribeType(structType)
if !found {
return nil, false
}
field, found := msgType.FieldByName(fieldName)
if !found {
return nil, false
}
return &FieldType{
Type: fieldDescToCELType(field),
IsSet: field.IsSet,
GetFrom: field.GetFrom}, true
}
// FindIdent takes a qualified identifier name and returns a ref.Val if one exists.
func (p *Registry) FindIdent(identName string) (ref.Val, bool) {
if t, found := p.revTypeMap[identName]; found { if t, found := p.revTypeMap[identName]; found {
return t.(ref.Val), true return t, true
} }
if enumVal, found := p.pbdb.DescribeEnum(identName); found { if enumVal, found := p.pbdb.DescribeEnum(identName); found {
return Int(enumVal.Value()), true return Int(enumVal.Value()), true
@ -134,24 +201,50 @@ func (p *protoTypeRegistry) FindIdent(identName string) (ref.Val, bool) {
return nil, false return nil, false
} }
func (p *protoTypeRegistry) FindType(typeName string) (*exprpb.Type, bool) { // FindType looks up the Type given a qualified typeName. Returns false if not found.
if _, found := p.pbdb.DescribeType(typeName); !found { //
// Deprecated: use FindStructType
func (p *Registry) FindType(structType string) (*exprpb.Type, bool) {
if _, found := p.pbdb.DescribeType(structType); !found {
return nil, false return nil, false
} }
if typeName != "" && typeName[0] == '.' { if structType != "" && structType[0] == '.' {
typeName = typeName[1:] structType = structType[1:]
} }
return &exprpb.Type{ return &exprpb.Type{
TypeKind: &exprpb.Type_Type{ TypeKind: &exprpb.Type_Type{
Type: &exprpb.Type{ Type: &exprpb.Type{
TypeKind: &exprpb.Type_MessageType{ TypeKind: &exprpb.Type_MessageType{
MessageType: typeName}}}}, true MessageType: structType}}}}, true
} }
func (p *protoTypeRegistry) NewValue(typeName string, fields map[string]ref.Val) ref.Val { // FindStructType returns the Type give a qualified type name.
td, found := p.pbdb.DescribeType(typeName) //
// For historical reasons, only struct types are expected to be returned through this
// method, and the type values are expected to be wrapped in a TypeType instance using
// TypeTypeWithParam(<structType>).
//
// Returns false if not found.
func (p *Registry) FindStructType(structType string) (*Type, bool) {
if _, found := p.pbdb.DescribeType(structType); !found {
return nil, false
}
if structType != "" && structType[0] == '.' {
structType = structType[1:]
}
return NewTypeTypeWithParam(NewObjectType(structType)), true
}
// NewValue creates a new type value from a qualified name and map of field
// name to value.
//
// Note, for each value, the Val.ConvertToNative function will be invoked
// to convert the Val to the field's native type. If an error occurs during
// conversion, the NewValue will be a types.Err.
func (p *Registry) NewValue(structType string, fields map[string]ref.Val) ref.Val {
td, found := p.pbdb.DescribeType(structType)
if !found { if !found {
return NewErr("unknown type '%s'", typeName) return NewErr("unknown type '%s'", structType)
} }
msg := td.New() msg := td.New()
fieldMap := td.FieldMap() fieldMap := td.FieldMap()
@ -168,7 +261,8 @@ func (p *protoTypeRegistry) NewValue(typeName string, fields map[string]ref.Val)
return p.NativeToValue(msg.Interface()) return p.NativeToValue(msg.Interface())
} }
func (p *protoTypeRegistry) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error { // RegisterDescriptor registers the contents of a protocol buffer `FileDescriptor`.
func (p *Registry) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error {
fd, err := p.pbdb.RegisterDescriptor(fileDesc) fd, err := p.pbdb.RegisterDescriptor(fileDesc)
if err != nil { if err != nil {
return err return err
@ -176,7 +270,8 @@ func (p *protoTypeRegistry) RegisterDescriptor(fileDesc protoreflect.FileDescrip
return p.registerAllTypes(fd) return p.registerAllTypes(fd)
} }
func (p *protoTypeRegistry) RegisterMessage(message proto.Message) error { // RegisterMessage registers a protocol buffer message and its dependencies.
func (p *Registry) RegisterMessage(message proto.Message) error {
fd, err := p.pbdb.RegisterMessage(message) fd, err := p.pbdb.RegisterMessage(message)
if err != nil { if err != nil {
return err return err
@ -184,11 +279,32 @@ func (p *protoTypeRegistry) RegisterMessage(message proto.Message) error {
return p.registerAllTypes(fd) return p.registerAllTypes(fd)
} }
func (p *protoTypeRegistry) RegisterType(types ...ref.Type) error { // RegisterType registers a type value with the provider which ensures the provider is aware of how to
// map the type to an identifier.
//
// If the `ref.Type` value is a `*types.Type` it will be registered directly by its runtime type name.
// If the `ref.Type` value is not a `*types.Type` instance, a `*types.Type` instance which reflects the
// traits present on the input and the runtime type name. By default this foreign type will be treated
// as a types.StructKind. To avoid potential issues where the `ref.Type` values does not match the
// generated `*types.Type` instance, consider always using the `*types.Type` to represent type extensions
// to CEL, even when they're not based on protobuf types.
func (p *Registry) RegisterType(types ...ref.Type) error {
for _, t := range types { for _, t := range types {
p.revTypeMap[t.TypeName()] = t celType := maybeForeignType(t)
existing, found := p.revTypeMap[t.TypeName()]
if !found {
p.revTypeMap[t.TypeName()] = celType
continue
}
if !existing.IsEquivalentType(celType) {
return fmt.Errorf("type registration conflict. found: %v, input: %v", existing, celType)
}
if existing.traitMask != celType.traitMask {
return fmt.Errorf(
"type registered with conflicting traits: %v with traits %v, input: %v",
existing.TypeName(), existing.traitMask, celType.traitMask)
}
} }
// TODO: generate an error when the type name is registered more than once.
return nil return nil
} }
@ -196,7 +312,7 @@ func (p *protoTypeRegistry) RegisterType(types ...ref.Type) error {
// providing support for custom proto-based types. // providing support for custom proto-based types.
// //
// This method should be the inverse of ref.Val.ConvertToNative. // This method should be the inverse of ref.Val.ConvertToNative.
func (p *protoTypeRegistry) NativeToValue(value any) ref.Val { func (p *Registry) NativeToValue(value any) ref.Val {
if val, found := nativeToValue(p, value); found { if val, found := nativeToValue(p, value); found {
return val return val
} }
@ -218,7 +334,7 @@ func (p *protoTypeRegistry) NativeToValue(value any) ref.Val {
if !found { if !found {
return NewErr("unknown type: '%s'", typeName) return NewErr("unknown type: '%s'", typeName)
} }
return NewObject(p, td, typeVal.(*TypeValue), v) return NewObject(p, td, typeVal, v)
case *pb.Map: case *pb.Map:
return NewProtoMap(p, v) return NewProtoMap(p, v)
case protoreflect.List: case protoreflect.List:
@ -231,8 +347,13 @@ func (p *protoTypeRegistry) NativeToValue(value any) ref.Val {
return UnsupportedRefValConversionErr(value) return UnsupportedRefValConversionErr(value)
} }
func (p *protoTypeRegistry) registerAllTypes(fd *pb.FileDescription) error { func (p *Registry) registerAllTypes(fd *pb.FileDescription) error {
for _, typeName := range fd.GetTypeNames() { for _, typeName := range fd.GetTypeNames() {
// skip well-known type names since they're automatically sanitized
// during NewObjectType() calls.
if _, found := checkedWellKnowns[typeName]; found {
continue
}
err := p.RegisterType(NewObjectTypeValue(typeName)) err := p.RegisterType(NewObjectTypeValue(typeName))
if err != nil { if err != nil {
return err return err
@ -241,6 +362,28 @@ func (p *protoTypeRegistry) registerAllTypes(fd *pb.FileDescription) error {
return nil return nil
} }
func fieldDescToCELType(field *pb.FieldDescription) *Type {
if field.IsMap() {
return NewMapType(
singularFieldDescToCELType(field.KeyType),
singularFieldDescToCELType(field.ValueType))
}
if field.IsList() {
return NewListType(singularFieldDescToCELType(field))
}
return singularFieldDescToCELType(field)
}
func singularFieldDescToCELType(field *pb.FieldDescription) *Type {
if field.IsMessage() {
return NewObjectType(string(field.Descriptor().Message().FullName()))
}
if field.IsEnum() {
return IntType
}
return ProtoCELPrimitives[field.ProtoKind()]
}
// defaultTypeAdapter converts go native types to CEL values. // defaultTypeAdapter converts go native types to CEL values.
type defaultTypeAdapter struct{} type defaultTypeAdapter struct{}
@ -259,7 +402,7 @@ func (a *defaultTypeAdapter) NativeToValue(value any) ref.Val {
// nativeToValue returns the converted (ref.Val, true) of a conversion is found, // nativeToValue returns the converted (ref.Val, true) of a conversion is found,
// otherwise (nil, false) // otherwise (nil, false)
func nativeToValue(a ref.TypeAdapter, value any) (ref.Val, bool) { func nativeToValue(a Adapter, value any) (ref.Val, bool) {
switch v := value.(type) { switch v := value.(type) {
case nil: case nil:
return NullValue, true return NullValue, true
@ -547,3 +690,24 @@ func fieldTypeConversionError(field *pb.FieldDescription, err error) error {
msgName := field.Descriptor().ContainingMessage().FullName() msgName := field.Descriptor().ContainingMessage().FullName()
return fmt.Errorf("field type conversion error for %v.%v value type: %v", msgName, field.Name(), err) return fmt.Errorf("field type conversion error for %v.%v value type: %v", msgName, field.Name(), err)
} }
var (
// ProtoCELPrimitives provides a map from the protoreflect Kind to the equivalent CEL type.
ProtoCELPrimitives = map[protoreflect.Kind]*Type{
protoreflect.BoolKind: BoolType,
protoreflect.BytesKind: BytesType,
protoreflect.DoubleKind: DoubleType,
protoreflect.FloatKind: DoubleType,
protoreflect.Int32Kind: IntType,
protoreflect.Int64Kind: IntType,
protoreflect.Sint32Kind: IntType,
protoreflect.Sint64Kind: IntType,
protoreflect.Uint32Kind: UintType,
protoreflect.Uint64Kind: UintType,
protoreflect.Fixed32Kind: UintType,
protoreflect.Fixed64Kind: UintType,
protoreflect.Sfixed32Kind: IntType,
protoreflect.Sfixed64Kind: IntType,
protoreflect.StringKind: StringType,
}
)
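// A minimal sketch (not part of this vendored change) of the mapping above: signed and
// fixed-width proto kinds collapse onto CEL int/uint, and 32-bit floats widen to double.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	fmt.Println(types.ProtoCELPrimitives[protoreflect.Sint32Kind])  // int
	fmt.Println(types.ProtoCELPrimitives[protoreflect.Fixed64Kind]) // uint
	fmt.Println(types.ProtoCELPrimitives[protoreflect.FloatKind])   // double
}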


@ -23,34 +23,34 @@ import (
// TypeProvider specifies functions for creating new object instances and for
// resolving enum values by name.
//
// Deprecated: use types.Provider
type TypeProvider interface {
// EnumValue returns the numeric value of the given enum value name.
EnumValue(enumName string) Val
// FindIdent takes a qualified identifier name and returns a Value if one exists.
FindIdent(identName string) (Val, bool)
// FindType looks up the Type given a qualified typeName. Returns false if not found.
//
// Used during type-checking only.
FindType(typeName string) (*exprpb.Type, bool)
// FindFieldType returns the field type for a checked type value. Returns false if
// the field could not be found.
FindFieldType(messageType, fieldName string) (*FieldType, bool)
// NewValue creates a new type value from a qualified name and map of field name
// to value.
//
// Note, for each value, the Val.ConvertToNative function will be invoked to convert
// the Val to the field's native type. If an error occurs during conversion, the
// NewValue will be a types.Err.
NewValue(typeName string, fields map[string]Val) Val
}
// TypeAdapter converts native Go values of varying type and complexity to equivalent CEL values. // TypeAdapter converts native Go values of varying type and complexity to equivalent CEL values.
//
// Deprecated: use types.Adapter
type TypeAdapter interface { type TypeAdapter interface {
// NativeToValue converts the input `value` to a CEL `ref.Val`. // NativeToValue converts the input `value` to a CEL `ref.Val`.
NativeToValue(value any) Val NativeToValue(value any) Val
@ -60,6 +60,8 @@ type TypeAdapter interface {
// implementations support type-customization, so these features are optional. However, a // implementations support type-customization, so these features are optional. However, a
// `TypeRegistry` should be a `TypeProvider` and a `TypeAdapter` to ensure that types // `TypeRegistry` should be a `TypeProvider` and a `TypeAdapter` to ensure that types
// which are registered can be converted to CEL representations. // which are registered can be converted to CEL representations.
//
// Deprecated: use types.Registry
type TypeRegistry interface { type TypeRegistry interface {
TypeAdapter TypeAdapter
TypeProvider TypeProvider
@ -76,15 +78,14 @@ type TypeRegistry interface {
// If a type is provided more than once with an alternative definition, the // If a type is provided more than once with an alternative definition, the
// call will result in an error. // call will result in an error.
RegisterType(types ...Type) error RegisterType(types ...Type) error
// Copy the TypeRegistry and return a new registry whose mutable state is isolated.
Copy() TypeRegistry
} }
// FieldType represents a field's type value and whether that field supports // FieldType represents a field's type value and whether that field supports
// presence detection. // presence detection.
//
// Deprecated: use types.FieldType
type FieldType struct { type FieldType struct {
// Type of the field. // Type of the field as a protobuf type value.
Type *exprpb.Type Type *exprpb.Type
// IsSet indicates whether the field is set on an input object. // IsSet indicates whether the field is set on an input object.


@ -24,7 +24,6 @@ import (
"github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
anypb "google.golang.org/protobuf/types/known/anypb" anypb "google.golang.org/protobuf/types/known/anypb"
structpb "google.golang.org/protobuf/types/known/structpb" structpb "google.golang.org/protobuf/types/known/structpb"
@ -36,18 +35,10 @@ import (
type String string type String string
var (
stringOneArgOverloads = map[string]func(ref.Val, ref.Val) ref.Val{
overloads.Contains: StringContains,
overloads.EndsWith: StringEndsWith,
overloads.StartsWith: StringStartsWith,
}
stringWrapperType = reflect.TypeOf(&wrapperspb.StringValue{}) stringWrapperType = reflect.TypeOf(&wrapperspb.StringValue{})
@ -198,26 +189,41 @@ func (s String) Value() any {
return string(s) return string(s)
} }
func stringContains(s String, sub ref.Val) ref.Val { // StringContains returns whether the string contains a substring.
func StringContains(s, sub ref.Val) ref.Val {
str, ok := s.(String)
if !ok {
return MaybeNoSuchOverloadErr(s)
}
subStr, ok := sub.(String) subStr, ok := sub.(String)
if !ok { if !ok {
return MaybeNoSuchOverloadErr(sub) return MaybeNoSuchOverloadErr(sub)
} }
return Bool(strings.Contains(string(s), string(subStr))) return Bool(strings.Contains(string(str), string(subStr)))
} }
func stringEndsWith(s String, suf ref.Val) ref.Val { // StringEndsWith returns whether the target string contains the input suffix.
func StringEndsWith(s, suf ref.Val) ref.Val {
str, ok := s.(String)
if !ok {
return MaybeNoSuchOverloadErr(s)
}
sufStr, ok := suf.(String) sufStr, ok := suf.(String)
if !ok { if !ok {
return MaybeNoSuchOverloadErr(suf) return MaybeNoSuchOverloadErr(suf)
} }
return Bool(strings.HasSuffix(string(s), string(sufStr))) return Bool(strings.HasSuffix(string(str), string(sufStr)))
} }
func stringStartsWith(s String, pre ref.Val) ref.Val { // StringStartsWith returns whether the target string contains the input prefix.
func StringStartsWith(s, pre ref.Val) ref.Val {
str, ok := s.(String)
if !ok {
return MaybeNoSuchOverloadErr(s)
}
preStr, ok := pre.(String) preStr, ok := pre.(String)
if !ok { if !ok {
return MaybeNoSuchOverloadErr(pre) return MaybeNoSuchOverloadErr(pre)
} }
return Bool(strings.HasPrefix(string(s), string(preStr))) return Bool(strings.HasPrefix(string(str), string(preStr)))
} }
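// A minimal sketch (not part of this vendored change): the newly exported string overloads
// take ref.Val operands and guard their types, so a mistyped operand yields an error value
// rather than panicking. Assumes only the exported types package API shown above.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	fmt.Println(types.StringContains(types.String("hello"), types.String("ell"))) // true
	fmt.Println(types.StringEndsWith(types.String("hello"), types.String("lo")))  // true
	// A non-string receiver produces a no-such-overload error value.
	fmt.Println(types.StringStartsWith(types.Int(1), types.String("h")))
}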


@ -23,7 +23,6 @@ import (
"github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
anypb "google.golang.org/protobuf/types/known/anypb" anypb "google.golang.org/protobuf/types/known/anypb"
structpb "google.golang.org/protobuf/types/known/structpb" structpb "google.golang.org/protobuf/types/known/structpb"
@ -53,15 +52,6 @@ const (
maxUnixTime int64 = 253402300799 maxUnixTime int64 = 253402300799
) )
var (
// TimestampType singleton.
TimestampType = NewTypeValue("google.protobuf.Timestamp",
traits.AdderType,
traits.ComparerType,
traits.ReceiverType,
traits.SubtractorType)
)
// Add implements traits.Adder.Add. // Add implements traits.Adder.Add.
func (t Timestamp) Add(other ref.Val) ref.Val { func (t Timestamp) Add(other ref.Val) ref.Val {
switch other.Type() { switch other.Type() {


@ -1,102 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"fmt"
"reflect"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
)
var (
// TypeType is the type of a TypeValue.
TypeType = NewTypeValue("type")
)
// TypeValue is an instance of a Value that describes a value's type.
type TypeValue struct {
name string
traitMask int
}
// NewTypeValue returns *TypeValue which is both a ref.Type and ref.Val.
func NewTypeValue(name string, traits ...int) *TypeValue {
traitMask := 0
for _, trait := range traits {
traitMask |= trait
}
return &TypeValue{
name: name,
traitMask: traitMask}
}
// NewObjectTypeValue returns a *TypeValue based on the input name, which is
// annotated with the traits relevant to all objects.
func NewObjectTypeValue(name string) *TypeValue {
return NewTypeValue(name,
traits.FieldTesterType,
traits.IndexerType)
}
// ConvertToNative implements ref.Val.ConvertToNative.
func (t *TypeValue) ConvertToNative(typeDesc reflect.Type) (any, error) {
// TODO: replace the internal type representation with a proto-value.
return nil, fmt.Errorf("type conversion not supported for 'type'")
}
// ConvertToType implements ref.Val.ConvertToType.
func (t *TypeValue) ConvertToType(typeVal ref.Type) ref.Val {
switch typeVal {
case TypeType:
return TypeType
case StringType:
return String(t.TypeName())
}
return NewErr("type conversion error from '%s' to '%s'", TypeType, typeVal)
}
// Equal implements ref.Val.Equal.
func (t *TypeValue) Equal(other ref.Val) ref.Val {
otherType, ok := other.(ref.Type)
return Bool(ok && t.TypeName() == otherType.TypeName())
}
// HasTrait indicates whether the type supports the given trait.
// Trait codes are defined in the traits package, e.g. see traits.AdderType.
func (t *TypeValue) HasTrait(trait int) bool {
return trait&t.traitMask == trait
}
// String implements fmt.Stringer.
func (t *TypeValue) String() string {
return t.name
}
// Type implements ref.Val.Type.
func (t *TypeValue) Type() ref.Type {
return TypeType
}
// TypeName gives the type's name as a string.
func (t *TypeValue) TypeName() string {
return t.name
}
// Value implements ref.Val.Value.
func (t *TypeValue) Value() any {
return t.name
}

806
vendor/github.com/google/cel-go/common/types/types.go generated vendored Normal file

@ -0,0 +1,806 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"fmt"
"reflect"
"strings"
chkdecls "github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// Kind indicates a CEL type's kind which is used to differentiate quickly between simple
// and complex types.
type Kind uint
const (
// UnspecifiedKind is returned when the type is nil or its kind is not specified.
UnspecifiedKind Kind = iota
// DynKind represents a dynamic type. This kind only exists at type-check time.
DynKind
// AnyKind represents a google.protobuf.Any type. This kind only exists at type-check time.
// Prefer DynKind to AnyKind as AnyKind has a specific meaning which is based on protobuf
// well-known types.
AnyKind
// BoolKind represents a boolean type.
BoolKind
// BytesKind represents a bytes type.
BytesKind
// DoubleKind represents a double type.
DoubleKind
// DurationKind represents a CEL duration type.
DurationKind
// ErrorKind represents a CEL error type.
ErrorKind
// IntKind represents an integer type.
IntKind
// ListKind represents a list type.
ListKind
// MapKind represents a map type.
MapKind
// NullTypeKind represents a null type.
NullTypeKind
// OpaqueKind represents an abstract type which has no accessible fields.
OpaqueKind
// StringKind represents a string type.
StringKind
// StructKind represents a structured object with typed fields.
StructKind
// TimestampKind represents a CEL time type.
TimestampKind
// TypeKind represents the CEL type.
TypeKind
// TypeParamKind represents a parameterized type whose type name will be resolved at type-check time, if possible.
TypeParamKind
// UintKind represents a uint type.
UintKind
// UnknownKind represents an unknown value type.
UnknownKind
)
var (
// AnyType represents the google.protobuf.Any type.
AnyType = &Type{
kind: AnyKind,
runtimeTypeName: "google.protobuf.Any",
traitMask: traits.FieldTesterType |
traits.IndexerType,
}
// BoolType represents the bool type.
BoolType = &Type{
kind: BoolKind,
runtimeTypeName: "bool",
traitMask: traits.ComparerType |
traits.NegatorType,
}
// BytesType represents the bytes type.
BytesType = &Type{
kind: BytesKind,
runtimeTypeName: "bytes",
traitMask: traits.AdderType |
traits.ComparerType |
traits.SizerType,
}
// DoubleType represents the double type.
DoubleType = &Type{
kind: DoubleKind,
runtimeTypeName: "double",
traitMask: traits.AdderType |
traits.ComparerType |
traits.DividerType |
traits.MultiplierType |
traits.NegatorType |
traits.SubtractorType,
}
// DurationType represents the CEL duration type.
DurationType = &Type{
kind: DurationKind,
runtimeTypeName: "google.protobuf.Duration",
traitMask: traits.AdderType |
traits.ComparerType |
traits.NegatorType |
traits.ReceiverType |
traits.SubtractorType,
}
// DynType represents a dynamic CEL type whose type will be determined at runtime from context.
DynType = &Type{
kind: DynKind,
runtimeTypeName: "dyn",
}
// ErrorType represents a CEL error value.
ErrorType = &Type{
kind: ErrorKind,
runtimeTypeName: "error",
}
// IntType represents the int type.
IntType = &Type{
kind: IntKind,
runtimeTypeName: "int",
traitMask: traits.AdderType |
traits.ComparerType |
traits.DividerType |
traits.ModderType |
traits.MultiplierType |
traits.NegatorType |
traits.SubtractorType,
}
// ListType represents the runtime list type.
ListType = NewListType(nil)
// MapType represents the runtime map type.
MapType = NewMapType(nil, nil)
// NullType represents the type of a null value.
NullType = &Type{
kind: NullTypeKind,
runtimeTypeName: "null_type",
}
// StringType represents the string type.
StringType = &Type{
kind: StringKind,
runtimeTypeName: "string",
traitMask: traits.AdderType |
traits.ComparerType |
traits.MatcherType |
traits.ReceiverType |
traits.SizerType,
}
// TimestampType represents the time type.
TimestampType = &Type{
kind: TimestampKind,
runtimeTypeName: "google.protobuf.Timestamp",
traitMask: traits.AdderType |
traits.ComparerType |
traits.ReceiverType |
traits.SubtractorType,
}
// TypeType represents a CEL type
TypeType = &Type{
kind: TypeKind,
runtimeTypeName: "type",
}
// UintType represents a uint type.
UintType = &Type{
kind: UintKind,
runtimeTypeName: "uint",
traitMask: traits.AdderType |
traits.ComparerType |
traits.DividerType |
traits.ModderType |
traits.MultiplierType |
traits.SubtractorType,
}
// UnknownType represents an unknown value type.
UnknownType = &Type{
kind: UnknownKind,
runtimeTypeName: "unknown",
}
)
var _ ref.Type = &Type{}
var _ ref.Val = &Type{}
// Type holds a reference to a runtime type with an optional type-checked set of type parameters.
type Type struct {
// kind indicates general category of the type.
kind Kind
// parameters holds the optional type-checked set of type Parameters that are used during static analysis.
parameters []*Type
// runtimeTypeName indicates the runtime type name of the type.
runtimeTypeName string
// isAssignableType function determines whether one type is assignable to this type.
// A nil value for the isAssignableType function falls back to equality of kind, runtimeType, and parameters.
isAssignableType func(other *Type) bool
// isAssignableRuntimeType function determines whether the runtime type (with erasure) is assignable to this type.
// A nil value for the isAssignableRuntimeType function falls back to the equality of the type or type name.
isAssignableRuntimeType func(other ref.Val) bool
// traitMask is a mask of flags which indicate the capabilities of the type.
traitMask int
}
// ConvertToNative implements ref.Val.ConvertToNative.
func (t *Type) ConvertToNative(typeDesc reflect.Type) (any, error) {
return nil, fmt.Errorf("type conversion not supported for 'type'")
}
// ConvertToType implements ref.Val.ConvertToType.
func (t *Type) ConvertToType(typeVal ref.Type) ref.Val {
switch typeVal {
case TypeType:
return TypeType
case StringType:
return String(t.TypeName())
}
return NewErr("type conversion error from '%s' to '%s'", TypeType, typeVal)
}
// Equal indicates whether two types have the same runtime type name.
//
// The name Equal is a bit of a misnomer, but for historical reasons, this is the
// runtime behavior. For a more accurate definition see IsType().
func (t *Type) Equal(other ref.Val) ref.Val {
otherType, ok := other.(ref.Type)
return Bool(ok && t.TypeName() == otherType.TypeName())
}
// HasTrait implements the ref.Type interface method.
func (t *Type) HasTrait(trait int) bool {
return trait&t.traitMask == trait
}
// IsExactType indicates whether the two types are exactly the same. This check also verifies type parameter type names.
func (t *Type) IsExactType(other *Type) bool {
return t.isTypeInternal(other, true)
}
// IsEquivalentType indicates whether two types are equivalent. This check ignores type parameter type names.
func (t *Type) IsEquivalentType(other *Type) bool {
return t.isTypeInternal(other, false)
}
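// A minimal sketch (not part of this vendored change): exactness checks type-parameter names,
// equivalence does not.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	listOfT := types.NewListType(types.NewTypeParamType("T"))
	listOfU := types.NewListType(types.NewTypeParamType("U"))
	fmt.Println(listOfT.IsEquivalentType(listOfU)) // true
	fmt.Println(listOfT.IsExactType(listOfU))      // false
}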
// Kind indicates general category of the type.
func (t *Type) Kind() Kind {
if t == nil {
return UnspecifiedKind
}
return t.kind
}
// isTypeInternal checks whether the two types are equivalent or exactly the same based on the checkTypeParamName flag.
func (t *Type) isTypeInternal(other *Type, checkTypeParamName bool) bool {
if t == nil {
return false
}
if t == other {
return true
}
if t.Kind() != other.Kind() || len(t.Parameters()) != len(other.Parameters()) {
return false
}
if (checkTypeParamName || t.Kind() != TypeParamKind) && t.TypeName() != other.TypeName() {
return false
}
for i, p := range t.Parameters() {
if !p.isTypeInternal(other.Parameters()[i], checkTypeParamName) {
return false
}
}
return true
}
// IsAssignableType determines whether the current type is type-check assignable from the input fromType.
func (t *Type) IsAssignableType(fromType *Type) bool {
if t == nil {
return false
}
if t.isAssignableType != nil {
return t.isAssignableType(fromType)
}
return t.defaultIsAssignableType(fromType)
}
// IsAssignableRuntimeType determines whether the current type is runtime assignable from the input runtimeType.
//
// At runtime, parameterized types are erased and so a function which type-checks to support a map(string, string)
// will have a runtime assignable type of a map.
func (t *Type) IsAssignableRuntimeType(val ref.Val) bool {
if t == nil {
return false
}
if t.isAssignableRuntimeType != nil {
return t.isAssignableRuntimeType(val)
}
return t.defaultIsAssignableRuntimeType(val)
}
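// A minimal sketch (not part of this vendored change): at runtime the map parameters are erased,
// so assignability samples one key/value pair from the concrete value. Uses the exported
// types.DefaultTypeAdapter referenced elsewhere in this change.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	mapVal := types.DefaultTypeAdapter.NativeToValue(map[string]string{"a": "b"})
	strMap := types.NewMapType(types.StringType, types.StringType)
	intMap := types.NewMapType(types.StringType, types.IntType)
	fmt.Println(strMap.IsAssignableRuntimeType(mapVal)) // true
	fmt.Println(intMap.IsAssignableRuntimeType(mapVal)) // false
}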
// Parameters returns the list of type parameters if set.
//
// For ListKind, Parameters()[0] represents the list element type
// For MapKind, Parameters()[0] represents the map key type, and Parameters()[1] represents the map
// value type.
func (t *Type) Parameters() []*Type {
if t == nil {
return emptyParams
}
return t.parameters
}
// DeclaredTypeName indicates the fully qualified and parameterized type-check type name.
func (t *Type) DeclaredTypeName() string {
// if the type itself is neither null, nor dyn, but is assignable to null, then it's a wrapper type.
if t.Kind() != NullTypeKind && !t.isDyn() && t.IsAssignableType(NullType) {
return fmt.Sprintf("wrapper(%s)", t.TypeName())
}
return t.TypeName()
}
// Type implements the ref.Val interface method.
func (t *Type) Type() ref.Type {
return TypeType
}
// Value implements the ref.Val interface method.
func (t *Type) Value() any {
return t.TypeName()
}
// TypeName returns the type-erased fully qualified runtime type name.
//
// TypeName implements the ref.Type interface method.
func (t *Type) TypeName() string {
if t == nil {
return ""
}
return t.runtimeTypeName
}
// String returns a human-readable definition of the type name.
func (t *Type) String() string {
if len(t.Parameters()) == 0 {
return t.DeclaredTypeName()
}
params := make([]string, len(t.Parameters()))
for i, p := range t.Parameters() {
params[i] = p.String()
}
return fmt.Sprintf("%s(%s)", t.DeclaredTypeName(), strings.Join(params, ", "))
}
// isDyn indicates whether the type is dynamic in any way.
func (t *Type) isDyn() bool {
k := t.Kind()
return k == DynKind || k == AnyKind || k == TypeParamKind
}
// defaultIsAssignableType provides the standard definition of what it means for one type to be assignable to another
// where any of the following may return a true result:
// - The from types are the same instance
// - The target type is dynamic
// - The fromType has the same kind and type name as the target type, and all parameters of the
//   target type are IsAssignableType() from the parameters of the fromType.
func (t *Type) defaultIsAssignableType(fromType *Type) bool {
if t == fromType || t.isDyn() {
return true
}
if t.Kind() != fromType.Kind() ||
t.TypeName() != fromType.TypeName() ||
len(t.Parameters()) != len(fromType.Parameters()) {
return false
}
for i, tp := range t.Parameters() {
fp := fromType.Parameters()[i]
if !tp.IsAssignableType(fp) {
return false
}
}
return true
}
// defaultIsAssignableRuntimeType inspects the type and in the case of list and map elements, the key and element types
// to determine whether a ref.Val is assignable to the declared type for a function signature.
func (t *Type) defaultIsAssignableRuntimeType(val ref.Val) bool {
valType := val.Type()
// If the current type and value type don't agree, then return
if !(t.isDyn() || t.TypeName() == valType.TypeName()) {
return false
}
switch t.Kind() {
case ListKind:
elemType := t.Parameters()[0]
l := val.(traits.Lister)
if l.Size() == IntZero {
return true
}
it := l.Iterator()
elemVal := it.Next()
return elemType.IsAssignableRuntimeType(elemVal)
case MapKind:
keyType := t.Parameters()[0]
elemType := t.Parameters()[1]
m := val.(traits.Mapper)
if m.Size() == IntZero {
return true
}
it := m.Iterator()
keyVal := it.Next()
elemVal := m.Get(keyVal)
return keyType.IsAssignableRuntimeType(keyVal) && elemType.IsAssignableRuntimeType(elemVal)
}
return true
}
// NewListType creates an instance of a list type value with the provided element type.
func NewListType(elemType *Type) *Type {
return &Type{
kind: ListKind,
parameters: []*Type{elemType},
runtimeTypeName: "list",
traitMask: traits.AdderType |
traits.ContainerType |
traits.IndexerType |
traits.IterableType |
traits.SizerType,
}
}
// NewMapType creates an instance of a map type value with the provided key and value types.
func NewMapType(keyType, valueType *Type) *Type {
return &Type{
kind: MapKind,
parameters: []*Type{keyType, valueType},
runtimeTypeName: "map",
traitMask: traits.ContainerType |
traits.IndexerType |
traits.IterableType |
traits.SizerType,
}
}
// NewNullableType creates an instance of a nullable type with the provided wrapped type.
//
// Note: only primitive types are supported as wrapped types.
func NewNullableType(wrapped *Type) *Type {
return &Type{
kind: wrapped.Kind(),
parameters: wrapped.Parameters(),
runtimeTypeName: wrapped.TypeName(),
traitMask: wrapped.traitMask,
isAssignableType: func(other *Type) bool {
return NullType.IsAssignableType(other) || wrapped.IsAssignableType(other)
},
isAssignableRuntimeType: func(other ref.Val) bool {
return NullType.IsAssignableRuntimeType(other) || wrapped.IsAssignableRuntimeType(other)
},
}
}
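// A minimal sketch (not part of this vendored change): a nullable (wrapper) type accepts both
// null and the wrapped primitive, and reports a wrapper(...) declared name.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	wrappedInt := types.NewNullableType(types.IntType)
	fmt.Println(wrappedInt.IsAssignableType(types.NullType)) // true
	fmt.Println(wrappedInt.IsAssignableType(types.IntType))  // true
	fmt.Println(wrappedInt.IsAssignableType(types.UintType)) // false
	fmt.Println(wrappedInt.DeclaredTypeName())               // wrapper(int)
}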
// NewOptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional.
func NewOptionalType(param *Type) *Type {
return NewOpaqueType("optional", param)
}
// NewOpaqueType creates an abstract parameterized type with a given name.
func NewOpaqueType(name string, params ...*Type) *Type {
return &Type{
kind: OpaqueKind,
parameters: params,
runtimeTypeName: name,
}
}
// NewObjectType creates a type reference to an externally defined type, e.g. a protobuf message type.
//
// An object type is assumed to support field presence testing and field indexing. Additionally, the
// type may also indicate additional traits through the use of the optional traits vararg argument.
func NewObjectType(typeName string, traits ...int) *Type {
// Function sanitizes object types on the fly
if wkt, found := checkedWellKnowns[typeName]; found {
return wkt
}
traitMask := 0
for _, trait := range traits {
traitMask |= trait
}
return &Type{
kind: StructKind,
parameters: emptyParams,
runtimeTypeName: typeName,
traitMask: structTypeTraitMask | traitMask,
}
}
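// A minimal sketch (not part of this vendored change): NewObjectType sanitizes well-known
// protobuf type names into their CEL equivalents, while other names become struct types;
// "example.Msg" is a hypothetical message name.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	fmt.Println(types.NewObjectType("google.protobuf.Struct")) // map(string, dyn)
	fmt.Println(types.NewObjectType("google.protobuf.Value"))  // dyn
	fmt.Println(types.NewObjectType("example.Msg"))            // example.Msg
}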
// NewObjectTypeValue creates a type reference to an externally defined type.
//
// Deprecated: use cel.ObjectType(typeName)
func NewObjectTypeValue(typeName string) *Type {
return NewObjectType(typeName)
}
// NewTypeValue creates an opaque type which has a set of optional type traits as defined in
// the common/types/traits package.
//
// Deprecated: use cel.ObjectType(typeName, traits)
func NewTypeValue(typeName string, traits ...int) *Type {
traitMask := 0
for _, trait := range traits {
traitMask |= trait
}
return &Type{
kind: StructKind,
parameters: emptyParams,
runtimeTypeName: typeName,
traitMask: traitMask,
}
}
// NewTypeParamType creates a parameterized type instance.
func NewTypeParamType(paramName string) *Type {
return &Type{
kind: TypeParamKind,
runtimeTypeName: paramName,
}
}
// NewTypeTypeWithParam creates a type with a type parameter.
// Used for type-checking purposes, but equivalent to TypeType otherwise.
func NewTypeTypeWithParam(param *Type) *Type {
return &Type{
kind: TypeKind,
runtimeTypeName: "type",
parameters: []*Type{param},
}
}
// TypeToExprType converts a CEL-native type representation to a protobuf CEL Type representation.
func TypeToExprType(t *Type) (*exprpb.Type, error) {
switch t.Kind() {
case AnyKind:
return chkdecls.Any, nil
case BoolKind:
return maybeWrapper(t, chkdecls.Bool), nil
case BytesKind:
return maybeWrapper(t, chkdecls.Bytes), nil
case DoubleKind:
return maybeWrapper(t, chkdecls.Double), nil
case DurationKind:
return chkdecls.Duration, nil
case DynKind:
return chkdecls.Dyn, nil
case ErrorKind:
return chkdecls.Error, nil
case IntKind:
return maybeWrapper(t, chkdecls.Int), nil
case ListKind:
if len(t.Parameters()) != 1 {
return nil, fmt.Errorf("invalid list, got %d parameters, wanted one", len(t.Parameters()))
}
et, err := TypeToExprType(t.Parameters()[0])
if err != nil {
return nil, err
}
return chkdecls.NewListType(et), nil
case MapKind:
if len(t.Parameters()) != 2 {
return nil, fmt.Errorf("invalid map, got %d parameters, wanted two", len(t.Parameters()))
}
kt, err := TypeToExprType(t.Parameters()[0])
if err != nil {
return nil, err
}
vt, err := TypeToExprType(t.Parameters()[1])
if err != nil {
return nil, err
}
return chkdecls.NewMapType(kt, vt), nil
case NullTypeKind:
return chkdecls.Null, nil
case OpaqueKind:
params := make([]*exprpb.Type, len(t.Parameters()))
for i, p := range t.Parameters() {
pt, err := TypeToExprType(p)
if err != nil {
return nil, err
}
params[i] = pt
}
return chkdecls.NewAbstractType(t.TypeName(), params...), nil
case StringKind:
return maybeWrapper(t, chkdecls.String), nil
case StructKind:
return chkdecls.NewObjectType(t.TypeName()), nil
case TimestampKind:
return chkdecls.Timestamp, nil
case TypeParamKind:
return chkdecls.NewTypeParamType(t.TypeName()), nil
case TypeKind:
if len(t.Parameters()) == 1 {
p, err := TypeToExprType(t.Parameters()[0])
if err != nil {
return nil, err
}
return chkdecls.NewTypeType(p), nil
}
return chkdecls.NewTypeType(nil), nil
case UintKind:
return maybeWrapper(t, chkdecls.Uint), nil
}
return nil, fmt.Errorf("missing type conversion to proto: %v", t)
}
// ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation.
func ExprTypeToType(t *exprpb.Type) (*Type, error) {
switch t.GetTypeKind().(type) {
case *exprpb.Type_Dyn:
return DynType, nil
case *exprpb.Type_AbstractType_:
paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes()))
for i, p := range t.GetAbstractType().GetParameterTypes() {
pt, err := ExprTypeToType(p)
if err != nil {
return nil, err
}
paramTypes[i] = pt
}
return NewOpaqueType(t.GetAbstractType().GetName(), paramTypes...), nil
case *exprpb.Type_ListType_:
et, err := ExprTypeToType(t.GetListType().GetElemType())
if err != nil {
return nil, err
}
return NewListType(et), nil
case *exprpb.Type_MapType_:
kt, err := ExprTypeToType(t.GetMapType().GetKeyType())
if err != nil {
return nil, err
}
vt, err := ExprTypeToType(t.GetMapType().GetValueType())
if err != nil {
return nil, err
}
return NewMapType(kt, vt), nil
case *exprpb.Type_MessageType:
return NewObjectType(t.GetMessageType()), nil
case *exprpb.Type_Null:
return NullType, nil
case *exprpb.Type_Primitive:
switch t.GetPrimitive() {
case exprpb.Type_BOOL:
return BoolType, nil
case exprpb.Type_BYTES:
return BytesType, nil
case exprpb.Type_DOUBLE:
return DoubleType, nil
case exprpb.Type_INT64:
return IntType, nil
case exprpb.Type_STRING:
return StringType, nil
case exprpb.Type_UINT64:
return UintType, nil
default:
return nil, fmt.Errorf("unsupported primitive type: %v", t)
}
case *exprpb.Type_TypeParam:
return NewTypeParamType(t.GetTypeParam()), nil
case *exprpb.Type_Type:
if t.GetType().GetTypeKind() != nil {
p, err := ExprTypeToType(t.GetType())
if err != nil {
return nil, err
}
return NewTypeTypeWithParam(p), nil
}
return TypeType, nil
case *exprpb.Type_WellKnown:
switch t.GetWellKnown() {
case exprpb.Type_ANY:
return AnyType, nil
case exprpb.Type_DURATION:
return DurationType, nil
case exprpb.Type_TIMESTAMP:
return TimestampType, nil
default:
return nil, fmt.Errorf("unsupported well-known type: %v", t)
}
case *exprpb.Type_Wrapper:
t, err := ExprTypeToType(&exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: t.GetWrapper()}})
if err != nil {
return nil, err
}
return NewNullableType(t), nil
case *exprpb.Type_Error:
return ErrorType, nil
default:
return nil, fmt.Errorf("unsupported type: %v", t)
}
}
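// A minimal sketch (not part of this vendored change): round-tripping a parameterized CEL type
// through the protobuf checker representation and back preserves it exactly.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	mapType := types.NewMapType(types.StringType, types.NewListType(types.IntType))
	pbType, err := types.TypeToExprType(mapType)
	if err != nil {
		panic(err)
	}
	back, err := types.ExprTypeToType(pbType)
	if err != nil {
		panic(err)
	}
	fmt.Println(back)                      // map(string, list(int))
	fmt.Println(mapType.IsExactType(back)) // true
}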
func maybeWrapper(t *Type, pbType *exprpb.Type) *exprpb.Type {
if t.IsAssignableType(NullType) {
return chkdecls.NewWrapperType(pbType)
}
return pbType
}
func maybeForeignType(t ref.Type) *Type {
if celType, ok := t.(*Type); ok {
return celType
}
// Inspect the incoming type to determine its traits. The assumption will be that the incoming
// type does not have any field values; however, if the trait mask indicates that field testing
// and indexing are supported, the foreign type is marked as a struct.
traitMask := 0
for _, trait := range allTraits {
if t.HasTrait(trait) {
traitMask |= trait
}
}
// Treat the value like a struct. If it has no fields, this is harmless to denote the type
// as such since it basically becomes an opaque type by convention.
return NewObjectType(t.TypeName(), traitMask)
}
var (
checkedWellKnowns = map[string]*Type{
// Wrapper types.
"google.protobuf.BoolValue": NewNullableType(BoolType),
"google.protobuf.BytesValue": NewNullableType(BytesType),
"google.protobuf.DoubleValue": NewNullableType(DoubleType),
"google.protobuf.FloatValue": NewNullableType(DoubleType),
"google.protobuf.Int64Value": NewNullableType(IntType),
"google.protobuf.Int32Value": NewNullableType(IntType),
"google.protobuf.UInt64Value": NewNullableType(UintType),
"google.protobuf.UInt32Value": NewNullableType(UintType),
"google.protobuf.StringValue": NewNullableType(StringType),
// Well-known types.
"google.protobuf.Any": AnyType,
"google.protobuf.Duration": DurationType,
"google.protobuf.Timestamp": TimestampType,
// Json types.
"google.protobuf.ListValue": NewListType(DynType),
"google.protobuf.NullValue": NullType,
"google.protobuf.Struct": NewMapType(StringType, DynType),
"google.protobuf.Value": DynType,
}
emptyParams = []*Type{}
allTraits = []int{
traits.AdderType,
traits.ComparerType,
traits.ContainerType,
traits.DividerType,
traits.FieldTesterType,
traits.IndexerType,
traits.IterableType,
traits.IteratorType,
traits.MatcherType,
traits.ModderType,
traits.MultiplierType,
traits.NegatorType,
traits.ReceiverType,
traits.SizerType,
traits.SubtractorType,
}
structTypeTraitMask = traits.FieldTesterType | traits.IndexerType
)


@ -21,7 +21,6 @@ import (
"strconv" "strconv"
"github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
anypb "google.golang.org/protobuf/types/known/anypb" anypb "google.golang.org/protobuf/types/known/anypb"
structpb "google.golang.org/protobuf/types/known/structpb" structpb "google.golang.org/protobuf/types/known/structpb"
@ -32,15 +31,6 @@ import (
type Uint uint64 type Uint uint64
var ( var (
// UintType singleton.
UintType = NewTypeValue("uint",
traits.AdderType,
traits.ComparerType,
traits.DividerType,
traits.ModderType,
traits.MultiplierType,
traits.SubtractorType)
uint32WrapperType = reflect.TypeOf(&wrapperspb.UInt32Value{}) uint32WrapperType = reflect.TypeOf(&wrapperspb.UInt32Value{})
uint64WrapperType = reflect.TypeOf(&wrapperspb.UInt64Value{}) uint64WrapperType = reflect.TypeOf(&wrapperspb.UInt64Value{})


@ -15,52 +15,312 @@
package types package types
import ( import (
"fmt"
"math"
"reflect" "reflect"
"sort"
"strings"
"unicode"
"github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/ref"
) )
// Unknown type implementation which collects expression ids which caused the
// current value to become unknown.
type Unknown []int64
var (
unspecifiedAttribute = &AttributeTrail{qualifierPath: []any{}}
)
// NewAttributeTrail creates a new simple attribute from a variable name.
func NewAttributeTrail(variable string) *AttributeTrail {
if variable == "" {
return unspecifiedAttribute
}
return &AttributeTrail{variable: variable}
}
// AttributeTrail specifies a variable with an optional qualifier path. An attribute value is expected to
// correspond to an AbsoluteAttribute, meaning a field selection which starts with a top-level variable.
//
// The qualifier path elements adhere to the AttributeQualifier type constraint.
type AttributeTrail struct {
variable string
qualifierPath []any
}
// Equal returns whether two attribute values have the same variable name and qualifier paths.
func (a *AttributeTrail) Equal(other *AttributeTrail) bool {
if a.Variable() != other.Variable() || len(a.QualifierPath()) != len(other.QualifierPath()) {
return false
}
for i, q := range a.QualifierPath() {
qual := other.QualifierPath()[i]
if !qualifiersEqual(q, qual) {
return false
}
}
return true
}
func qualifiersEqual(a, b any) bool {
if a == b {
return true
}
switch numA := a.(type) {
case int64:
numB, ok := b.(uint64)
if !ok {
return false
}
return intUintEqual(numA, numB)
case uint64:
numB, ok := b.(int64)
if !ok {
return false
}
return intUintEqual(numB, numA)
default:
return false
}
}
func intUintEqual(i int64, u uint64) bool {
if i < 0 || u > math.MaxInt64 {
return false
}
return i == int64(u)
}
// Variable returns the variable name associated with the attribute.
func (a *AttributeTrail) Variable() string {
return a.variable
}
// QualifierPath returns the optional set of qualifying fields or indices applied to the variable.
func (a *AttributeTrail) QualifierPath() []any {
return a.qualifierPath
}
// String returns the string representation of the Attribute.
func (a *AttributeTrail) String() string {
if a.variable == "" {
return "<unspecified>"
}
var str strings.Builder
str.WriteString(a.variable)
for _, q := range a.qualifierPath {
switch q := q.(type) {
case bool, int64:
str.WriteString(fmt.Sprintf("[%v]", q))
case uint64:
str.WriteString(fmt.Sprintf("[%vu]", q))
case string:
if isIdentifierCharacter(q) {
str.WriteString(fmt.Sprintf(".%v", q))
} else {
str.WriteString(fmt.Sprintf("[%q]", q))
}
}
}
return str.String()
}
func isIdentifierCharacter(str string) bool {
for _, c := range str {
if unicode.IsLetter(c) || unicode.IsDigit(c) || string(c) == "_" {
continue
}
return false
}
return true
}
// AttributeQualifier constrains the possible types which may be used to qualify an attribute.
type AttributeQualifier interface {
bool | int64 | uint64 | string
}
// QualifyAttribute qualifies an attribute using a valid AttributeQualifier type.
func QualifyAttribute[T AttributeQualifier](attr *AttributeTrail, qualifier T) *AttributeTrail {
attr.qualifierPath = append(attr.qualifierPath, qualifier)
return attr
}
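// A minimal sketch (not part of this vendored change): building an attribute trail and rendering
// it; string qualifiers that are not simple identifiers are quoted, and int/uint qualifiers
// compare equal when they address the same element. The variable names are illustrative.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	attr := types.NewAttributeTrail("request")
	attr = types.QualifyAttribute[string](attr, "auth")
	attr = types.QualifyAttribute[string](attr, "user-id")
	attr = types.QualifyAttribute[int64](attr, 0)
	fmt.Println(attr) // request.auth["user-id"][0]

	a := types.QualifyAttribute[int64](types.NewAttributeTrail("x"), 3)
	b := types.QualifyAttribute[uint64](types.NewAttributeTrail("x"), 3)
	fmt.Println(a, b)       // x[3] x[3u]
	fmt.Println(a.Equal(b)) // true
}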
// Unknown type which collects expression ids which caused the current value to become unknown.
type Unknown struct {
attributeTrails map[int64][]*AttributeTrail
}
// NewUnknown creates a new unknown at a given expression id for an attribute.
//
// If the attribute is nil, the attribute value will be the `unspecifiedAttribute`.
func NewUnknown(id int64, attr *AttributeTrail) *Unknown {
if attr == nil {
attr = unspecifiedAttribute
}
return &Unknown{
attributeTrails: map[int64][]*AttributeTrail{id: {attr}},
}
}
// IDs returns the set of unknown expression ids contained by this value.
//
// Numeric identifiers are guaranteed to be in sorted order.
func (u *Unknown) IDs() []int64 {
ids := make(int64Slice, len(u.attributeTrails))
i := 0
for id := range u.attributeTrails {
ids[i] = id
i++
}
ids.Sort()
return ids
}
// GetAttributeTrails returns the attribute trails, if present, for a given expression id.
func (u *Unknown) GetAttributeTrails(id int64) ([]*AttributeTrail, bool) {
trails, found := u.attributeTrails[id]
return trails, found
}
// Contains returns true if the input unknown is a subset of the current unknown.
func (u *Unknown) Contains(other *Unknown) bool {
for id, otherTrails := range other.attributeTrails {
trails, found := u.attributeTrails[id]
if !found || len(otherTrails) != len(trails) {
return false
}
for _, ot := range otherTrails {
found := false
for _, t := range trails {
if t.Equal(ot) {
found = true
break
}
}
if !found {
return false
}
}
}
return true
}
// ConvertToNative implements ref.Val.ConvertToNative. // ConvertToNative implements ref.Val.ConvertToNative.
func (u Unknown) ConvertToNative(typeDesc reflect.Type) (any, error) { func (u *Unknown) ConvertToNative(typeDesc reflect.Type) (any, error) {
return u.Value(), nil return u.Value(), nil
} }
// ConvertToType is an identity function since unknown values cannot be modified. // ConvertToType is an identity function since unknown values cannot be modified.
func (u Unknown) ConvertToType(typeVal ref.Type) ref.Val { func (u *Unknown) ConvertToType(typeVal ref.Type) ref.Val {
return u return u
} }
// Equal is an identity function since unknown values cannot be modified. // Equal is an identity function since unknown values cannot be modified.
func (u Unknown) Equal(other ref.Val) ref.Val { func (u *Unknown) Equal(other ref.Val) ref.Val {
return u return u
} }
// String implements the Stringer interface
func (u *Unknown) String() string {
var str strings.Builder
for id, attrs := range u.attributeTrails {
if str.Len() != 0 {
str.WriteString(", ")
}
if len(attrs) == 1 {
str.WriteString(fmt.Sprintf("%v (%d)", attrs[0], id))
} else {
str.WriteString(fmt.Sprintf("%v (%d)", attrs, id))
}
}
return str.String()
}
// Type implements ref.Val.Type. // Type implements ref.Val.Type.
func (u Unknown) Type() ref.Type { func (u *Unknown) Type() ref.Type {
return UnknownType return UnknownType
} }
// Value implements ref.Val.Value.
func (u *Unknown) Value() any {
return u
}
// IsUnknown returns whether the element ref.Val is an instance of *types.Unknown.
func IsUnknown(val ref.Val) bool {
switch val.(type) {
case *Unknown:
return true
default:
return false
}
}
// MaybeMergeUnknowns determines whether an input value and another, possibly nil, unknown will produce
// an unknown result.
//
// If the input `val` is another Unknown, then the result will be the merge of the `val` and the input
// `unk`. If the `val` is not unknown, then the result will depend on whether the input `unk` is nil.
// If both values are non-nil and unknown, then the return value will be a merge of both unknowns.
func MaybeMergeUnknowns(val ref.Val, unk *Unknown) (*Unknown, bool) {
src, isUnk := val.(*Unknown)
if !isUnk {
if unk != nil {
return unk, true
}
return unk, false
}
return MergeUnknowns(src, unk), true
}
// MergeUnknowns combines two unknown values into a new unknown value.
func MergeUnknowns(unk1, unk2 *Unknown) *Unknown {
if unk1 == nil {
return unk2
}
if unk2 == nil {
return unk1
}
out := &Unknown{
attributeTrails: make(map[int64][]*AttributeTrail, len(unk1.attributeTrails)+len(unk2.attributeTrails)),
}
for id, ats := range unk1.attributeTrails {
out.attributeTrails[id] = ats
}
for id, ats := range unk2.attributeTrails {
existing, found := out.attributeTrails[id]
if !found {
out.attributeTrails[id] = ats
continue
}
for _, at := range ats {
found := false
for _, et := range existing {
if at.Equal(et) {
found = true
break
}
}
if !found {
existing = append(existing, at)
}
}
out.attributeTrails[id] = existing
}
return out
}
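// A minimal sketch (not part of this vendored change): merging two unknowns combines their
// expression ids and attribute trails into a single value.
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	u1 := types.NewUnknown(1, types.NewAttributeTrail("request"))
	u2 := types.NewUnknown(4, types.QualifyAttribute[string](types.NewAttributeTrail("resource"), "name"))
	merged := types.MergeUnknowns(u1, u2)
	fmt.Println(merged.IDs())            // [1 4]
	fmt.Println(merged.Contains(u1))     // true
	fmt.Println(types.IsUnknown(merged)) // true
}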
// int64Slice is an implementation of the sort.Interface
type int64Slice []int64
// Len returns the number of elements in the slice.
func (x int64Slice) Len() int { return len(x) }
// Less indicates whether the value at index i is less than the value at index j.
func (x int64Slice) Less(i, j int) bool { return x[i] < x[j] }
// Swap swaps the values at indices i and j in place.
func (x int64Slice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
// Sort is a convenience method: x.Sort() calls Sort(x).
func (x int64Slice) Sort() { sort.Sort(x) }


@ -21,7 +21,7 @@ import (
// IsUnknownOrError returns whether the input element ref.Val is an ErrType or UnknownType. // IsUnknownOrError returns whether the input element ref.Val is an ErrType or UnknownType.
func IsUnknownOrError(val ref.Val) bool { func IsUnknownOrError(val ref.Val) bool {
switch val.(type) { switch val.(type) {
case Unknown, *Err: case *Unknown, *Err:
return true return true
} }
return false return false


@ -9,6 +9,7 @@ go_library(
srcs = [ srcs = [
"encoders.go", "encoders.go",
"guards.go", "guards.go",
"lists.go",
"math.go", "math.go",
"native.go", "native.go",
"protos.go", "protos.go",
@ -19,8 +20,8 @@ go_library(
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
"//cel:go_default_library", "//cel:go_default_library",
"//checker:go_default_library",
"//checker/decls:go_default_library", "//checker/decls:go_default_library",
"//common:go_default_library",
"//common/overloads:go_default_library", "//common/overloads:go_default_library",
"//common/types:go_default_library", "//common/types:go_default_library",
"//common/types/pb:go_default_library", "//common/types/pb:go_default_library",
@ -41,6 +42,7 @@ go_test(
size = "small", size = "small",
srcs = [ srcs = [
"encoders_test.go", "encoders_test.go",
"lists_test.go",
"math_test.go", "math_test.go",
"native_test.go", "native_test.go",
"protos_test.go", "protos_test.go",
@ -53,7 +55,6 @@ go_test(
deps = [ deps = [
"//cel:go_default_library", "//cel:go_default_library",
"//checker:go_default_library", "//checker:go_default_library",
"//common:go_default_library",
"//common/types:go_default_library", "//common/types:go_default_library",
"//common/types/ref:go_default_library", "//common/types/ref:go_default_library",
"//common/types/traits:go_default_library", "//common/types/traits:go_default_library",


@ -149,6 +149,23 @@ Example:
proto.hasExt(msg, google.expr.proto2.test.int32_ext) // returns true || false proto.hasExt(msg, google.expr.proto2.test.int32_ext) // returns true || false
## Lists
Extended functions for list manipulation. As a general note, all indices are
zero-based.
### Slice
Returns a new sub-list using the indexes provided.
<list>.slice(<int>, <int>) -> <list>
Examples:
[1,2,3,4].slice(1, 3) // return [2, 3]
[1,2,3,4].slice(2, 4) // return [3, 4]
## Sets ## Sets
Sets provides set relationship tests. Sets provides set relationship tests.


@ -16,7 +16,6 @@ package ext
import ( import (
"github.com/google/cel-go/cel" "github.com/google/cel-go/cel"
"github.com/google/cel-go/common"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
) )
@ -71,7 +70,7 @@ func (celBindings) ProgramOptions() []cel.ProgramOption {
return []cel.ProgramOption{} return []cel.ProgramOption{}
} }
func celBind(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { func celBind(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) {
if !macroTargetMatchesNamespace(celNamespace, target) { if !macroTargetMatchesNamespace(celNamespace, target) {
return nil, nil return nil, nil
} }
@ -81,10 +80,7 @@ func celBind(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr)
case *exprpb.Expr_IdentExpr: case *exprpb.Expr_IdentExpr:
varName = varIdent.GetIdentExpr().GetName() varName = varIdent.GetIdentExpr().GetName()
default: default:
return nil, &common.Error{ return nil, meh.NewError(varIdent.GetId(), "cel.bind() variable names must be simple identifiers")
Message: "cel.bind() variable names must be simple identifers",
Location: meh.OffsetLocation(varIdent.GetId()),
}
} }
varInit := args[1] varInit := args[1]
resultExpr := args[2] resultExpr := args[2]


@ -16,7 +16,6 @@ package ext
import ( import (
"encoding/base64" "encoding/base64"
"reflect"
"github.com/google/cel-go/cel" "github.com/google/cel-go/cel"
"github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types"
@ -86,7 +85,3 @@ func base64DecodeString(str string) ([]byte, error) {
func base64EncodeBytes(bytes []byte) (string, error) { func base64EncodeBytes(bytes []byte) (string, error) {
return base64.StdEncoding.EncodeToString(bytes), nil return base64.StdEncoding.EncodeToString(bytes), nil
} }
var (
bytesListType = reflect.TypeOf([]byte{})
)


@ -17,6 +17,7 @@ package ext
import ( import (
"github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/ref"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
) )

94
vendor/github.com/google/cel-go/ext/lists.go generated vendored Normal file

@ -0,0 +1,94 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ext
import (
"fmt"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
)
// Lists returns a cel.EnvOption to configure extended functions for list manipulation.
// As a general note, all indices are zero-based.
// # Slice
//
// Returns a new sub-list using the indexes provided.
//
// <list>.slice(<int>, <int>) -> <list>
//
// Examples:
//
// [1,2,3,4].slice(1, 3) // return [2, 3]
// [1,2,3,4].slice(2, 4) // return [3, 4]
func Lists() cel.EnvOption {
return cel.Lib(listsLib{})
}
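// A minimal sketch (not part of this vendored change): enabling the Lists extension in an
// environment and evaluating a slice expression, using the standard cel-go top-level API.
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.Lists())
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`[1, 2, 3, 4].slice(1, 3)`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Value()) // the two-element slice [2 3] (exact formatting may vary)
}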
type listsLib struct{}
// LibraryName implements the SingletonLibrary interface method.
func (listsLib) LibraryName() string {
return "cel.lib.ext.lists"
}
// CompileOptions implements the Library interface method.
func (listsLib) CompileOptions() []cel.EnvOption {
listType := cel.ListType(cel.TypeParamType("T"))
return []cel.EnvOption{
cel.Function("slice",
cel.MemberOverload("list_slice",
[]*cel.Type{listType, cel.IntType, cel.IntType}, listType,
cel.FunctionBinding(func(args ...ref.Val) ref.Val {
list := args[0].(traits.Lister)
start := args[1].(types.Int)
end := args[2].(types.Int)
result, err := slice(list, start, end)
if err != nil {
return types.WrapErr(err)
}
return result
}),
),
),
}
}
// ProgramOptions implements the Library interface method.
func (listsLib) ProgramOptions() []cel.ProgramOption {
return []cel.ProgramOption{}
}
func slice(list traits.Lister, start, end types.Int) (ref.Val, error) {
listLength := list.Size().(types.Int)
if start < 0 || end < 0 {
return nil, fmt.Errorf("cannot slice(%d, %d), negative indexes not supported", start, end)
}
if start > end {
return nil, fmt.Errorf("cannot slice(%d, %d), start index must be less than or equal to end index", start, end)
}
if listLength < end {
return nil, fmt.Errorf("cannot slice(%d, %d), list is length %d", start, end, listLength)
}
var newList []ref.Val
for i := types.Int(start); i < end; i++ {
val := list.Get(i)
newList = append(newList, val)
}
return types.DefaultTypeAdapter.NativeToValue(newList), nil
}


@ -19,10 +19,10 @@ import (
"strings" "strings"
"github.com/google/cel-go/cel" "github.com/google/cel-go/cel"
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits" "github.com/google/cel-go/common/types/traits"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
) )
@ -187,24 +187,18 @@ func (mathLib) ProgramOptions() []cel.ProgramOption {
return []cel.ProgramOption{} return []cel.ProgramOption{}
} }
func mathLeast(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { func mathLeast(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) {
if !macroTargetMatchesNamespace(mathNamespace, target) { if !macroTargetMatchesNamespace(mathNamespace, target) {
return nil, nil return nil, nil
} }
switch len(args) { switch len(args) {
case 0: case 0:
return nil, &common.Error{ return nil, meh.NewError(target.GetId(), "math.least() requires at least one argument")
Message: "math.least() requires at least one argument",
Location: meh.OffsetLocation(target.GetId()),
}
case 1: case 1:
if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) { if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) {
return meh.GlobalCall(minFunc, args[0]), nil return meh.GlobalCall(minFunc, args[0]), nil
} }
return nil, &common.Error{ return nil, meh.NewError(args[0].GetId(), "math.least() invalid single argument value")
Message: "math.least() invalid single argument value",
Location: meh.OffsetLocation(args[0].GetId()),
}
case 2: case 2:
err := checkInvalidArgs(meh, "math.least()", args) err := checkInvalidArgs(meh, "math.least()", args)
if err != nil { if err != nil {
@ -220,24 +214,18 @@ func mathLeast(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr
} }
} }
func mathGreatest(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { func mathGreatest(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) {
if !macroTargetMatchesNamespace(mathNamespace, target) { if !macroTargetMatchesNamespace(mathNamespace, target) {
return nil, nil return nil, nil
} }
switch len(args) { switch len(args) {
case 0: case 0:
return nil, &common.Error{ return nil, meh.NewError(target.GetId(), "math.greatest() requires at least one argument")
Message: "math.greatest() requires at least one argument",
Location: meh.OffsetLocation(target.GetId()),
}
case 1: case 1:
if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) { if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) {
return meh.GlobalCall(maxFunc, args[0]), nil return meh.GlobalCall(maxFunc, args[0]), nil
} }
return nil, &common.Error{ return nil, meh.NewError(args[0].GetId(), "math.greatest() invalid single argument value")
Message: "math.greatest() invalid single argument value",
Location: meh.OffsetLocation(args[0].GetId()),
}
case 2: case 2:
err := checkInvalidArgs(meh, "math.greatest()", args) err := checkInvalidArgs(meh, "math.greatest()", args)
if err != nil { if err != nil {
@ -323,14 +311,11 @@ func maxList(numList ref.Val) ref.Val {
} }
} }
func checkInvalidArgs(meh cel.MacroExprHelper, funcName string, args []*exprpb.Expr) *common.Error { func checkInvalidArgs(meh cel.MacroExprHelper, funcName string, args []*exprpb.Expr) *cel.Error {
for _, arg := range args { for _, arg := range args {
err := checkInvalidArgLiteral(funcName, arg) err := checkInvalidArgLiteral(funcName, arg)
if err != nil { if err != nil {
return &common.Error{ return meh.NewError(arg.GetId(), err.Error())
Message: err.Error(),
Location: meh.OffsetLocation(arg.GetId()),
}
} }
} }
return nil return nil
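
Reviewer note: the hunk above is the cel-go API migration this bump pulls in for macro expanders: they now return *cel.Error and report source positions through the helper instead of assembling common.Error values by hand. A rough sketch of the new shape for a custom macro (the macro and function names are made up for illustration):

// assumes imports "github.com/google/cel-go/cel" and
// exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
func expandMyMacro(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) {
	if len(args) == 0 {
		// NewError attaches the location of the given expression id, replacing the
		// old &common.Error{Message: ..., Location: meh.OffsetLocation(id)} pattern
		return nil, meh.NewError(target.GetId(), "my.macro() requires at least one argument")
	}
	return meh.GlobalCall("my_function", args...), nil
}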


@ -24,13 +24,11 @@ import (
"google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoreflect"
"github.com/google/cel-go/cel" "github.com/google/cel-go/cel"
"github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/pb" "github.com/google/cel-go/common/types/pb"
"github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits" "github.com/google/cel-go/common/types/traits"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
structpb "google.golang.org/protobuf/types/known/structpb" structpb "google.golang.org/protobuf/types/known/structpb"
) )
@ -81,7 +79,7 @@ var (
// the time that it is invoked. // the time that it is invoked.
func NativeTypes(refTypes ...any) cel.EnvOption { func NativeTypes(refTypes ...any) cel.EnvOption {
return func(env *cel.Env) (*cel.Env, error) { return func(env *cel.Env) (*cel.Env, error) {
tp, err := newNativeTypeProvider(env.TypeAdapter(), env.TypeProvider(), refTypes...) tp, err := newNativeTypeProvider(env.CELTypeAdapter(), env.CELTypeProvider(), refTypes...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -93,7 +91,7 @@ func NativeTypes(refTypes ...any) cel.EnvOption {
} }
} }
func newNativeTypeProvider(adapter ref.TypeAdapter, provider ref.TypeProvider, refTypes ...any) (*nativeTypeProvider, error) { func newNativeTypeProvider(adapter types.Adapter, provider types.Provider, refTypes ...any) (*nativeTypeProvider, error) {
nativeTypes := make(map[string]*nativeType, len(refTypes)) nativeTypes := make(map[string]*nativeType, len(refTypes))
for _, refType := range refTypes { for _, refType := range refTypes {
switch rt := refType.(type) { switch rt := refType.(type) {
@ -122,18 +120,18 @@ func newNativeTypeProvider(adapter ref.TypeAdapter, provider ref.TypeProvider, r
type nativeTypeProvider struct { type nativeTypeProvider struct {
nativeTypes map[string]*nativeType nativeTypes map[string]*nativeType
baseAdapter ref.TypeAdapter baseAdapter types.Adapter
baseProvider ref.TypeProvider baseProvider types.Provider
} }
// EnumValue proxies to the ref.TypeProvider configured at the times the NativeTypes // EnumValue proxies to the types.Provider configured at the times the NativeTypes
// option was configured. // option was configured.
func (tp *nativeTypeProvider) EnumValue(enumName string) ref.Val { func (tp *nativeTypeProvider) EnumValue(enumName string) ref.Val {
return tp.baseProvider.EnumValue(enumName) return tp.baseProvider.EnumValue(enumName)
} }
// FindIdent looks up natives type instances by qualified identifier, and if not found // FindIdent looks up natives type instances by qualified identifier, and if not found
// proxies to the composed ref.TypeProvider. // proxies to the composed types.Provider.
func (tp *nativeTypeProvider) FindIdent(typeName string) (ref.Val, bool) { func (tp *nativeTypeProvider) FindIdent(typeName string) (ref.Val, bool) {
if t, found := tp.nativeTypes[typeName]; found { if t, found := tp.nativeTypes[typeName]; found {
return t, true return t, true
@ -141,32 +139,35 @@ func (tp *nativeTypeProvider) FindIdent(typeName string) (ref.Val, bool) {
return tp.baseProvider.FindIdent(typeName) return tp.baseProvider.FindIdent(typeName)
} }
// FindType looks up CEL type-checker type definition by qualified identifier, and if not found // FindStructType looks up the CEL type definition by qualified identifier, and if not found
// proxies to the composed ref.TypeProvider. // proxies to the composed types.Provider.
func (tp *nativeTypeProvider) FindType(typeName string) (*exprpb.Type, bool) { func (tp *nativeTypeProvider) FindStructType(typeName string) (*types.Type, bool) {
if _, found := tp.nativeTypes[typeName]; found { if _, found := tp.nativeTypes[typeName]; found {
return decls.NewTypeType(decls.NewObjectType(typeName)), true return types.NewTypeTypeWithParam(types.NewObjectType(typeName)), true
} }
return tp.baseProvider.FindType(typeName) if celType, found := tp.baseProvider.FindStructType(typeName); found {
return celType, true
}
return tp.baseProvider.FindStructType(typeName)
} }
// FindFieldType looks up a native type's field definition, and if the type name is not a native // FindStructFieldType looks up a native type's field definition, and if the type name is not a native
// type then proxies to the composed ref.TypeProvider // type then proxies to the composed types.Provider
func (tp *nativeTypeProvider) FindFieldType(typeName, fieldName string) (*ref.FieldType, bool) { func (tp *nativeTypeProvider) FindStructFieldType(typeName, fieldName string) (*types.FieldType, bool) {
t, found := tp.nativeTypes[typeName] t, found := tp.nativeTypes[typeName]
if !found { if !found {
return tp.baseProvider.FindFieldType(typeName, fieldName) return tp.baseProvider.FindStructFieldType(typeName, fieldName)
} }
refField, isDefined := t.hasField(fieldName) refField, isDefined := t.hasField(fieldName)
if !found || !isDefined { if !found || !isDefined {
return nil, false return nil, false
} }
exprType, ok := convertToExprType(refField.Type) celType, ok := convertToCelType(refField.Type)
if !ok { if !ok {
return nil, false return nil, false
} }
return &ref.FieldType{ return &types.FieldType{
Type: exprType, Type: celType,
IsSet: func(obj any) bool { IsSet: func(obj any) bool {
refVal := reflect.Indirect(reflect.ValueOf(obj)) refVal := reflect.Indirect(reflect.ValueOf(obj))
refField := refVal.FieldByName(fieldName) refField := refVal.FieldByName(fieldName)
@ -243,75 +244,74 @@ func (tp *nativeTypeProvider) NativeToValue(val any) ref.Val {
} }
} }
// convertToExprType converts the Golang reflect.Type to a protobuf exprpb.Type. func convertToCelType(refType reflect.Type) (*cel.Type, bool) {
func convertToExprType(refType reflect.Type) (*exprpb.Type, bool) {
switch refType.Kind() { switch refType.Kind() {
case reflect.Bool: case reflect.Bool:
return decls.Bool, true return cel.BoolType, true
case reflect.Float32, reflect.Float64: case reflect.Float32, reflect.Float64:
return decls.Double, true return cel.DoubleType, true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if refType == durationType { if refType == durationType {
return decls.Duration, true return cel.DurationType, true
} }
return decls.Int, true return cel.IntType, true
case reflect.String: case reflect.String:
return decls.String, true return cel.StringType, true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return decls.Uint, true return cel.UintType, true
case reflect.Array, reflect.Slice: case reflect.Array, reflect.Slice:
refElem := refType.Elem() refElem := refType.Elem()
if refElem == reflect.TypeOf(byte(0)) { if refElem == reflect.TypeOf(byte(0)) {
return decls.Bytes, true return cel.BytesType, true
} }
elemType, ok := convertToExprType(refElem) elemType, ok := convertToCelType(refElem)
if !ok { if !ok {
return nil, false return nil, false
} }
return decls.NewListType(elemType), true return cel.ListType(elemType), true
case reflect.Map: case reflect.Map:
keyType, ok := convertToExprType(refType.Key()) keyType, ok := convertToCelType(refType.Key())
if !ok { if !ok {
return nil, false return nil, false
} }
// Ensure the key type is a int, bool, uint, string // Ensure the key type is a int, bool, uint, string
elemType, ok := convertToExprType(refType.Elem()) elemType, ok := convertToCelType(refType.Elem())
if !ok { if !ok {
return nil, false return nil, false
} }
return decls.NewMapType(keyType, elemType), true return cel.MapType(keyType, elemType), true
case reflect.Struct: case reflect.Struct:
if refType == timestampType { if refType == timestampType {
return decls.Timestamp, true return cel.TimestampType, true
} }
return decls.NewObjectType( return cel.ObjectType(
fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()), fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()),
), true ), true
case reflect.Pointer: case reflect.Pointer:
if refType.Implements(pbMsgInterfaceType) { if refType.Implements(pbMsgInterfaceType) {
pbMsg := reflect.New(refType.Elem()).Interface().(protoreflect.ProtoMessage) pbMsg := reflect.New(refType.Elem()).Interface().(protoreflect.ProtoMessage)
return decls.NewObjectType(string(pbMsg.ProtoReflect().Descriptor().FullName())), true return cel.ObjectType(string(pbMsg.ProtoReflect().Descriptor().FullName())), true
} }
return convertToExprType(refType.Elem()) return convertToCelType(refType.Elem())
} }
return nil, false return nil, false
} }
func newNativeObject(adapter ref.TypeAdapter, val any, refValue reflect.Value) ref.Val { func newNativeObject(adapter types.Adapter, val any, refValue reflect.Value) ref.Val {
valType, err := newNativeType(refValue.Type()) valType, err := newNativeType(refValue.Type())
if err != nil { if err != nil {
return types.NewErr(err.Error()) return types.NewErr(err.Error())
} }
return &nativeObj{ return &nativeObj{
TypeAdapter: adapter, Adapter: adapter,
val: val, val: val,
valType: valType, valType: valType,
refValue: refValue, refValue: refValue,
} }
} }
type nativeObj struct { type nativeObj struct {
ref.TypeAdapter types.Adapter
val any val any
valType *nativeType valType *nativeType
refValue reflect.Value refValue reflect.Value
@ -520,11 +520,11 @@ func (t *nativeType) hasField(fieldName string) (reflect.StructField, bool) {
return f, true return f, true
} }
func adaptFieldValue(adapter ref.TypeAdapter, refField reflect.Value) ref.Val { func adaptFieldValue(adapter types.Adapter, refField reflect.Value) ref.Val {
return adapter.NativeToValue(getFieldValue(adapter, refField)) return adapter.NativeToValue(getFieldValue(adapter, refField))
} }
func getFieldValue(adapter ref.TypeAdapter, refField reflect.Value) any { func getFieldValue(adapter types.Adapter, refField reflect.Value) any {
if refField.IsZero() { if refField.IsZero() {
switch refField.Kind() { switch refField.Kind() {
case reflect.Array, reflect.Slice: case reflect.Array, reflect.Slice:
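
Reviewer note: the provider changes above (FindType/FindFieldType becoming FindStructType/FindStructFieldType, ref.TypeAdapter/ref.TypeProvider becoming types.Adapter/types.Provider) are internal to the extension; NativeTypes remains the public entry point. A small sketch, with an illustrative struct, of how a Go type is surfaced to CEL through this provider (the type and variable names are assumptions, not part of this diff):

// assumes imports "reflect", "github.com/google/cel-go/cel", "github.com/google/cel-go/ext",
// and that this sketch lives in package main
type Address struct {
	Street string
	City   string
}

env, err := cel.NewEnv(
	ext.NativeTypes(reflect.TypeOf(Address{})),
	// per convertToCelType above, a struct is exposed as "<package alias>.<type name>"
	cel.Variable("addr", cel.ObjectType("main.Address")),
)
if err != nil {
	panic(err)
}
ast, iss := env.Compile(`addr.City == 'Zurich'`)
if iss != nil && iss.Err() != nil {
	panic(iss.Err())
}
_ = ast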


@ -16,7 +16,6 @@ package ext
import ( import (
"github.com/google/cel-go/cel" "github.com/google/cel-go/cel"
"github.com/google/cel-go/common"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
) )
@ -86,7 +85,7 @@ func (protoLib) ProgramOptions() []cel.ProgramOption {
} }
// hasProtoExt generates a test-only select expression for a fully-qualified extension name on a protobuf message. // hasProtoExt generates a test-only select expression for a fully-qualified extension name on a protobuf message.
func hasProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { func hasProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) {
if !macroTargetMatchesNamespace(protoNamespace, target) { if !macroTargetMatchesNamespace(protoNamespace, target) {
return nil, nil return nil, nil
} }
@ -98,7 +97,7 @@ func hasProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Ex
} }
// getProtoExt generates a select expression for a fully-qualified extension name on a protobuf message. // getProtoExt generates a select expression for a fully-qualified extension name on a protobuf message.
func getProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { func getProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) {
if !macroTargetMatchesNamespace(protoNamespace, target) { if !macroTargetMatchesNamespace(protoNamespace, target) {
return nil, nil return nil, nil
} }
@ -109,7 +108,7 @@ func getProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Ex
return meh.Select(args[0], extFieldName), nil return meh.Select(args[0], extFieldName), nil
} }
func getExtFieldName(meh cel.MacroExprHelper, expr *exprpb.Expr) (string, *common.Error) { func getExtFieldName(meh cel.MacroExprHelper, expr *exprpb.Expr) (string, *cel.Error) {
isValid := false isValid := false
extensionField := "" extensionField := ""
switch expr.GetExprKind().(type) { switch expr.GetExprKind().(type) {
@ -117,10 +116,7 @@ func getExtFieldName(meh cel.MacroExprHelper, expr *exprpb.Expr) (string, *commo
extensionField, isValid = validateIdentifier(expr) extensionField, isValid = validateIdentifier(expr)
} }
if !isValid { if !isValid {
return "", &common.Error{ return "", meh.NewError(expr.GetId(), "invalid extension field")
Message: "invalid extension field",
Location: meh.OffsetLocation(expr.GetId()),
}
} }
return extensionField, nil return extensionField, nil
} }
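
Reviewer note: the same error-helper migration applies to the proto macros above; hasProtoExt and getProtoExt still rewrite, at parse time, into a presence test and a field select on the fully-qualified extension name. Their use in CEL source looks roughly like the following (the message variable and extension name are placeholders):

// assumes imports "github.com/google/cel-go/cel" and "github.com/google/cel-go/ext",
// plus registration of the relevant protobuf types on the environment
env, err := cel.NewEnv(ext.Protos())
if err != nil {
	panic(err)
}
// proto.hasExt(msg, some.pkg.some_extension) -> presence test on the extension field
// proto.getExt(msg, some.pkg.some_extension) -> select of the extension field's value
_ = env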


@ -15,10 +15,14 @@
package ext package ext
import ( import (
"math"
"github.com/google/cel-go/cel" "github.com/google/cel-go/cel"
"github.com/google/cel-go/checker"
"github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits" "github.com/google/cel-go/common/types/traits"
"github.com/google/cel-go/interpreter"
) )
// Sets returns a cel.EnvOption to configure namespaced set relationship // Sets returns a cel.EnvOption to configure namespaced set relationship
@ -95,12 +99,24 @@ func (setsLib) CompileOptions() []cel.EnvOption {
cel.Function("sets.intersects", cel.Function("sets.intersects",
cel.Overload("list_sets_intersects_list", []*cel.Type{listType, listType}, cel.BoolType, cel.Overload("list_sets_intersects_list", []*cel.Type{listType, listType}, cel.BoolType,
cel.BinaryBinding(setsIntersects))), cel.BinaryBinding(setsIntersects))),
cel.CostEstimatorOptions(
checker.OverloadCostEstimate("list_sets_contains_list", estimateSetsCost(1)),
checker.OverloadCostEstimate("list_sets_intersects_list", estimateSetsCost(1)),
// equivalence requires potentially two m*n comparisons to ensure each list is contained by the other
checker.OverloadCostEstimate("list_sets_equivalent_list", estimateSetsCost(2)),
),
} }
} }
// ProgramOptions implements the Library interface method. // ProgramOptions implements the Library interface method.
func (setsLib) ProgramOptions() []cel.ProgramOption { func (setsLib) ProgramOptions() []cel.ProgramOption {
return []cel.ProgramOption{} return []cel.ProgramOption{
cel.CostTrackerOptions(
interpreter.OverloadCostTracker("list_sets_contains_list", trackSetsCost(1)),
interpreter.OverloadCostTracker("list_sets_intersects_list", trackSetsCost(1)),
interpreter.OverloadCostTracker("list_sets_equivalent_list", trackSetsCost(2)),
),
}
} }
func setsIntersects(listA, listB ref.Val) ref.Val { func setsIntersects(listA, listB ref.Val) ref.Val {
@ -136,3 +152,46 @@ func setsEquivalent(listA, listB ref.Val) ref.Val {
} }
return setsContains(listB, listA) return setsContains(listB, listA)
} }
func estimateSetsCost(costFactor float64) checker.FunctionEstimator {
return func(estimator checker.CostEstimator, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate {
if len(args) == 2 {
arg0Size := estimateSize(estimator, args[0])
arg1Size := estimateSize(estimator, args[1])
costEstimate := arg0Size.Multiply(arg1Size).MultiplyByCostFactor(costFactor).Add(callCostEstimate)
return &checker.CallEstimate{CostEstimate: costEstimate}
}
return nil
}
}
func estimateSize(estimator checker.CostEstimator, node checker.AstNode) checker.SizeEstimate {
if l := node.ComputedSize(); l != nil {
return *l
}
if l := estimator.EstimateSize(node); l != nil {
return *l
}
return checker.SizeEstimate{Min: 0, Max: math.MaxUint64}
}
func trackSetsCost(costFactor float64) interpreter.FunctionTracker {
return func(args []ref.Val, _ ref.Val) *uint64 {
lhsSize := actualSize(args[0])
rhsSize := actualSize(args[1])
cost := callCost + uint64(float64(lhsSize*rhsSize)*costFactor)
return &cost
}
}
func actualSize(value ref.Val) uint64 {
if sz, ok := value.(traits.Sizer); ok {
return uint64(sz.Size().(types.Int))
}
return 1
}
var (
callCostEstimate = checker.CostEstimate{Min: 1, Max: 1}
callCost = uint64(1)
)
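
Reviewer note: the estimator/tracker hooks added above make the cost of the set operations proportional to the product of the two argument sizes. As a worked example of trackSetsCost: for sets.equivalent on lists of length 3 and 4 (costFactor 2, since equivalence checks containment in both directions), the tracked cost is callCost + 3*4*2 = 25. A minimal compile sketch against the extension (expression contents illustrative):

// assumes imports "github.com/google/cel-go/cel" and "github.com/google/cel-go/ext"
env, err := cel.NewEnv(ext.Sets())
if err != nil {
	panic(err)
}
ast, iss := env.Compile(`sets.equivalent([1, 2, 3], [3, 2, 1, 1])`)
if iss != nil && iss.Err() != nil {
	panic(iss.Err())
}
_ = ast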


@ -173,7 +173,7 @@ const (
// 'TacoCat'.lowerAscii() // returns 'tacocat' // 'TacoCat'.lowerAscii() // returns 'tacocat'
// 'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii' // 'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii'
// //
// # Quote // # Strings.Quote
// //
// Introduced in version: 1 // Introduced in version: 1
// //
@ -301,26 +301,28 @@ func StringsLocale(locale string) StringsOption {
} }
} }
// StringsVersion configures the version of the string library. The version limits which // StringsVersion configures the version of the string library.
// functions are available. Only functions introduced below or equal to the given //
// version included in the library. See the library documentation to determine // The version limits which functions are available. Only functions introduced
// which version a function was introduced at. If the documentation does not // below or equal to the given version included in the library. If this option
// state which version a function was introduced at, it can be assumed to be // is not set, all functions are available.
// introduced at version 0, when the library was first created. //
// If this option is not set, all functions are available. // See the library documentation to determine which version a function was introduced.
func StringsVersion(version uint32) func(lib *stringLib) *stringLib { // If the documentation does not state which version a function was introduced, it can
return func(sl *stringLib) *stringLib { // be assumed to be introduced at version 0, when the library was first created.
sl.version = version func StringsVersion(version uint32) StringsOption {
return sl return func(lib *stringLib) *stringLib {
lib.version = version
return lib
} }
} }
// CompileOptions implements the Library interface method. // CompileOptions implements the Library interface method.
func (sl *stringLib) CompileOptions() []cel.EnvOption { func (lib *stringLib) CompileOptions() []cel.EnvOption {
formatLocale := "en_US" formatLocale := "en_US"
if sl.locale != "" { if lib.locale != "" {
// ensure locale is properly-formed if set // ensure locale is properly-formed if set
_, err := language.Parse(sl.locale) _, err := language.Parse(lib.locale)
if err != nil { if err != nil {
return []cel.EnvOption{ return []cel.EnvOption{
func(e *cel.Env) (*cel.Env, error) { func(e *cel.Env) (*cel.Env, error) {
@ -328,7 +330,7 @@ func (sl *stringLib) CompileOptions() []cel.EnvOption {
}, },
} }
} }
formatLocale = sl.locale formatLocale = lib.locale
} }
opts := []cel.EnvOption{ opts := []cel.EnvOption{
@ -432,7 +434,7 @@ func (sl *stringLib) CompileOptions() []cel.EnvOption {
return stringOrError(upperASCII(string(s))) return stringOrError(upperASCII(string(s)))
}))), }))),
} }
if sl.version >= 1 { if lib.version >= 1 {
opts = append(opts, cel.Function("format", opts = append(opts, cel.Function("format",
cel.MemberOverload("string_format", []*cel.Type{cel.StringType, cel.ListType(cel.DynType)}, cel.StringType, cel.MemberOverload("string_format", []*cel.Type{cel.StringType, cel.ListType(cel.DynType)}, cel.StringType,
cel.FunctionBinding(func(args ...ref.Val) ref.Val { cel.FunctionBinding(func(args ...ref.Val) ref.Val {
@ -447,7 +449,7 @@ func (sl *stringLib) CompileOptions() []cel.EnvOption {
})))) }))))
} }
if sl.version >= 2 { if lib.version >= 2 {
opts = append(opts, opts = append(opts,
cel.Function("join", cel.Function("join",
cel.MemberOverload("list_join", []*cel.Type{cel.ListType(cel.StringType)}, cel.StringType, cel.MemberOverload("list_join", []*cel.Type{cel.ListType(cel.StringType)}, cel.StringType,

Some files were not shown because too many files have changed in this diff.