Mirror of https://github.com/ceph/ceph-csi.git, synced 2024-12-20 03:50:24 +00:00
rebase: bump google.golang.org/grpc from 1.68.1 to 1.69.0
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.68.1 to 1.69.0.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.68.1...v1.69.0)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
parent afd950ebed
commit 431e9231d2

go.mod (14 changed lines)

@@ -28,7 +28,7 @@ require (
 golang.org/x/crypto v0.31.0
 golang.org/x/net v0.32.0
 golang.org/x/sys v0.28.0
-google.golang.org/grpc v1.68.1
+google.golang.org/grpc v1.69.0
 google.golang.org/protobuf v1.35.2
 //
 // when updating k8s.io/kubernetes, make sure to update the replace section too
@@ -148,12 +148,12 @@ require (
 go.etcd.io/etcd/client/v3 v3.5.14 // indirect
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
-go.opentelemetry.io/otel v1.28.0 // indirect
+go.opentelemetry.io/otel v1.31.0 // indirect
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
-go.opentelemetry.io/otel/metric v1.28.0 // indirect
+go.opentelemetry.io/otel/metric v1.31.0 // indirect
-go.opentelemetry.io/otel/sdk v1.28.0 // indirect
+go.opentelemetry.io/otel/sdk v1.31.0 // indirect
-go.opentelemetry.io/otel/trace v1.28.0 // indirect
+go.opentelemetry.io/otel/trace v1.31.0 // indirect
 go.opentelemetry.io/proto/otlp v1.3.1 // indirect
 go.uber.org/multierr v1.11.0 // indirect
 go.uber.org/zap v1.26.0 // indirect
@@ -165,8 +165,8 @@ require (
 golang.org/x/time v0.5.0 // indirect
 golang.org/x/tools v0.26.0 // indirect
 gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
-google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
+google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
 gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 gopkg.in/inf.v0 v0.9.1 // indirect
 gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect

go.sum (26 changed lines)

@@ -2355,8 +2355,9 @@ go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZV
 go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0=
 go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
 go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
-go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
 go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0/go.mod h1:GijYcYmNpX1KazD5JmWGsi4P7dDTTTnfv1UbGn84MnU=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
@@ -2371,15 +2372,19 @@ go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xC
 go.opentelemetry.io/otel/metric v1.23.0/go.mod h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo=
 go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
 go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
-go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
 go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
 go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A=
 go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0=
 go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
 go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
 go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
-go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
 go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
+go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
 go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
 go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU=
 go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
@@ -2387,8 +2392,9 @@ go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40
 go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk=
 go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
 go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
-go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
 go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
 go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
 go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
@@ -3264,8 +3270,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.
 google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8/go.mod h1:vPrPUTsDCYxXWjP7clS81mZ6/803D8K4iM9Ma27VKas=
 google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g=
 google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
-google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
+google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U=
-google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
+google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4=
 google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA=
 google.golang.org/genproto/googleapis/bytestream v0.0.0-20230807174057-1744710a1577/go.mod h1:NjCQG/D8JandXxM57PZbAJL1DCNL6EypA0vPPwfsc7c=
 google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405/go.mod h1:GRUCuLdzVqZte8+Dl/D4N25yLzcGqqWaYkeVOwulFqw=
@@ -3315,8 +3321,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291/go.
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -3376,8 +3382,8 @@ google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDom
 google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
 google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
 google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
-google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
+google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI=
-google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
+google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=

vendor/go.opentelemetry.io/otel/.golangci.yml (13 changed lines, generated, vendored)

@@ -9,6 +9,8 @@ linters:
 disable-all: true
 # Specifically enable linters we want to use.
 enable:
+- asasalint
+- bodyclose
 - depguard
 - errcheck
 - errorlint
@@ -23,6 +25,7 @@ linters:
 - revive
 - staticcheck
 - tenv
+- testifylint
 - typecheck
 - unconvert
 - unused
@@ -62,12 +65,12 @@ issues:
 - path: _test\.go
 linters:
 - gosec
-# Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
+# Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
 # as we commonly use it in tests and examples.
 - text: "G404:"
 linters:
 - gosec
-# Igonoring gosec G402: TLS MinVersion too low
+# Ignoring gosec G402: TLS MinVersion too low
 # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
 - text: "G402: TLS MinVersion too low."
 linters:
@@ -300,3 +303,9 @@ linters-settings:
 # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
 - name: waitgroup-by-value
 disabled: false
+testifylint:
+enable-all: true
+disable:
+- float-compare
+- go-require
+- require-error

vendor/go.opentelemetry.io/otel/CHANGELOG.md (121 changed lines, generated, vendored)

@@ -8,6 +8,112 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
 
 ## [Unreleased]
 
+<!-- Released section -->
+<!-- Don't change this section unless doing release -->
+
+## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11
+
+### Added
+
+- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862)
+- Add `WithExportBufferSize` option to log batch processor.(#5877)
+
+### Changed
+
+- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off` (#5778)
+- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791)
+- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791)
+- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847)
+- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864)
+- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858)
+- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874)
+
+### Deprecated
+
+- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854)
+
+### Fixed
+
+- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819)
+- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to it's resource and scope. (#5803)
+- Fix timer channel drain to avoid hanging on Go 1.23. (#5868)
+- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827)
+- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827)
+
+## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09
+
+### Added
+
+- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739)
+- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773)
+- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773)
+- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755)
+
+### Fixed
+
+- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754)
+- Fix panic on instruments creation when setting meter provider. (#5758)
+- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780)
+
+### Removed
+
+- Drop support for [Go 1.21]. (#5736, #5740, #5800)
+
+## [1.29.0/0.51.0/0.5.0] 2024-08-23
+
+This release is the last to support [Go 1.21].
+The next release will require at least [Go 1.22].
+
+### Added
+
+- Add MacOS ARM64 platform to the compatibility testing suite. (#5577)
+- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627)
+- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`.
+This new module contains an OTLP exporter that transmits log telemetry using gRPC.
+This module is unstable and breaking changes may be introduced.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629)
+- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651)
+- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651)
+- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665)
+- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`.
+This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not.
+It replaces the existing `Enabled` method that is removed from the `Processor` interface itself.
+It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692)
+- Support [Go 1.23]. (#5720)
+
+### Changed
+
+- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132)
+- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636)
+- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665)
+- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666)
+- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666)
+- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method.
+See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692)
+- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
+- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
+
+### Fixed
+
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584)
+- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541)
+- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612)
+- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612)
+- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612)
+- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650)
+- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
+- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
+
+### Removed
+
+- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
+- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
+
 ## [1.28.0/0.50.0/0.4.0] 2024-07-02
 
 ### Added
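
Two of the 1.31.0 entries above matter most for consumers of the bumped SDK: exemplars are now on by default in `go.opentelemetry.io/otel/sdk/metric` and can be disabled through `OTEL_METRICS_EXEMPLAR_FILTER=always_off`. A minimal sketch of that wiring, assuming ordinary SDK setup (this is illustrative application code, not part of ceph-csi or the vendored diff):

```go
package main

import (
	"context"
	"os"

	"go.opentelemetry.io/otel"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// As of otel 1.31.0 exemplars are enabled by default; the changelog names
	// OTEL_METRICS_EXEMPLAR_FILTER=always_off as the switch to turn them off.
	// Normally this comes from the process environment; it is set in code here
	// only for illustration.
	os.Setenv("OTEL_METRICS_EXEMPLAR_FILTER", "always_off")

	// No readers or exporters are configured in this sketch, so the provider
	// records nothing; it only shows the wiring.
	mp := sdkmetric.NewMeterProvider()
	defer func() { _ = mp.Shutdown(context.Background()) }()

	otel.SetMeterProvider(mp)
}
```
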
@@ -49,6 +155,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
 - Fix stale timestamps reported by the last-value aggregation. (#5517)
 - Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521)
 - Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549)
+- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528)
 
 ## [1.27.0/0.49.0/0.3.0] 2024-05-21
 
@@ -175,7 +282,7 @@ The next release will require at least [Go 1.21].
 This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
 This module is in an alpha state, it is subject to breaking changes.
 See our [versioning policy](./VERSIONING.md) for more info. (#4961)
-- ARM64 platform to the compatibility testing suite. (#4994)
+- Add ARM64 platform to the compatibility testing suite. (#4994)
 
 ### Fixed
 
@@ -1836,7 +1943,7 @@ with major version 0.
 - Setting error status while recording error with Span from oteltest package. (#1729)
 - The concept of a remote and local Span stored in a context is unified to just the current Span.
 Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
-Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span.
+Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
 If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
 - The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
 This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
@@ -2410,7 +2517,7 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco
 - Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
 - Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
 - Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
-- Update otel-colector example to use the v0.5.0 collector. (#915)
+- Update otel-collector example to use the v0.5.0 collector. (#915)
 - The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
 - The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
 - The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
@@ -3003,7 +3110,10 @@ It contains api and sdk for trace and meter.
 - CircleCI build CI manifest files.
 - CODEOWNERS file to track owners of this project.
 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...HEAD
+[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0
+[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0
+[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0
 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0
 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0
 [1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
@@ -3086,6 +3196,9 @@ It contains api and sdk for trace and meter.
 [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
 
+<!-- Released section ended -->
+
+[Go 1.23]: https://go.dev/doc/go1.23
 [Go 1.22]: https://go.dev/doc/go1.22
 [Go 1.21]: https://go.dev/doc/go1.21
 [Go 1.20]: https://go.dev/doc/go1.20

vendor/go.opentelemetry.io/otel/CODEOWNERS (6 changed lines, generated, vendored)

@@ -5,13 +5,13 @@
 #####################################################
 #
 # Learn about membership in OpenTelemetry community:
-# https://github.com/open-telemetry/community/blob/main/community-membership.md
+# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md
 #
 #
 # Learn about CODEOWNERS file format:
 # https://help.github.com/en/articles/about-code-owners
 #
 
-* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
+* @MrAlias @XSAM @dashpole @pellared @dmathieu
 
-CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu
+CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu

vendor/go.opentelemetry.io/otel/CONTRIBUTING.md (18 changed lines, generated, vendored)

@@ -578,7 +578,10 @@ See also:
 The tests should never leak goroutines.
 
 Use the term `ConcurrentSafe` in the test name when it aims to verify the
-absence of race conditions.
+absence of race conditions. The top-level tests with this term will be run
+many times in the `test-concurrent-safe` CI job to increase the chance of
+catching concurrency issues. This does not apply to subtests when this term
+is not in their root name.
 
 ### Internal packages
 
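
The `ConcurrentSafe` naming convention added above is purely a name-matching contract: the new `test-concurrent-safe` Makefile target (see the Makefile diff below) re-runs matching tests many times under the race detector. A minimal sketch of what such a test could look like, using only the standard library (the counter and names are hypothetical, not vendored code):

```go
package counter_test

import (
	"sync"
	"sync/atomic"
	"testing"
)

// TestConcurrentSafeCounter exercises a shared counter from many goroutines.
// Run it repeatedly with the race detector, for example:
//
//	go test -run=ConcurrentSafe -count=100 -race ./...
func TestConcurrentSafeCounter(t *testing.T) {
	var n atomic.Int64
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			n.Add(1) // the only shared write; atomic keeps it race-free
		}()
	}
	wg.Wait()
	if got := n.Load(); got != 100 {
		t.Fatalf("expected 100 increments, got %d", got)
	}
}
```
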
@@ -628,11 +631,8 @@ should be canceled.
 
 ### Approvers
 
-- [Chester Cheung](https://github.com/hanyuancheung), Tencent
-
 ### Maintainers
 
-- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
 - [Damien Mathieu](https://github.com/dmathieu), Elastic
 - [David Ashpole](https://github.com/dashpole), Google
 - [Robert Pająk](https://github.com/pellared), Splunk
@@ -641,16 +641,18 @@ should be canceled.
 
 ### Emeritus
 
-- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
+- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
+- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
+- [Chester Cheung](https://github.com/hanyuancheung), Tencent
+- [Evan Torrie](https://github.com/evantorrie), Yahoo
 - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
 - [Josh MacDonald](https://github.com/jmacd), LightStep
-- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
+- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
-- [Evan Torrie](https://github.com/evantorrie), Yahoo
 
 ### Become an Approver or a Maintainer
 
 See the [community membership document in OpenTelemetry community
-repo](https://github.com/open-telemetry/community/blob/main/community-membership.md).
+repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md).
 
 [Approver]: #approvers
 [Maintainer]: #maintainers

vendor/go.opentelemetry.io/otel/Makefile (22 changed lines, generated, vendored)

@@ -54,9 +54,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer
 PORTO = $(TOOLS)/porto
 $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
 
-GOJQ = $(TOOLS)/gojq
-$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
-
 GOTMPL = $(TOOLS)/gotmpl
 $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
 
@@ -67,7 +64,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck
 $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
 
 .PHONY: tools
-tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
+tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
 
 # Virtualized python tools via docker
 
@@ -145,12 +142,14 @@ build-tests/%:
 
 # Tests
 
-TEST_TARGETS := test-default test-bench test-short test-verbose test-race
+TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe
 .PHONY: $(TEST_TARGETS) test
 test-default test-race: ARGS=-race
 test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
 test-short: ARGS=-short
 test-verbose: ARGS=-v -race
+test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race
+test-concurrent-safe: TIMEOUT=120
 $(TEST_TARGETS): test
 test: $(OTEL_GO_MOD_DIRS:%=test/%)
 test/%: DIR=$*
@@ -178,17 +177,14 @@ test-coverage: $(GOCOVMERGE)
 done; \
 $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
 
-# Adding a directory will include all benchmarks in that directory if a filter is not specified.
-BENCHMARK_TARGETS := sdk/trace
 .PHONY: benchmark
-benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
+benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%)
-BENCHMARK_FILTER = .
-# You can override the filter for a particular directory by adding a rule here.
-benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample
 benchmark/%:
-@echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \
+@echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \
 && cd $* \
-$(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter))
+&& $(GO) list ./... \
+| grep -v third_party \
+| xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=.
 
 .PHONY: golangci-lint golangci-lint-fix
 golangci-lint-fix: ARGS=--fix

vendor/go.opentelemetry.io/otel/README.md (34 changed lines, generated, vendored)

@@ -47,20 +47,22 @@ stop ensuring compatibility with these versions in the following manner:
 
 Currently, this project supports the following environments.
 
 | OS | Go Version | Architecture |
-|---------|------------|--------------|
+|----------|------------|--------------|
-| Ubuntu | 1.22 | amd64 |
+| Ubuntu | 1.23 | amd64 |
-| Ubuntu | 1.21 | amd64 |
+| Ubuntu | 1.22 | amd64 |
-| Ubuntu | 1.22 | 386 |
+| Ubuntu | 1.23 | 386 |
-| Ubuntu | 1.21 | 386 |
+| Ubuntu | 1.22 | 386 |
-| Linux | 1.22 | arm64 |
+| Linux | 1.23 | arm64 |
-| Linux | 1.21 | arm64 |
+| Linux | 1.22 | arm64 |
-| MacOS | 1.22 | amd64 |
+| macOS 13 | 1.23 | amd64 |
-| MacOS | 1.21 | amd64 |
+| macOS 13 | 1.22 | amd64 |
-| Windows | 1.22 | amd64 |
+| macOS | 1.23 | arm64 |
-| Windows | 1.21 | amd64 |
+| macOS | 1.22 | arm64 |
-| Windows | 1.22 | 386 |
+| Windows | 1.23 | amd64 |
-| Windows | 1.21 | 386 |
+| Windows | 1.22 | amd64 |
+| Windows | 1.23 | 386 |
+| Windows | 1.22 | 386 |
 
 While this project should work for other systems, no compatibility guarantees
 are made for those systems currently.
@@ -87,8 +89,8 @@ If you need to extend the telemetry an instrumentation library provides or want
 to build your own instrumentation for your application directly you will need
 to use the
 [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
-package. The included [examples](./example/) are a good way to see some
+package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples)
-practical uses of this process.
+are a good way to see some practical uses of this process.
 
 ### Export
 

vendor/go.opentelemetry.io/otel/RELEASING.md (12 changed lines, generated, vendored)

@@ -69,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t
 ```
 
 - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+- Make sure the new section is under the comment for released section, like `<!-- Released section -->`, so it is protected from being overwritten in the future.
 - Update all the appropriate links at the bottom.
 
 4. Push the changes to upstream and create a Pull Request on GitHub.
@@ -110,17 +111,6 @@ It is critical you make sure the version you push upstream is correct.
 Finally create a Release for the new `<new tag>` on GitHub.
 The release body should include all the release notes from the Changelog for this release.
 
-## Verify Examples
-
-After releasing verify that examples build outside of the repository.
-
-```
-./verify_examples.sh
-```
-
-The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them.
-This ensures they build with the published release, not the local copy.
-
 ## Post-Release
 
 ### Contrib Repository

vendor/go.opentelemetry.io/otel/attribute/set.go (40 changed lines, generated, vendored)

@@ -347,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct {
 func computeDistinctFixed(kvs []KeyValue) interface{} {
 switch len(kvs) {
 case 1:
-ptr := new([1]KeyValue)
+return [1]KeyValue(kvs)
-copy((*ptr)[:], kvs)
-return *ptr
 case 2:
-ptr := new([2]KeyValue)
+return [2]KeyValue(kvs)
-copy((*ptr)[:], kvs)
-return *ptr
 case 3:
-ptr := new([3]KeyValue)
+return [3]KeyValue(kvs)
-copy((*ptr)[:], kvs)
-return *ptr
 case 4:
-ptr := new([4]KeyValue)
+return [4]KeyValue(kvs)
-copy((*ptr)[:], kvs)
-return *ptr
 case 5:
-ptr := new([5]KeyValue)
+return [5]KeyValue(kvs)
-copy((*ptr)[:], kvs)
-return *ptr
 case 6:
-ptr := new([6]KeyValue)
+return [6]KeyValue(kvs)
-copy((*ptr)[:], kvs)
-return *ptr
 case 7:
-ptr := new([7]KeyValue)
+return [7]KeyValue(kvs)
-copy((*ptr)[:], kvs)
-return *ptr
 case 8:
-ptr := new([8]KeyValue)
+return [8]KeyValue(kvs)
-copy((*ptr)[:], kvs)
-return *ptr
 case 9:
-ptr := new([9]KeyValue)
+return [9]KeyValue(kvs)
-copy((*ptr)[:], kvs)
-return *ptr
 case 10:
-ptr := new([10]KeyValue)
+return [10]KeyValue(kvs)
-copy((*ptr)[:], kvs)
-return *ptr
 default:
 return nil
 }
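
The rewrite of `computeDistinctFixed` above swaps an allocate-and-copy pattern for Go's slice-to-array conversion (available since Go 1.20), which copies the first N elements into an array value and panics if the slice is shorter than N. A small standalone sketch of the idiom, with illustrative names rather than the vendored ones:

```go
package main

import "fmt"

// fixedKey mirrors the idea behind computeDistinctFixed: turning a short
// slice into a comparable array value that can be used as a map key.
func fixedKey(parts []string) interface{} {
	switch len(parts) {
	case 2:
		// Slice-to-array conversion copies parts[0] and parts[1] into a
		// [2]string; it would panic if len(parts) < 2, so the switch on the
		// length guards it, exactly as the vendored code does.
		return [2]string(parts)
	case 3:
		return [3]string(parts)
	default:
		return nil
	}
}

func main() {
	seen := map[interface{}]bool{}
	seen[fixedKey([]string{"service", "ceph-csi"})] = true
	// Arrays are comparable, so the same contents map to the same key.
	fmt.Println(seen[fixedKey([]string{"service", "ceph-csi"})]) // true
}
```
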

vendor/go.opentelemetry.io/otel/baggage/baggage.go (150 changed lines, generated, vendored)

@@ -44,9 +44,15 @@ type Property struct {
 
 // NewKeyProperty returns a new Property for key.
 //
+// The passed key must be valid, non-empty UTF-8 string.
 // If key is invalid, an error will be returned.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key.
 func NewKeyProperty(key string) (Property, error) {
-if !validateKey(key) {
+if !validateBaggageName(key) {
 return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
 }
 
@@ -62,6 +68,10 @@ func NewKeyProperty(key string) (Property, error) {
 // Notice: Consider using [NewKeyValuePropertyRaw] instead
 // that does not require percent-encoding of the value.
 func NewKeyValueProperty(key, value string) (Property, error) {
+if !validateKey(key) {
+return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+}
+
 if !validateValue(value) {
 return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
 }
@@ -74,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) {
 
 // NewKeyValuePropertyRaw returns a new Property for key with value.
 //
-// The passed key must be compliant with W3C Baggage specification.
+// The passed key must be valid, non-empty UTF-8 string.
+// The passed value must be valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key.
 func NewKeyValuePropertyRaw(key, value string) (Property, error) {
-if !validateKey(key) {
+if !validateBaggageName(key) {
 return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
 }
+if !validateBaggageValue(value) {
+return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+}
 
 p := Property{
 key: key,
@@ -115,12 +134,15 @@ func (p Property) validate() error {
 return fmt.Errorf("invalid property: %w", err)
 }
 
-if !validateKey(p.key) {
+if !validateBaggageName(p.key) {
 return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
 }
 if !p.hasValue && p.value != "" {
 return errFunc(errors.New("inconsistent value"))
 }
+if p.hasValue && !validateBaggageValue(p.value) {
+return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
+}
 return nil
 }
 
@@ -138,7 +160,15 @@ func (p Property) Value() (string, bool) {
 
 // String encodes Property into a header string compliant with the W3C Baggage
 // specification.
+// It would return empty string if the key is invalid with the W3C Baggage
+// specification. This could happen for a UTF-8 key, as it may contain
+// invalid characters.
 func (p Property) String() string {
+// W3C Baggage specification does not allow percent-encoded keys.
+if !validateKey(p.key) {
+return ""
+}
+
 if p.hasValue {
 return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
 }
@@ -203,9 +233,14 @@ func (p properties) validate() error {
 // String encodes properties into a header string compliant with the W3C Baggage
 // specification.
 func (p properties) String() string {
-props := make([]string, len(p))
+props := make([]string, 0, len(p))
-for i, prop := range p {
+for _, prop := range p {
-props[i] = prop.String()
+s := prop.String()
+
+// Ignored empty properties.
+if s != "" {
+props = append(props, s)
+}
 }
 return strings.Join(props, propertyDelimiter)
 }
@@ -230,6 +265,10 @@ type Member struct {
 // Notice: Consider using [NewMemberRaw] instead
 // that does not require percent-encoding of the value.
 func NewMember(key, value string, props ...Property) (Member, error) {
+if !validateKey(key) {
+return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+}
+
 if !validateValue(value) {
 return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
 }
@@ -242,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) {
 
 // NewMemberRaw returns a new Member from the passed arguments.
 //
-// The passed key must be compliant with W3C Baggage specification.
+// The passed key must be valid, non-empty UTF-8 string.
+// The passed value must be valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on baggage key.
+// For example, the W3C Baggage specification restricts the baggage keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric value are strongly recommended to be used as baggage key.
 func NewMemberRaw(key, value string, props ...Property) (Member, error) {
 m := Member{
 key: key,
@ -294,19 +339,45 @@ func parseMember(member string) (Member, error) {
|
|||||||
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
|
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
|
||||||
}
|
}
|
||||||
|
|
||||||
val := strings.TrimSpace(v)
|
rawVal := strings.TrimSpace(v)
|
||||||
if !validateValue(val) {
|
if !validateValue(rawVal) {
|
||||||
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
|
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Decode a percent-encoded value.
|
// Decode a percent-encoded value.
|
||||||
value, err := url.PathUnescape(val)
|
unescapeVal, err := url.PathUnescape(rawVal)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err)
|
return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
|
||||||
return Member{key: key, value: value, properties: props, hasData: true}, nil
|
return Member{key: key, value: value, properties: props, hasData: true}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '<27>'.
|
||||||
|
func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string {
|
||||||
|
if utf8.ValidString(unescapeVal) {
|
||||||
|
return unescapeVal
|
||||||
|
}
|
||||||
|
// W3C baggage spec:
|
||||||
|
// https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69
|
||||||
|
|
||||||
|
var b strings.Builder
|
||||||
|
b.Grow(cap)
|
||||||
|
for i := 0; i < len(unescapeVal); {
|
||||||
|
r, size := utf8.DecodeRuneInString(unescapeVal[i:])
|
||||||
|
if r == utf8.RuneError && size == 1 {
|
||||||
|
// Invalid UTF-8 sequence found, replace it with '<27>'
|
||||||
|
_, _ = b.WriteString("<22>")
|
||||||
|
} else {
|
||||||
|
_, _ = b.WriteRune(r)
|
||||||
|
}
|
||||||
|
i += size
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
// validate ensures m conforms to the W3C Baggage specification.
|
// validate ensures m conforms to the W3C Baggage specification.
|
||||||
// A key must be an ASCII string, returning an error otherwise.
|
// A key must be an ASCII string, returning an error otherwise.
|
||||||
func (m Member) validate() error {
|
func (m Member) validate() error {
|
||||||
@ -314,9 +385,12 @@ func (m Member) validate() error {
|
|||||||
return fmt.Errorf("%w: %q", errInvalidMember, m)
|
return fmt.Errorf("%w: %q", errInvalidMember, m)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !validateKey(m.key) {
|
if !validateBaggageName(m.key) {
|
||||||
return fmt.Errorf("%w: %q", errInvalidKey, m.key)
|
return fmt.Errorf("%w: %q", errInvalidKey, m.key)
|
||||||
}
|
}
|
||||||
|
if !validateBaggageValue(m.value) {
|
||||||
|
return fmt.Errorf("%w: %q", errInvalidValue, m.value)
|
||||||
|
}
|
||||||
return m.properties.validate()
|
return m.properties.validate()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -331,10 +405,15 @@ func (m Member) Properties() []Property { return m.properties.Copy() }
|
|||||||
|
|
||||||
// String encodes Member into a header string compliant with the W3C Baggage
|
// String encodes Member into a header string compliant with the W3C Baggage
|
||||||
// specification.
|
// specification.
|
||||||
|
// It would return empty string if the key is invalid with the W3C Baggage
|
||||||
|
// specification. This could happen for a UTF-8 key, as it may contain
|
||||||
|
// invalid characters.
|
||||||
func (m Member) String() string {
|
func (m Member) String() string {
|
||||||
// A key is just an ASCII string. A value is restricted to be
|
// W3C Baggage specification does not allow percent-encoded keys.
|
||||||
// US-ASCII characters excluding CTLs, whitespace,
|
if !validateKey(m.key) {
|
||||||
// DQUOTE, comma, semicolon, and backslash.
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
s := m.key + keyValueDelimiter + valueEscape(m.value)
|
s := m.key + keyValueDelimiter + valueEscape(m.value)
|
||||||
if len(m.properties) > 0 {
|
if len(m.properties) > 0 {
|
||||||
s += propertyDelimiter + m.properties.String()
|
s += propertyDelimiter + m.properties.String()
|
||||||
@ -448,7 +527,7 @@ func (b Baggage) Member(key string) Member {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Members returns all the baggage list-members.
|
// Members returns all the baggage list-members.
|
||||||
// The order of the returned list-members does not have significance.
|
// The order of the returned list-members is not significant.
|
||||||
//
|
//
|
||||||
// The returned members are not validated, as we assume the validation happened
|
// The returned members are not validated, as we assume the validation happened
|
||||||
// when they were added to the Baggage.
|
// when they were added to the Baggage.
|
||||||
@ -469,8 +548,8 @@ func (b Baggage) Members() []Member {
|
|||||||
return members
|
return members
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetMember returns a copy the Baggage with the member included. If the
|
// SetMember returns a copy of the Baggage with the member included. If the
|
||||||
// baggage contains a Member with the same key the existing Member is
|
// baggage contains a Member with the same key, the existing Member is
|
||||||
// replaced.
|
// replaced.
|
||||||
//
|
//
|
||||||
// If member is invalid according to the W3C Baggage specification, an error
|
// If member is invalid according to the W3C Baggage specification, an error
|
||||||
@ -528,14 +607,22 @@ func (b Baggage) Len() int {
|
|||||||
|
|
||||||
// String encodes Baggage into a header string compliant with the W3C Baggage
|
// String encodes Baggage into a header string compliant with the W3C Baggage
|
||||||
// specification.
|
// specification.
|
||||||
|
// It would ignore members where the member key is invalid with the W3C Baggage
|
||||||
|
// specification. This could happen for a UTF-8 key, as it may contain
|
||||||
|
// invalid characters.
|
||||||
func (b Baggage) String() string {
|
func (b Baggage) String() string {
|
||||||
members := make([]string, 0, len(b.list))
|
members := make([]string, 0, len(b.list))
|
||||||
for k, v := range b.list {
|
for k, v := range b.list {
|
||||||
members = append(members, Member{
|
s := Member{
|
||||||
key: k,
|
key: k,
|
||||||
value: v.Value,
|
value: v.Value,
|
||||||
properties: fromInternalProperties(v.Properties),
|
properties: fromInternalProperties(v.Properties),
|
||||||
}.String())
|
}.String()
|
||||||
|
|
||||||
|
// Ignored empty members.
|
||||||
|
if s != "" {
|
||||||
|
members = append(members, s)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return strings.Join(members, listDelimiter)
|
return strings.Join(members, listDelimiter)
|
||||||
}
|
}
|
||||||
@ -607,10 +694,12 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Decode a percent-encoded value.
|
// Decode a percent-encoded value.
|
||||||
value, err := url.PathUnescape(s[valueStart:valueEnd])
|
rawVal := s[valueStart:valueEnd]
|
||||||
|
unescapeVal, err := url.PathUnescape(rawVal)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
|
||||||
|
|
||||||
ok = true
|
ok = true
|
||||||
p.key = s[keyStart:keyEnd]
|
p.key = s[keyStart:keyEnd]
|
||||||
@ -720,6 +809,24 @@ var safeKeyCharset = [utf8.RuneSelf]bool{
|
|||||||
'~': true,
|
'~': true,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name.
|
||||||
|
// Baggage name is a valid, non-empty UTF-8 string.
|
||||||
|
func validateBaggageName(s string) bool {
|
||||||
|
if len(s) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return utf8.ValidString(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value.
|
||||||
|
// Baggage value is a valid UTF-8 strings.
|
||||||
|
// Empty string is also a valid UTF-8 string.
|
||||||
|
func validateBaggageValue(s string) bool {
|
||||||
|
return utf8.ValidString(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateKey checks if the string is a valid W3C Baggage key.
|
||||||
func validateKey(s string) bool {
|
func validateKey(s string) bool {
|
||||||
if len(s) == 0 {
|
if len(s) == 0 {
|
||||||
return false
|
return false
|
||||||
@ -738,6 +845,7 @@ func validateKeyChar(c int32) bool {
|
|||||||
return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c]
|
return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// validateValue checks if the string is a valid W3C Baggage value.
|
||||||
func validateValue(s string) bool {
|
func validateValue(s string) bool {
|
||||||
for _, c := range s {
|
for _, c := range s {
|
||||||
if !validateValueChar(c) {
|
if !validateValueChar(c) {
|
||||||
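For context on what this vendored baggage change does: parsed values are now percent-decoded and any invalid UTF-8 bytes are replaced with the Unicode replacement character U+FFFD. The following is a minimal, standalone sketch of that replacement technique using only the standard library; it is not the vendored code itself, and the name sanitizeUTF8 is illustrative.

package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

// sanitizeUTF8 mirrors the idea of replaceInvalidUTF8Sequences above:
// keep valid runes, substitute U+FFFD for each invalid byte.
func sanitizeUTF8(s string) string {
	if utf8.ValidString(s) {
		return s
	}
	var b strings.Builder
	b.Grow(len(s))
	for i := 0; i < len(s); {
		r, size := utf8.DecodeRuneInString(s[i:])
		if r == utf8.RuneError && size == 1 {
			b.WriteRune('\uFFFD') // invalid byte, replace it
		} else {
			b.WriteRune(r)
		}
		i += size
	}
	return b.String()
}

func main() {
	fmt.Println(sanitizeUTF8("prop\xffval")) // prop�val
}

The select-valid-runes loop is what lets a baggage header with stray bytes survive parsing instead of being rejected outright.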

vendor/go.opentelemetry.io/otel/codes/codes.go (generated, vendored): 2 changes

@@ -83,7 +83,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
 			return fmt.Errorf("invalid code: %q", ci)
 		}
 
-		*c = Code(ci)
+		*c = Code(ci) // nolint: gosec // Bit size of 32 check above.
 		return nil
 	}
 	return fmt.Errorf("invalid code: %q", string(b))

vendor/go.opentelemetry.io/otel/doc.go (generated, vendored): 2 changes

@@ -17,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace.
 
 To read more about metrics, see go.opentelemetry.io/otel/metric.
 
+To read more about logs, see go.opentelemetry.io/otel/log.
+
 To read more about propagation, see go.opentelemetry.io/otel/propagation and
 go.opentelemetry.io/otel/baggage.
 */

vendor/go.opentelemetry.io/otel/internal/global/meter.go (generated, vendored): 317 changes

@@ -5,8 +5,8 @@ package global // import "go.opentelemetry.io/otel/internal/global"
 
 import (
 	"container/list"
+	"reflect"
 	"sync"
-	"sync/atomic"
 
 	"go.opentelemetry.io/otel/metric"
 	"go.opentelemetry.io/otel/metric/embedded"
@@ -76,7 +76,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me
 		return val
 	}
 
-	t := &meter{name: name, opts: opts}
+	t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)}
 	p.meters[key] = t
 	return t
 }
@@ -92,17 +92,29 @@ type meter struct {
 	opts []metric.MeterOption
 
 	mtx sync.Mutex
-	instruments []delegatedInstrument
+	instruments map[instID]delegatedInstrument
 
 	registry list.List
 
-	delegate atomic.Value // metric.Meter
+	delegate metric.Meter
 }
 
 type delegatedInstrument interface {
 	setDelegate(metric.Meter)
 }
 
+// instID are the identifying properties of a instrument.
+type instID struct {
+	// name is the name of the stream.
+	name string
+	// description is the description of the stream.
+	description string
+	// kind defines the functional group of the instrument.
+	kind reflect.Type
+	// unit is the unit of the stream.
+	unit string
+}
+
 // setDelegate configures m to delegate all Meter functionality to Meters
 // created by provider.
 //
@@ -110,12 +122,12 @@ type delegatedInstrument interface {
 //
 // It is guaranteed by the caller that this happens only once.
 func (m *meter) setDelegate(provider metric.MeterProvider) {
-	meter := provider.Meter(m.name, m.opts...)
-	m.delegate.Store(meter)
-
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
 
+	meter := provider.Meter(m.name, m.opts...)
+	m.delegate = meter
+
 	for _, inst := range m.instruments {
 		inst.setDelegate(meter)
 	}
@@ -133,169 +145,337 @@ func (m *meter) setDelegate(provider metric.MeterProvider) {
 }
 
 func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Int64Counter(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Int64Counter(name, options...)
+	}
+
+	cfg := metric.NewInt64CounterConfig(options...)
+	id := instID{
+		name: name,
+		kind: reflect.TypeOf((*siCounter)(nil)),
+		description: cfg.Description(),
+		unit: cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Int64Counter), nil
+	}
 	i := &siCounter{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Int64UpDownCounter(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Int64UpDownCounter(name, options...)
+	}
+
+	cfg := metric.NewInt64UpDownCounterConfig(options...)
+	id := instID{
+		name: name,
+		kind: reflect.TypeOf((*siUpDownCounter)(nil)),
+		description: cfg.Description(),
+		unit: cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Int64UpDownCounter), nil
+	}
 	i := &siUpDownCounter{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Int64Histogram(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Int64Histogram(name, options...)
+	}
+
+	cfg := metric.NewInt64HistogramConfig(options...)
+	id := instID{
+		name: name,
+		kind: reflect.TypeOf((*siHistogram)(nil)),
+		description: cfg.Description(),
+		unit: cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Int64Histogram), nil
+	}
 	i := &siHistogram{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Int64Gauge(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Int64Gauge(name, options...)
+	}
+
+	cfg := metric.NewInt64GaugeConfig(options...)
+	id := instID{
+		name: name,
+		kind: reflect.TypeOf((*siGauge)(nil)),
+		description: cfg.Description(),
+		unit: cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Int64Gauge), nil
+	}
 	i := &siGauge{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Int64ObservableCounter(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Int64ObservableCounter(name, options...)
+	}
+
+	cfg := metric.NewInt64ObservableCounterConfig(options...)
+	id := instID{
+		name: name,
+		kind: reflect.TypeOf((*aiCounter)(nil)),
+		description: cfg.Description(),
+		unit: cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Int64ObservableCounter), nil
+	}
 	i := &aiCounter{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Int64ObservableUpDownCounter(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Int64ObservableUpDownCounter(name, options...)
+	}
+
+	cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
+	id := instID{
+		name: name,
+		kind: reflect.TypeOf((*aiUpDownCounter)(nil)),
+		description: cfg.Description(),
+		unit: cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Int64ObservableUpDownCounter), nil
+	}
 	i := &aiUpDownCounter{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Int64ObservableGauge(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Int64ObservableGauge(name, options...)
+	}
+
+	cfg := metric.NewInt64ObservableGaugeConfig(options...)
+	id := instID{
+		name: name,
+		kind: reflect.TypeOf((*aiGauge)(nil)),
+		description: cfg.Description(),
+		unit: cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Int64ObservableGauge), nil
+	}
 	i := &aiGauge{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Float64Counter(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Float64Counter(name, options...)
+	}
+
+	cfg := metric.NewFloat64CounterConfig(options...)
+	id := instID{
+		name: name,
+		kind: reflect.TypeOf((*sfCounter)(nil)),
+		description: cfg.Description(),
+		unit: cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Float64Counter), nil
+	}
 	i := &sfCounter{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Float64UpDownCounter(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Float64UpDownCounter(name, options...)
+	}
+
+	cfg := metric.NewFloat64UpDownCounterConfig(options...)
+	id := instID{
+		name: name,
+		kind: reflect.TypeOf((*sfUpDownCounter)(nil)),
+		description: cfg.Description(),
+		unit: cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Float64UpDownCounter), nil
	}
 	i := &sfUpDownCounter{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Float64Histogram(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Float64Histogram(name, options...)
+	}
+
+	cfg := metric.NewFloat64HistogramConfig(options...)
+	id := instID{
+		name: name,
+		kind: reflect.TypeOf((*sfHistogram)(nil)),
+		description: cfg.Description(),
+		unit: cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Float64Histogram), nil
+	}
 	i := &sfHistogram{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Float64Gauge(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Float64Gauge(name, options...)
+	}
+
+	cfg := metric.NewFloat64GaugeConfig(options...)
+	id := instID{
+		name: name,
+		kind: reflect.TypeOf((*sfGauge)(nil)),
+		description: cfg.Description(),
+		unit: cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Float64Gauge), nil
+	}
 	i := &sfGauge{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Float64ObservableCounter(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Float64ObservableCounter(name, options...)
+	}
+
+	cfg := metric.NewFloat64ObservableCounterConfig(options...)
+	id := instID{
+		name: name,
+		kind: reflect.TypeOf((*afCounter)(nil)),
+		description: cfg.Description(),
+		unit: cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Float64ObservableCounter), nil
+	}
 	i := &afCounter{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Float64ObservableUpDownCounter(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Float64ObservableUpDownCounter(name, options...)
+	}
+
+	cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
+	id := instID{
+		name: name,
+		kind: reflect.TypeOf((*afUpDownCounter)(nil)),
+		description: cfg.Description(),
+		unit: cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Float64ObservableUpDownCounter), nil
+	}
 	i := &afUpDownCounter{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Float64ObservableGauge(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Float64ObservableGauge(name, options...)
+	}
+
+	cfg := metric.NewFloat64ObservableGaugeConfig(options...)
+	id := instID{
+		name: name,
+		kind: reflect.TypeOf((*afGauge)(nil)),
+		description: cfg.Description(),
+		unit: cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Float64ObservableGauge), nil
+	}
 	i := &afGauge{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 // RegisterCallback captures the function that will be called during Collect.
 func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		insts = unwrapInstruments(insts)
-		return del.RegisterCallback(f, insts...)
-	}
-
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
 
+	if m.delegate != nil {
+		insts = unwrapInstruments(insts)
+		return m.delegate.RegisterCallback(f, insts...)
+	}
+
 	reg := &registration{instruments: insts, function: f}
 	e := m.registry.PushBack(reg)
 	reg.unreg = func() error {
@@ -349,6 +529,7 @@ func (c *registration) setDelegate(m metric.Meter) {
 	reg, err := m.RegisterCallback(c.function, insts...)
 	if err != nil {
 		GetErrorHandler().Handle(err)
+		return
 	}
 
 	c.unreg = reg.Unregister

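The change above makes the global (delegating) meter deduplicate instruments by an identity key (name, kind, description, unit) held in a map, instead of appending every request to a slice. A small standalone sketch of that cache-by-identity pattern follows; instKey, registry and counterFor are illustrative names, not the vendored types.

package main

import "fmt"

// instKey mirrors the idea of the instID key above: instruments that share
// these identifying properties are treated as the same instrument.
type instKey struct {
	name, description, unit, kind string
}

type counter struct{ key instKey }

type registry struct {
	instruments map[instKey]*counter
}

// counterFor returns the cached instrument for key, creating it only once.
func (r *registry) counterFor(key instKey) *counter {
	if c, ok := r.instruments[key]; ok {
		return c
	}
	c := &counter{key: key}
	r.instruments[key] = c
	return c
}

func main() {
	r := &registry{instruments: make(map[instKey]*counter)}
	a := r.counterFor(instKey{name: "requests", unit: "1", kind: "counter"})
	b := r.counterFor(instKey{name: "requests", unit: "1", kind: "counter"})
	fmt.Println(a == b) // true: the second request reuses the first instrument
}

Keying on the full identity is what keeps repeated calls with the same name and options from allocating duplicate placeholder instruments before the real MeterProvider is installed.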
vendor/go.opentelemetry.io/otel/internal/rawhelpers.go (generated, vendored): 12 changes

@@ -20,11 +20,13 @@ func RawToBool(r uint64) bool {
 }
 
 func Int64ToRaw(i int64) uint64 {
-	return uint64(i)
+	// Assumes original was a valid int64 (overflow not checked).
+	return uint64(i) // nolint: gosec
 }
 
 func RawToInt64(r uint64) int64 {
-	return int64(r)
+	// Assumes original was a valid int64 (overflow not checked).
+	return int64(r) // nolint: gosec
 }
 
 func Float64ToRaw(f float64) uint64 {
@@ -36,9 +38,11 @@ func RawToFloat64(r uint64) float64 {
 }
 
 func RawPtrToFloat64Ptr(r *uint64) *float64 {
-	return (*float64)(unsafe.Pointer(r))
+	// Assumes original was a valid *float64 (overflow not checked).
+	return (*float64)(unsafe.Pointer(r)) // nolint: gosec
 }
 
 func RawPtrToInt64Ptr(r *uint64) *int64 {
-	return (*int64)(unsafe.Pointer(r))
+	// Assumes original was a valid *int64 (overflow not checked).
+	return (*int64)(unsafe.Pointer(r)) // nolint: gosec
 }

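These helpers reinterpret the bit pattern of a value rather than converting it numerically, which is why the gosec overflow warnings are suppressed. A hedged, standalone illustration of the same round trips using only the standard library:

package main

import (
	"fmt"
	"math"
)

func main() {
	// int64 <-> uint64: the conversion keeps the two's-complement bit
	// pattern, so a negative value survives the round trip unchanged.
	i := int64(-42)
	raw := uint64(i)
	fmt.Println(int64(raw)) // -42

	// float64 <-> uint64: math.Float64bits / Float64frombits perform the
	// same bit-level reinterpretation for floating point values.
	f := 3.14
	bits := math.Float64bits(f)
	fmt.Println(math.Float64frombits(bits)) // 3.14
}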
vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go (generated, vendored): 2 changes

@@ -213,7 +213,7 @@ type Float64Observer interface {
 }
 
 // Float64Callback is a function registered with a Meter that makes
-// observations for a Float64Observerable instrument it is registered with.
+// observations for a Float64Observable instrument it is registered with.
 // Calls to the Float64Observer record measurement values for the
 // Float64Observable.
 //

vendor/go.opentelemetry.io/otel/metric/asyncint64.go (generated, vendored): 2 changes

@@ -212,7 +212,7 @@ type Int64Observer interface {
 }
 
 // Int64Callback is a function registered with a Meter that makes observations
-// for an Int64Observerable instrument it is registered with. Calls to the
+// for an Int64Observable instrument it is registered with. Calls to the
 // Int64Observer record measurement values for the Int64Observable.
 //
 // The function needs to complete in a finite amount of time and the deadline

vendor/go.opentelemetry.io/otel/metric/instrument.go (generated, vendored): 2 changes

@@ -351,7 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption {
 //
 //	cp := make([]attribute.KeyValue, len(attributes))
 //	copy(cp, attributes)
-//	WithAttributes(attribute.NewSet(cp...))
+//	WithAttributeSet(attribute.NewSet(cp...))
 //
 // [attribute.NewSet] may modify the passed attributes so this will make a copy
 // of attributes before creating a set in order to ensure this function is

vendor/go.opentelemetry.io/otel/metric/meter.go (generated, vendored): 13 changes

@@ -52,6 +52,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
+
 	// Int64UpDownCounter returns a new Int64UpDownCounter instrument
 	// identified by name and configured with options. The instrument is used
 	// to synchronously record int64 measurements during a computational
@@ -61,6 +62,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
+
 	// Int64Histogram returns a new Int64Histogram instrument identified by
 	// name and configured with options. The instrument is used to
 	// synchronously record the distribution of int64 measurements during a
@@ -70,6 +72,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+
 	// Int64Gauge returns a new Int64Gauge instrument identified by name and
 	// configured with options. The instrument is used to synchronously record
 	// instantaneous int64 measurements during a computational operation.
@@ -78,6 +81,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error)
+
 	// Int64ObservableCounter returns a new Int64ObservableCounter identified
 	// by name and configured with options. The instrument is used to
 	// asynchronously record increasing int64 measurements once per a
@@ -92,6 +96,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
+
 	// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
 	// instrument identified by name and configured with options. The
 	// instrument is used to asynchronously record int64 measurements once per
@@ -106,6 +111,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
+
 	// Int64ObservableGauge returns a new Int64ObservableGauge instrument
 	// identified by name and configured with options. The instrument is used
 	// to asynchronously record instantaneous int64 measurements once per a
@@ -130,6 +136,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
+
 	// Float64UpDownCounter returns a new Float64UpDownCounter instrument
 	// identified by name and configured with options. The instrument is used
 	// to synchronously record float64 measurements during a computational
@@ -139,6 +146,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
+
 	// Float64Histogram returns a new Float64Histogram instrument identified by
 	// name and configured with options. The instrument is used to
 	// synchronously record the distribution of float64 measurements during a
@@ -148,6 +156,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+
 	// Float64Gauge returns a new Float64Gauge instrument identified by name and
 	// configured with options. The instrument is used to synchronously record
 	// instantaneous float64 measurements during a computational operation.
@@ -156,6 +165,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error)
+
 	// Float64ObservableCounter returns a new Float64ObservableCounter
 	// instrument identified by name and configured with options. The
 	// instrument is used to asynchronously record increasing float64
@@ -170,6 +180,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
+
 	// Float64ObservableUpDownCounter returns a new
 	// Float64ObservableUpDownCounter instrument identified by name and
 	// configured with options. The instrument is used to asynchronously record
@@ -184,6 +195,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
+
 	// Float64ObservableGauge returns a new Float64ObservableGauge instrument
 	// identified by name and configured with options. The instrument is used
 	// to asynchronously record instantaneous float64 measurements once per a
@@ -242,6 +254,7 @@ type Observer interface {
 
 	// ObserveFloat64 records the float64 value for obsrv.
 	ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+
 	// ObserveInt64 records the int64 value for obsrv.
 	ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
 }

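The Meter interface documented in these hunks is the factory for every instrument kind. As a quick orientation, a minimal usage sketch against the public API could look like the following; the meter name "example.com/hypothetical/app" and the counter name "app.requests" are made up for illustration.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	// otel.Meter returns the globally registered Meter (a no-op one until an
	// SDK MeterProvider is installed).
	meter := otel.Meter("example.com/hypothetical/app")

	// Int64Counter is one of the constructors listed in the interface above.
	requests, err := meter.Int64Counter(
		"app.requests",
		metric.WithDescription("Number of handled requests."),
		metric.WithUnit("{request}"),
	)
	if err != nil {
		panic(err)
	}

	requests.Add(context.Background(), 1)
}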
vendor/go.opentelemetry.io/otel/renovate.json (generated, vendored): 8 changes

@@ -19,6 +19,14 @@
       "matchManagers": ["gomod"],
       "matchDepTypes": ["indirect"],
       "enabled": false
+    },
+    {
+      "matchPackageNames": ["google.golang.org/genproto/googleapis/**"],
+      "groupName": "googleapis"
+    },
+    {
+      "matchPackageNames": ["golang.org/x/**"],
+      "groupName": "golang.org/x"
     }
   ]
 }

vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go (generated, vendored): 3 changes

@@ -4,5 +4,6 @@
 package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
 
 // Library represents the instrumentation library.
-// Deprecated: please use Scope instead.
+//
+// Deprecated: use [Scope] instead.
 type Library = Scope

vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
generated
vendored
7
vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
generated
vendored
@ -10,17 +10,16 @@ import (
|
|||||||
"golang.org/x/sys/windows/registry"
|
"golang.org/x/sys/windows/registry"
|
||||||
)
|
)
|
||||||
|
|
||||||
// implements hostIDReader
|
// implements hostIDReader.
|
||||||
type hostIDReaderWindows struct{}
|
type hostIDReaderWindows struct{}
|
||||||
|
|
||||||
// read reads MachineGuid from the windows registry key:
|
// read reads MachineGuid from the Windows registry key:
|
||||||
// SOFTWARE\Microsoft\Cryptography
|
// SOFTWARE\Microsoft\Cryptography.
|
||||||
func (*hostIDReaderWindows) read() (string, error) {
|
func (*hostIDReaderWindows) read() (string, error) {
|
||||||
k, err := registry.OpenKey(
|
k, err := registry.OpenKey(
|
||||||
registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`,
|
registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`,
|
||||||
registry.QUERY_VALUE|registry.WOW64_64KEY,
|
registry.QUERY_VALUE|registry.WOW64_64KEY,
|
||||||
)
|
)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
1
vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
generated
vendored
1
vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
generated
vendored
@ -17,7 +17,6 @@ import (
|
|||||||
func platformOSDescription() (string, error) {
|
func platformOSDescription() (string, error) {
|
||||||
k, err := registry.OpenKey(
|
k, err := registry.OpenKey(
|
||||||
registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
|
registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
6
vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
generated
vendored
6
vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
generated
vendored
@ -316,7 +316,11 @@ func (bsp *batchSpanProcessor) processQueue() {
|
|||||||
bsp.batchMutex.Unlock()
|
bsp.batchMutex.Unlock()
|
||||||
if shouldExport {
|
if shouldExport {
|
||||||
if !bsp.timer.Stop() {
|
if !bsp.timer.Stop() {
|
||||||
<-bsp.timer.C
|
// Handle both GODEBUG=asynctimerchan=[0|1] properly.
|
||||||
|
select {
|
||||||
|
case <-bsp.timer.C:
|
||||||
|
default:
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if err := bsp.exportSpans(ctx); err != nil {
|
if err := bsp.exportSpans(ctx); err != nil {
|
||||||
otel.Handle(err)
|
otel.Handle(err)
|
||||||
|
21
vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
generated
vendored
21
vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
generated
vendored
@ -12,25 +12,26 @@ import (
|
|||||||
|
|
||||||
// evictedQueue is a FIFO queue with a configurable capacity.
|
// evictedQueue is a FIFO queue with a configurable capacity.
|
||||||
type evictedQueue[T any] struct {
|
type evictedQueue[T any] struct {
|
||||||
queue []T
|
queue []T
|
||||||
capacity int
|
capacity int
|
||||||
droppedCount int
|
droppedCount int
|
||||||
logDropped func()
|
logDroppedMsg string
|
||||||
|
logDroppedOnce sync.Once
|
||||||
}
|
}
|
||||||
|
|
||||||
func newEvictedQueueEvent(capacity int) evictedQueue[Event] {
|
func newEvictedQueueEvent(capacity int) evictedQueue[Event] {
|
||||||
// Do not pre-allocate queue, do this lazily.
|
// Do not pre-allocate queue, do this lazily.
|
||||||
return evictedQueue[Event]{
|
return evictedQueue[Event]{
|
||||||
capacity: capacity,
|
capacity: capacity,
|
||||||
logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Event") }),
|
logDroppedMsg: "limit reached: dropping trace trace.Event",
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func newEvictedQueueLink(capacity int) evictedQueue[Link] {
|
func newEvictedQueueLink(capacity int) evictedQueue[Link] {
|
||||||
// Do not pre-allocate queue, do this lazily.
|
// Do not pre-allocate queue, do this lazily.
|
||||||
return evictedQueue[Link]{
|
return evictedQueue[Link]{
|
||||||
capacity: capacity,
|
capacity: capacity,
|
||||||
logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Link") }),
|
logDroppedMsg: "limit reached: dropping trace trace.Link",
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -53,6 +54,10 @@ func (eq *evictedQueue[T]) add(value T) {
|
|||||||
eq.queue = append(eq.queue, value)
|
eq.queue = append(eq.queue, value)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (eq *evictedQueue[T]) logDropped() {
|
||||||
|
eq.logDroppedOnce.Do(func() { global.Warn(eq.logDroppedMsg) })
|
||||||
|
}
|
||||||
|
|
||||||
// copy returns a copy of the evictedQueue.
|
// copy returns a copy of the evictedQueue.
|
||||||
func (eq *evictedQueue[T]) copy() []T {
|
func (eq *evictedQueue[T]) copy() []T {
|
||||||
return slices.Clone(eq.queue)
|
return slices.Clone(eq.queue)
|
||||||
|
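The queue now stores the warning text plus a sync.Once and logs the "limit reached" message at most once per queue, instead of carrying a pre-built sync.OnceFunc closure. A small standalone sketch of that one-shot warning pattern; boundedQueue and its fields are illustrative names, and the drop-newest policy here is only for the demo, not the vendored eviction behavior.

package main

import (
	"fmt"
	"sync"
)

type boundedQueue struct {
	items        []string
	capacity     int
	dropped      int
	dropWarnMsg  string
	dropWarnOnce sync.Once
}

func (q *boundedQueue) add(v string) {
	if len(q.items) == q.capacity {
		q.dropped++
		// Warn only the first time an item is dropped.
		q.dropWarnOnce.Do(func() { fmt.Println("warn:", q.dropWarnMsg) })
		return
	}
	q.items = append(q.items, v)
}

func main() {
	q := &boundedQueue{capacity: 1, dropWarnMsg: "limit reached: dropping items"}
	q.add("a")
	q.add("b") // dropped, warns once
	q.add("c") // dropped, silent
	fmt.Println(q.items, q.dropped)
}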
2
vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
generated
vendored
2
vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
generated
vendored
@ -99,7 +99,7 @@ func (s snapshot) InstrumentationScope() instrumentation.Scope {
|
|||||||
|
|
||||||
// InstrumentationLibrary returns information about the instrumentation
|
// InstrumentationLibrary returns information about the instrumentation
|
||||||
// library that created the span.
|
// library that created the span.
|
||||||
func (s snapshot) InstrumentationLibrary() instrumentation.Library {
|
func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility
|
||||||
return s.instrumentationScope
|
return s.instrumentationScope
|
||||||
}
|
}
|
||||||
|
|
||||||
107  vendor/go.opentelemetry.io/otel/sdk/trace/span.go  generated vendored
@@ -62,7 +62,7 @@ type ReadOnlySpan interface {
 	// InstrumentationLibrary returns information about the instrumentation
 	// library that created the span.
 	// Deprecated: please use InstrumentationScope instead.
-	InstrumentationLibrary() instrumentation.Library
+	InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility
 	// Resource returns information about the entity that produced the span.
 	Resource() *resource.Resource
 	// DroppedAttributes returns the number of attributes dropped by the span
@@ -174,6 +174,17 @@ func (s *recordingSpan) IsRecording() bool {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
+	return s.isRecording()
+}
+
+// isRecording returns if this span is being recorded. If this span has ended
+// this will return false.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) isRecording() bool {
+	if s == nil {
+		return false
+	}
 	return s.endTime.IsZero()
 }
 
@@ -182,11 +193,15 @@ func (s *recordingSpan) IsRecording() bool {
 // included in the set status when the code is for an error. If this span is
 // not being recorded than this method does nothing.
 func (s *recordingSpan) SetStatus(code codes.Code, description string) {
-	if !s.IsRecording() {
+	if s == nil {
 		return
 	}
 
 	s.mu.Lock()
 	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
 	if s.status.Code > code {
 		return
 	}
@@ -210,12 +225,15 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) {
 // attributes the span is configured to have, the last added attributes will
 // be dropped.
 func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
-	if !s.IsRecording() {
+	if s == nil || len(attributes) == 0 {
 		return
 	}
 
 	s.mu.Lock()
 	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
 
 	limit := s.tracer.provider.spanLimits.AttributeCountLimit
 	if limit == 0 {
@@ -233,7 +251,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
 
 	// Otherwise, add without deduplication. When attributes are read they
 	// will be deduplicated, optimizing the operation.
-	s.attributes = slices.Grow(s.attributes, len(s.attributes)+len(attributes))
+	s.attributes = slices.Grow(s.attributes, len(attributes))
 	for _, a := range attributes {
 		if !a.Valid() {
 			// Drop all invalid attributes.
@@ -280,13 +298,17 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
 
 	// Do not set a capacity when creating this map. Benchmark testing has
 	// showed this to only add unused memory allocations in general use.
-	exists := make(map[attribute.Key]int)
-	s.dedupeAttrsFromRecord(&exists)
+	exists := make(map[attribute.Key]int, len(s.attributes))
+	s.dedupeAttrsFromRecord(exists)
 
 	// Now that s.attributes is deduplicated, adding unique attributes up to
 	// the capacity of s will not over allocate s.attributes.
-	sum := len(attrs) + len(s.attributes)
-	s.attributes = slices.Grow(s.attributes, min(sum, limit))
+	// max size = limit
+	maxCap := min(len(attrs)+len(s.attributes), limit)
+	if cap(s.attributes) < maxCap {
+		s.attributes = slices.Grow(s.attributes, maxCap-cap(s.attributes))
+	}
 	for _, a := range attrs {
 		if !a.Valid() {
 			// Drop all invalid attributes.
@@ -296,6 +318,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
 
 		if idx, ok := exists[a.Key]; ok {
 			// Perform all updates before dropping, even when at capacity.
+			a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
 			s.attributes[idx] = a
 			continue
 		}
@@ -386,9 +409,10 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
 	// the span's duration in case some operation below takes a while.
 	et := monotonicEndTime(s.startTime)
 
-	// Do relative expensive check now that we have an end time and see if we
-	// need to do any more processing.
-	if !s.IsRecording() {
+	// Lock the span now that we have an end time and see if we need to do any more processing.
+	s.mu.Lock()
+	if !s.isRecording() {
+		s.mu.Unlock()
 		return
 	}
 
@@ -413,10 +437,11 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
 	}
 
 	if s.executionTracerTaskEnd != nil {
+		s.mu.Unlock()
 		s.executionTracerTaskEnd()
+		s.mu.Lock()
 	}
 
-	s.mu.Lock()
 	// Setting endTime to non-zero marks the span as ended and not recording.
 	if config.Timestamp().IsZero() {
 		s.endTime = et
@@ -450,7 +475,13 @@ func monotonicEndTime(start time.Time) time.Time {
 // does not change the Span status. If this span is not being recorded or err is nil
 // than this method does nothing.
 func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
-	if s == nil || err == nil || !s.IsRecording() {
+	if s == nil || err == nil {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
 		return
 	}
 
@@ -486,14 +517,23 @@ func recordStackTrace() string {
 }
 
 // AddEvent adds an event with the provided name and options. If this span is
-// not being recorded than this method does nothing.
+// not being recorded then this method does nothing.
 func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
-	if !s.IsRecording() {
+	if s == nil {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
 		return
 	}
 	s.addEvent(name, o...)
 }
 
+// addEvent adds an event with the provided name and options.
+//
+// This method assumes s.mu.Lock is held by the caller.
 func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
 	c := trace.NewEventConfig(o...)
 	e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()}
@@ -510,20 +550,21 @@ func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
 		e.Attributes = e.Attributes[:limit]
 	}
 
-	s.mu.Lock()
 	s.events.add(e)
-	s.mu.Unlock()
 }
 
 // SetName sets the name of this span. If this span is not being recorded than
 // this method does nothing.
 func (s *recordingSpan) SetName(name string) {
-	if !s.IsRecording() {
+	if s == nil {
		return
	}
 
 	s.mu.Lock()
 	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
 	s.name = name
 }
 
@@ -579,23 +620,23 @@ func (s *recordingSpan) Attributes() []attribute.KeyValue {
 func (s *recordingSpan) dedupeAttrs() {
 	// Do not set a capacity when creating this map. Benchmark testing has
 	// showed this to only add unused memory allocations in general use.
-	exists := make(map[attribute.Key]int)
-	s.dedupeAttrsFromRecord(&exists)
+	exists := make(map[attribute.Key]int, len(s.attributes))
+	s.dedupeAttrsFromRecord(exists)
 }
 
 // dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity
 // using record as the record of unique attribute keys to their index.
 //
 // This method assumes s.mu.Lock is held by the caller.
-func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) {
+func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) {
 	// Use the fact that slices share the same backing array.
 	unique := s.attributes[:0]
 	for _, a := range s.attributes {
-		if idx, ok := (*record)[a.Key]; ok {
+		if idx, ok := record[a.Key]; ok {
 			unique[idx] = a
 		} else {
 			unique = append(unique, a)
-			(*record)[a.Key] = len(unique) - 1
+			record[a.Key] = len(unique) - 1
 		}
 	}
 	// s.attributes have element types of attribute.KeyValue. These types are
@@ -642,7 +683,7 @@ func (s *recordingSpan) InstrumentationScope() instrumentation.Scope {
 
 // InstrumentationLibrary returns the instrumentation.Library associated with
 // the Tracer that created this span.
-func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library {
+func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	return s.tracer.instrumentationScope
@@ -657,7 +698,7 @@ func (s *recordingSpan) Resource() *resource.Resource {
 }
 
 func (s *recordingSpan) AddLink(link trace.Link) {
-	if !s.IsRecording() {
+	if s == nil {
 		return
 	}
 	if !link.SpanContext.IsValid() && len(link.Attributes) == 0 &&
@@ -665,6 +706,12 @@ func (s *recordingSpan) AddLink(link trace.Link) {
 		return
 	}
 
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
+
 	l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes}
 
 	// Discard attributes over limit.
@@ -678,9 +725,7 @@ func (s *recordingSpan) AddLink(link trace.Link) {
 		l.Attributes = l.Attributes[:limit]
 	}
 
-	s.mu.Lock()
 	s.links.add(l)
-	s.mu.Unlock()
 }
 
 // DroppedAttributes returns the number of attributes dropped by the span
@@ -755,12 +800,16 @@ func (s *recordingSpan) snapshot() ReadOnlySpan {
 }
 
 func (s *recordingSpan) addChild() {
-	if !s.IsRecording() {
+	if s == nil {
 		return
 	}
 
 	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
 	s.childSpanCount++
-	s.mu.Unlock()
 }
 
 func (*recordingSpan) private() {}
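A pattern repeated throughout the span.go hunks above: exported methods now nil-check the receiver, take s.mu exactly once, and then consult an unexported isRecording that assumes the lock is already held, instead of calling the locking IsRecording a second time. A minimal sketch of that pattern on an illustrative type (not the vendored recordingSpan):

    package main

    import (
    	"sync"
    	"time"
    )

    type span struct {
    	mu      sync.Mutex
    	endTime time.Time
    	name    string
    }

    // isRecording assumes s.mu is held by the caller.
    func (s *span) isRecording() bool {
    	if s == nil {
    		return false
    	}
    	return s.endTime.IsZero()
    }

    // SetName locks once and is a no-op on a nil or already-ended span.
    func (s *span) SetName(name string) {
    	if s == nil {
    		return
    	}
    	s.mu.Lock()
    	defer s.mu.Unlock()
    	if !s.isRecording() {
    		return
    	}
    	s.name = name
    }

    func main() {
    	var missing *span
    	missing.SetName("ignored") // nil receiver is tolerated

    	s := &span{}
    	s.SetName("operation")
    }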
2  vendor/go.opentelemetry.io/otel/sdk/version.go  generated vendored
@@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
 
 // Version is the current release version of the OpenTelemetry SDK in use.
 func Version() string {
-	return "1.28.0"
+	return "1.31.0"
 }
2  vendor/go.opentelemetry.io/otel/semconv/internal/http.go  generated vendored
@@ -115,7 +115,7 @@ func hostIPNamePort(hostWithPort string) (ip string, name string, port int) {
 		name = hostPart
 	}
 	if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil {
-		port = int(parsedPort)
+		port = int(parsedPort) // nolint: gosec  // Bit size of 16 checked above.
 	}
 	return
 }
2  vendor/go.opentelemetry.io/otel/trace/context.go  generated vendored
@@ -22,7 +22,7 @@ func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context {
 	return ContextWithSpan(parent, nonRecordingSpan{sc: sc})
 }
 
-// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly
+// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly
 // as a remote SpanContext and as the current Span. The Span implementation
 // that wraps rsc is non-recording and performs no operations other than to
 // return rsc as the SpanContext from the SpanContext method.
2  vendor/go.opentelemetry.io/otel/trace/doc.go  generated vendored
@@ -96,7 +96,7 @@ can embed the API interface directly.
 
 This option is not recommended. It will lead to publishing packages that
 contain runtime panics when users update to newer versions of
-[go.opentelemetry.io/otel/trace], which may be done with a trasitive
+[go.opentelemetry.io/otel/trace], which may be done with a transitive
 dependency.
 
 Finally, an author can embed another implementation in theirs. The embedded
59  vendor/go.opentelemetry.io/otel/trace/provider.go  generated vendored Normal file
@@ -0,0 +1,59 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import "go.opentelemetry.io/otel/trace/embedded"
+
+// TracerProvider provides Tracers that are used by instrumentation code to
+// trace computational workflows.
+//
+// A TracerProvider is the collection destination of all Spans from Tracers it
+// provides, it represents a unique telemetry collection pipeline. How that
+// pipeline is defined, meaning how those Spans are collected, processed, and
+// where they are exported, depends on its implementation. Instrumentation
+// authors do not need to define this implementation, rather just use the
+// provided Tracers to instrument code.
+//
+// Commonly, instrumentation code will accept a TracerProvider implementation
+// at runtime from its users or it can simply use the globally registered one
+// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type TracerProvider interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.TracerProvider
+
+	// Tracer returns a unique Tracer scoped to be used by instrumentation code
+	// to trace computational workflows. The scope and identity of that
+	// instrumentation code is uniquely defined by the name and options passed.
+	//
+	// The passed name needs to uniquely identify instrumentation code.
+	// Therefore, it is recommended that name is the Go package name of the
+	// library providing instrumentation (note: not the code being
+	// instrumented). Instrumentation libraries can have multiple versions,
+	// therefore, the WithInstrumentationVersion option should be used to
+	// distinguish these different codebases. Additionally, instrumentation
+	// libraries may sometimes use traces to communicate different domains of
+	// workflow data (i.e. using spans to communicate workflow events only). If
+	// this is the case, the WithScopeAttributes option should be used to
+	// uniquely identify Tracers that handle the different domains of workflow
+	// data.
+	//
+	// If the same name and options are passed multiple times, the same Tracer
+	// will be returned (it is up to the implementation if this will be the
+	// same underlying instance of that Tracer or not). It is not necessary to
+	// call this multiple times with the same name and options to get an
+	// up-to-date Tracer. All implementations will ensure any TracerProvider
+	// configuration changes are propagated to all provided Tracers.
+	//
+	// If name is empty, then an implementation defined default name will be
+	// used instead.
+	//
+	// This method is safe to call concurrently.
+	Tracer(name string, options ...TracerOption) Tracer
+}
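The new provider.go above carries only the TracerProvider interface and its documentation. A short, generic usage sketch of that API (not ceph-csi code; the instrumentation name and version are placeholders), using the globally registered provider mentioned in the doc comment:

    package main

    import (
    	"context"

    	"go.opentelemetry.io/otel"
    	"go.opentelemetry.io/otel/trace"
    )

    func main() {
    	// The Tracer name identifies the instrumenting package, not the code it traces.
    	tracer := otel.GetTracerProvider().Tracer(
    		"example.com/mypkg",
    		trace.WithInstrumentationVersion("v0.1.0"),
    	)

    	ctx, span := tracer.Start(context.Background(), "operation")
    	defer span.End()
    	_ = ctx // pass ctx on so child spans parent correctly
    }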
177  vendor/go.opentelemetry.io/otel/trace/span.go  generated vendored Normal file
@@ -0,0 +1,177 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Span is the individual component of a trace. It represents a single named
+// and timed operation of a workflow that is traced. A Tracer is used to
+// create a Span and it is then up to the operation the Span represents to
+// properly end the Span when the operation itself ends.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Span interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Span
+
+	// End completes the Span. The Span is considered complete and ready to be
+	// delivered through the rest of the telemetry pipeline after this method
+	// is called. Therefore, updates to the Span are not allowed after this
+	// method has been called.
+	End(options ...SpanEndOption)
+
+	// AddEvent adds an event with the provided name and options.
+	AddEvent(name string, options ...EventOption)
+
+	// AddLink adds a link.
+	// Adding links at span creation using WithLinks is preferred to calling AddLink
+	// later, for contexts that are available during span creation, because head
+	// sampling decisions can only consider information present during span creation.
+	AddLink(link Link)
+
+	// IsRecording returns the recording state of the Span. It will return
+	// true if the Span is active and events can be recorded.
+	IsRecording() bool
+
+	// RecordError will record err as an exception span event for this span. An
+	// additional call to SetStatus is required if the Status of the Span should
+	// be set to Error, as this method does not change the Span status. If this
+	// span is not being recorded or err is nil then this method does nothing.
+	RecordError(err error, options ...EventOption)
+
+	// SpanContext returns the SpanContext of the Span. The returned SpanContext
+	// is usable even after the End method has been called for the Span.
+	SpanContext() SpanContext
+
+	// SetStatus sets the status of the Span in the form of a code and a
+	// description, provided the status hasn't already been set to a higher
+	// value before (OK > Error > Unset). The description is only included in a
+	// status when the code is for an error.
+	SetStatus(code codes.Code, description string)
+
+	// SetName sets the Span name.
+	SetName(name string)
+
+	// SetAttributes sets kv as attributes of the Span. If a key from kv
+	// already exists for an attribute of the Span it will be overwritten with
+	// the value contained in kv.
+	SetAttributes(kv ...attribute.KeyValue)
+
+	// TracerProvider returns a TracerProvider that can be used to generate
+	// additional Spans on the same telemetry pipeline as the current Span.
+	TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+//  1. Batch Processing: A batch of operations may contain operations
+//     associated with one or more traces/spans. Since there can only be one
+//     parent SpanContext, a Link is used to keep reference to the
+//     SpanContext of all operations in the batch.
+//  2. Public Endpoint: A SpanContext for an in incoming client request on a
+//     public endpoint should be considered untrusted. In such a case, a new
+//     trace with its own identity and sampling decision needs to be created,
+//     but this new trace needs to be related to the original trace in some
+//     form. A Link is used to keep reference to the original SpanContext and
+//     track the relationship.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided
+// ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+	return Link{
+		SpanContext: SpanContextFromContext(ctx),
+		Attributes:  attrs,
+	}
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+	// SpanKindUnspecified is an unspecified SpanKind and is not a valid
+	// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+	// if it is received.
+	SpanKindUnspecified SpanKind = 0
+	// SpanKindInternal is a SpanKind for a Span that represents an internal
+	// operation within an application.
+	SpanKindInternal SpanKind = 1
+	// SpanKindServer is a SpanKind for a Span that represents the operation
+	// of handling a request from a client.
+	SpanKindServer SpanKind = 2
+	// SpanKindClient is a SpanKind for a Span that represents the operation
+	// of client making a request to a server.
+	SpanKindClient SpanKind = 3
+	// SpanKindProducer is a SpanKind for a Span that represents the operation
+	// of a producer sending a message to a message broker. Unlike
+	// SpanKindClient and SpanKindServer, there is often no direct
+	// relationship between this kind of Span and a SpanKindConsumer kind. A
+	// SpanKindProducer Span will end once the message is accepted by the
+	// message broker which might not overlap with the processing of that
+	// message.
+	SpanKindProducer SpanKind = 4
+	// SpanKindConsumer is a SpanKind for a Span that represents the operation
+	// of a consumer receiving a message from a message broker. Like
+	// SpanKindProducer Spans, there is often no direct relationship between
+	// this Span and the Span that produced the message.
+	SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value. This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+	switch spanKind {
+	case SpanKindInternal,
+		SpanKindServer,
+		SpanKindClient,
+		SpanKindProducer,
+		SpanKindConsumer:
+		// valid
+		return spanKind
+	default:
+		return SpanKindInternal
+	}
+}
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+	switch sk {
+	case SpanKindInternal:
+		return "internal"
+	case SpanKindServer:
+		return "server"
+	case SpanKindClient:
+		return "client"
+	case SpanKindProducer:
+		return "producer"
+	case SpanKindConsumer:
+		return "consumer"
+	default:
+		return "unspecified"
+	}
+}
249  vendor/go.opentelemetry.io/otel/trace/trace.go  generated vendored
@@ -5,13 +5,8 @@ package trace // import "go.opentelemetry.io/otel/trace"
 
 import (
 	"bytes"
-	"context"
 	"encoding/hex"
 	"encoding/json"
-
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/codes"
-	"go.opentelemetry.io/otel/trace/embedded"
 )
 
 const (
@@ -326,247 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) {
 		Remote:     sc.remote,
 	})
 }
-
-// Span is the individual component of a trace. It represents a single named
-// and timed operation of a workflow that is traced. A Tracer is used to
-// create a Span and it is then up to the operation the Span represents to
-// properly end the Span when the operation itself ends.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Span interface {
-	// Users of the interface can ignore this. This embedded type is only used
-	// by implementations of this interface. See the "API Implementations"
-	// section of the package documentation for more information.
-	embedded.Span
-
-	// End completes the Span. The Span is considered complete and ready to be
-	// delivered through the rest of the telemetry pipeline after this method
-	// is called. Therefore, updates to the Span are not allowed after this
-	// method has been called.
-	End(options ...SpanEndOption)
-
-	// AddEvent adds an event with the provided name and options.
-	AddEvent(name string, options ...EventOption)
-
-	// AddLink adds a link.
-	// Adding links at span creation using WithLinks is preferred to calling AddLink
-	// later, for contexts that are available during span creation, because head
-	// sampling decisions can only consider information present during span creation.
-	AddLink(link Link)
-
-	// IsRecording returns the recording state of the Span. It will return
-	// true if the Span is active and events can be recorded.
-	IsRecording() bool
-
-	// RecordError will record err as an exception span event for this span. An
-	// additional call to SetStatus is required if the Status of the Span should
-	// be set to Error, as this method does not change the Span status. If this
-	// span is not being recorded or err is nil then this method does nothing.
-	RecordError(err error, options ...EventOption)
-
-	// SpanContext returns the SpanContext of the Span. The returned SpanContext
-	// is usable even after the End method has been called for the Span.
-	SpanContext() SpanContext
-
-	// SetStatus sets the status of the Span in the form of a code and a
-	// description, provided the status hasn't already been set to a higher
-	// value before (OK > Error > Unset). The description is only included in a
-	// status when the code is for an error.
-	SetStatus(code codes.Code, description string)
-
-	// SetName sets the Span name.
-	SetName(name string)
-
-	// SetAttributes sets kv as attributes of the Span. If a key from kv
-	// already exists for an attribute of the Span it will be overwritten with
-	// the value contained in kv.
-	SetAttributes(kv ...attribute.KeyValue)
-
-	// TracerProvider returns a TracerProvider that can be used to generate
-	// additional Spans on the same telemetry pipeline as the current Span.
-	TracerProvider() TracerProvider
-}
-
-// Link is the relationship between two Spans. The relationship can be within
-// the same Trace or across different Traces.
-//
-// For example, a Link is used in the following situations:
-//
-//  1. Batch Processing: A batch of operations may contain operations
-//     associated with one or more traces/spans. Since there can only be one
-//     parent SpanContext, a Link is used to keep reference to the
-//     SpanContext of all operations in the batch.
-//  2. Public Endpoint: A SpanContext for an in incoming client request on a
-//     public endpoint should be considered untrusted. In such a case, a new
-//     trace with its own identity and sampling decision needs to be created,
-//     but this new trace needs to be related to the original trace in some
-//     form. A Link is used to keep reference to the original SpanContext and
-//     track the relationship.
-type Link struct {
-	// SpanContext of the linked Span.
-	SpanContext SpanContext
-
-	// Attributes describe the aspects of the link.
-	Attributes []attribute.KeyValue
-}
-
-// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx.
-func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
-	return Link{
-		SpanContext: SpanContextFromContext(ctx),
-		Attributes:  attrs,
-	}
-}
-
-// SpanKind is the role a Span plays in a Trace.
-type SpanKind int
-
-// As a convenience, these match the proto definition, see
-// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
-//
-// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
-// to coerce a span kind to a valid value.
-const (
-	// SpanKindUnspecified is an unspecified SpanKind and is not a valid
-	// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
-	// if it is received.
-	SpanKindUnspecified SpanKind = 0
-	// SpanKindInternal is a SpanKind for a Span that represents an internal
-	// operation within an application.
-	SpanKindInternal SpanKind = 1
-	// SpanKindServer is a SpanKind for a Span that represents the operation
-	// of handling a request from a client.
-	SpanKindServer SpanKind = 2
-	// SpanKindClient is a SpanKind for a Span that represents the operation
-	// of client making a request to a server.
-	SpanKindClient SpanKind = 3
-	// SpanKindProducer is a SpanKind for a Span that represents the operation
-	// of a producer sending a message to a message broker. Unlike
-	// SpanKindClient and SpanKindServer, there is often no direct
-	// relationship between this kind of Span and a SpanKindConsumer kind. A
-	// SpanKindProducer Span will end once the message is accepted by the
-	// message broker which might not overlap with the processing of that
-	// message.
-	SpanKindProducer SpanKind = 4
-	// SpanKindConsumer is a SpanKind for a Span that represents the operation
-	// of a consumer receiving a message from a message broker. Like
-	// SpanKindProducer Spans, there is often no direct relationship between
-	// this Span and the Span that produced the message.
-	SpanKindConsumer SpanKind = 5
-)
-
-// ValidateSpanKind returns a valid span kind value. This will coerce
-// invalid values into the default value, SpanKindInternal.
-func ValidateSpanKind(spanKind SpanKind) SpanKind {
-	switch spanKind {
-	case SpanKindInternal,
-		SpanKindServer,
-		SpanKindClient,
-		SpanKindProducer,
-		SpanKindConsumer:
-		// valid
-		return spanKind
-	default:
-		return SpanKindInternal
-	}
-}
-
-// String returns the specified name of the SpanKind in lower-case.
-func (sk SpanKind) String() string {
-	switch sk {
-	case SpanKindInternal:
-		return "internal"
-	case SpanKindServer:
-		return "server"
-	case SpanKindClient:
-		return "client"
-	case SpanKindProducer:
-		return "producer"
-	case SpanKindConsumer:
-		return "consumer"
-	default:
-		return "unspecified"
-	}
-}
-
-// Tracer is the creator of Spans.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Tracer interface {
-	// Users of the interface can ignore this. This embedded type is only used
-	// by implementations of this interface. See the "API Implementations"
-	// section of the package documentation for more information.
-	embedded.Tracer
-
-	// Start creates a span and a context.Context containing the newly-created span.
-	//
-	// If the context.Context provided in `ctx` contains a Span then the newly-created
-	// Span will be a child of that span, otherwise it will be a root span. This behavior
-	// can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
-	// newly-created Span to be a root span even if `ctx` contains a Span.
-	//
-	// When creating a Span it is recommended to provide all known span attributes using
-	// the `WithAttributes()` SpanOption as samplers will only have access to the
-	// attributes provided when a Span is created.
-	//
-	// Any Span that is created MUST also be ended. This is the responsibility of the user.
-	// Implementations of this API may leak memory or other resources if Spans are not ended.
-	Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
-}
-
-// TracerProvider provides Tracers that are used by instrumentation code to
-// trace computational workflows.
-//
-// A TracerProvider is the collection destination of all Spans from Tracers it
-// provides, it represents a unique telemetry collection pipeline. How that
-// pipeline is defined, meaning how those Spans are collected, processed, and
-// where they are exported, depends on its implementation. Instrumentation
-// authors do not need to define this implementation, rather just use the
-// provided Tracers to instrument code.
-//
-// Commonly, instrumentation code will accept a TracerProvider implementation
-// at runtime from its users or it can simply use the globally registered one
-// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type TracerProvider interface {
-	// Users of the interface can ignore this. This embedded type is only used
-	// by implementations of this interface. See the "API Implementations"
-	// section of the package documentation for more information.
-	embedded.TracerProvider
-
-	// Tracer returns a unique Tracer scoped to be used by instrumentation code
-	// to trace computational workflows. The scope and identity of that
-	// instrumentation code is uniquely defined by the name and options passed.
-	//
-	// The passed name needs to uniquely identify instrumentation code.
-	// Therefore, it is recommended that name is the Go package name of the
-	// library providing instrumentation (note: not the code being
-	// instrumented). Instrumentation libraries can have multiple versions,
-	// therefore, the WithInstrumentationVersion option should be used to
-	// distinguish these different codebases. Additionally, instrumentation
-	// libraries may sometimes use traces to communicate different domains of
-	// workflow data (i.e. using spans to communicate workflow events only). If
-	// this is the case, the WithScopeAttributes option should be used to
-	// uniquely identify Tracers that handle the different domains of workflow
-	// data.
-	//
-	// If the same name and options are passed multiple times, the same Tracer
-	// will be returned (it is up to the implementation if this will be the
-	// same underlying instance of that Tracer or not). It is not necessary to
-	// call this multiple times with the same name and options to get an
-	// up-to-date Tracer. All implementations will ensure any TracerProvider
-	// configuration changes are propagated to all provided Tracers.
-	//
-	// If name is empty, then an implementation defined default name will be
-	// used instead.
-	//
-	// This method is safe to call concurrently.
-	Tracer(name string, options ...TracerOption) Tracer
-}
37  vendor/go.opentelemetry.io/otel/trace/tracer.go  generated vendored Normal file
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Tracer is the creator of Spans.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Tracer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Tracer
+
+	// Start creates a span and a context.Context containing the newly-created span.
+	//
+	// If the context.Context provided in `ctx` contains a Span then the newly-created
+	// Span will be a child of that span, otherwise it will be a root span. This behavior
+	// can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+	// newly-created Span to be a root span even if `ctx` contains a Span.
+	//
+	// When creating a Span it is recommended to provide all known span attributes using
+	// the `WithAttributes()` SpanOption as samplers will only have access to the
+	// attributes provided when a Span is created.
+	//
+	// Any Span that is created MUST also be ended. This is the responsibility of the user.
+	// Implementations of this API may leak memory or other resources if Spans are not ended.
+	Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
10  vendor/go.opentelemetry.io/otel/trace/tracestate.go  generated vendored
@@ -260,6 +260,16 @@ func (ts TraceState) Get(key string) string {
 	return ""
 }
 
+// Walk walks all key value pairs in the TraceState by calling f
+// Iteration stops if f returns false.
+func (ts TraceState) Walk(f func(key, value string) bool) {
+	for _, m := range ts.list {
+		if !f(m.Key, m.Value) {
+			break
+		}
+	}
+}
+
 // Insert adds a new list-member defined by the key/value pair to the
 // TraceState. If a list-member already exists for the given key, that
 // list-member's value is updated. The new or updated list-member is always
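The tracestate.go hunk above adds TraceState.Walk, which visits list members in order and stops as soon as the callback returns false. A small usage sketch (the tracestate value is an arbitrary example):

    package main

    import (
    	"fmt"

    	"go.opentelemetry.io/otel/trace"
    )

    func main() {
    	ts, err := trace.ParseTraceState("vendor1=opaque1,vendor2=opaque2")
    	if err != nil {
    		panic(err)
    	}
    	ts.Walk(func(key, value string) bool {
    		fmt.Println(key, "=", value)
    		return key != "vendor1" // stop after the first member
    	})
    }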
74  vendor/go.opentelemetry.io/otel/verify_examples.sh  generated vendored
@@ -1,74 +0,0 @@
-#!/bin/bash
-
-# Copyright The OpenTelemetry Authors
-# SPDX-License-Identifier: Apache-2.0
-
-set -euo pipefail
-
-cd $(dirname $0)
-TOOLS_DIR=$(pwd)/.tools
-
-if [ -z "${GOPATH}" ] ; then
-	printf "GOPATH is not defined.\n"
-	exit -1
-fi
-
-if [ ! -d "${GOPATH}" ] ; then
-	printf "GOPATH ${GOPATH} is invalid \n"
-	exit -1
-fi
-
-# Pre-requisites
-if ! git diff --quiet; then \
-	git status
-	printf "\n\nError: working tree is not clean\n"
-	exit -1
-fi
-
-if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then
-	printf "$(git log -1)"
-	printf "\n\nError: HEAD is not pointing to a tagged version"
-fi
-
-make ${TOOLS_DIR}/gojq
-
-DIR_TMP="${GOPATH}/src/oteltmp/"
-rm -rf $DIR_TMP
-mkdir -p $DIR_TMP
-
-printf "Copy examples to ${DIR_TMP}\n"
-cp -a ./example ${DIR_TMP}
-
-# Update go.mod files
-printf "Update go.mod: rename module and remove replace\n"
-
-PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort)
-
-for dir in $PACKAGE_DIRS; do
-	printf "  Update go.mod for $dir\n"
-	(cd "${DIR_TMP}/${dir}" && \
-	 # replaces is ("mod1" "mod2" …)
-	 replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \
-	 # strip double quotes
-	 replaces=("${replaces[@]%\"}") && \
-	 replaces=("${replaces[@]#\"}") && \
-	 # make an array (-dropreplace=mod1 -dropreplace=mod2 …)
-	 dropreplaces=("${replaces[@]/#/-dropreplace=}") && \
-	 go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \
-	 go mod tidy)
-done
-printf "Update done:\n\n"
-
-# Build directories that contain main package. These directories are different than
-# directories that contain go.mod files.
-printf "Build examples:\n"
-EXAMPLES=$(./get_main_pkgs.sh ./example)
-for ex in $EXAMPLES; do
-	printf "  Build $ex in ${DIR_TMP}/${ex}\n"
-	(cd "${DIR_TMP}/${ex}" && \
-	 go build .)
-done
-
-# Cleanup
-printf "Remove copied files.\n"
-rm -rf $DIR_TMP
42  vendor/go.opentelemetry.io/otel/verify_released_changelog.sh  generated vendored Normal file
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
+git fetch origin "${TARGET}" --depth=1
+
+# Checkout the previous version on the base branch of the changelog to tmpfolder
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+	echo "Error: The released sections of the changelog file have been modified."
+	diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+	rm -rf "$TEMP_DIR"
+	false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
2  vendor/go.opentelemetry.io/otel/version.go  generated vendored
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
 
 // Version is the current release version of OpenTelemetry in use.
 func Version() string {
-	return "1.28.0"
+	return "1.31.0"
 }
10  vendor/go.opentelemetry.io/otel/versions.yaml  generated vendored
@@ -3,7 +3,7 @@
 
 module-sets:
   stable-v1:
-    version: v1.28.0
+    version: v1.31.0
     modules:
       - go.opentelemetry.io/otel
       - go.opentelemetry.io/otel/bridge/opencensus
@@ -29,21 +29,21 @@ module-sets:
       - go.opentelemetry.io/otel/sdk/metric
       - go.opentelemetry.io/otel/trace
   experimental-metrics:
-    version: v0.50.0
+    version: v0.53.0
     modules:
       - go.opentelemetry.io/otel/example/prometheus
       - go.opentelemetry.io/otel/exporters/prometheus
   experimental-logs:
-    version: v0.4.0
+    version: v0.7.0
     modules:
       - go.opentelemetry.io/otel/log
      - go.opentelemetry.io/otel/sdk/log
+      - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
       - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
       - go.opentelemetry.io/otel/exporters/stdout/stdoutlog
   experimental-schema:
-    version: v0.0.8
+    version: v0.0.10
     modules:
       - go.opentelemetry.io/otel/schema
 excluded-modules:
   - go.opentelemetry.io/otel/internal/tools
-  - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
93  vendor/google.golang.org/grpc/balancer/balancer.go  (generated, vendored)

@@ -73,17 +73,6 @@ func unregisterForTesting(name string) {
 	delete(m, name)
 }
 
-// connectedAddress returns the connected address for a SubConnState. The
-// address is only valid if the state is READY.
-func connectedAddress(scs SubConnState) resolver.Address {
-	return scs.connectedAddress
-}
-
-// setConnectedAddress sets the connected address for a SubConnState.
-func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
-	scs.connectedAddress = addr
-}
-
 func init() {
 	internal.BalancerUnregister = unregisterForTesting
 	internal.ConnectedAddress = connectedAddress
@@ -106,57 +95,6 @@ func Get(name string) Builder {
 	return nil
 }
 
-// A SubConn represents a single connection to a gRPC backend service.
-//
-// Each SubConn contains a list of addresses.
-//
-// All SubConns start in IDLE, and will not try to connect. To trigger the
-// connecting, Balancers must call Connect. If a connection re-enters IDLE,
-// Balancers must call Connect again to trigger a new connection attempt.
-//
-// gRPC will try to connect to the addresses in sequence, and stop trying the
-// remainder once the first connection is successful. If an attempt to connect
-// to all addresses encounters an error, the SubConn will enter
-// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE.
-//
-// Once established, if a connection is lost, the SubConn will transition
-// directly to IDLE.
-//
-// This interface is to be implemented by gRPC. Users should not need their own
-// implementation of this interface. For situations like testing, any
-// implementations should embed this interface. This allows gRPC to add new
-// methods to this interface.
-type SubConn interface {
-	// UpdateAddresses updates the addresses used in this SubConn.
-	// gRPC checks if currently-connected address is still in the new list.
-	// If it's in the list, the connection will be kept.
-	// If it's not in the list, the connection will gracefully close, and
-	// a new connection will be created.
-	//
-	// This will trigger a state transition for the SubConn.
-	//
-	// Deprecated: this method will be removed. Create new SubConns for new
-	// addresses instead.
-	UpdateAddresses([]resolver.Address)
-	// Connect starts the connecting for this SubConn.
-	Connect()
-	// GetOrBuildProducer returns a reference to the existing Producer for this
-	// ProducerBuilder in this SubConn, or, if one does not currently exist,
-	// creates a new one and returns it. Returns a close function which may be
-	// called when the Producer is no longer needed. Otherwise the producer
-	// will automatically be closed upon connection loss or subchannel close.
-	// Should only be called on a SubConn in state Ready. Otherwise the
-	// producer will be unable to create streams.
-	GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
-	// Shutdown shuts down the SubConn gracefully. Any started RPCs will be
-	// allowed to complete. No future calls should be made on the SubConn.
-	// One final state update will be delivered to the StateListener (or
-	// UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
-	// indicate the shutdown operation. This may be delivered before
-	// in-progress RPCs are complete and the actual connection is closed.
-	Shutdown()
-}
-
 // NewSubConnOptions contains options to create new SubConn.
 type NewSubConnOptions struct {
 	// CredsBundle is the credentials bundle that will be used in the created
@@ -424,18 +362,6 @@ type ExitIdler interface {
 	ExitIdle()
 }
 
-// SubConnState describes the state of a SubConn.
-type SubConnState struct {
-	// ConnectivityState is the connectivity state of the SubConn.
-	ConnectivityState connectivity.State
-	// ConnectionError is set if the ConnectivityState is TransientFailure,
-	// describing the reason the SubConn failed. Otherwise, it is nil.
-	ConnectionError error
-	// connectedAddr contains the connected address when ConnectivityState is
-	// Ready. Otherwise, it is indeterminate.
-	connectedAddress resolver.Address
-}
-
 // ClientConnState describes the state of a ClientConn relevant to the
 // balancer.
 type ClientConnState struct {
@@ -448,22 +374,3 @@ type ClientConnState struct {
 // ErrBadResolverState may be returned by UpdateClientConnState to indicate a
 // problem with the provided name resolver data.
 var ErrBadResolverState = errors.New("bad resolver state")
-
-// A ProducerBuilder is a simple constructor for a Producer. It is used by the
-// SubConn to create producers when needed.
-type ProducerBuilder interface {
-	// Build creates a Producer. The first parameter is always a
-	// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
-	// associated SubConn), but is declared as `any` to avoid a dependency
-	// cycle. Build also returns a close function that will be called when all
-	// references to the Producer have been given up for a SubConn, or when a
-	// connectivity state change occurs on the SubConn. The close function
-	// should always block until all asynchronous cleanup work is completed.
-	Build(grpcClientConnInterface any) (p Producer, close func())
-}
-
-// A Producer is a type shared among potentially many consumers. It is
-// associated with a SubConn, and an implementation will typically contain
-// other methods to provide additional functionality, e.g. configuration or
-// subscription registration.
-type Producer any
17  vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go  (generated, vendored)

@@ -18,7 +18,18 @@
 // Package internal contains code internal to the pickfirst package.
 package internal
 
-import "math/rand"
+import (
+	rand "math/rand/v2"
+	"time"
+)
 
-// RandShuffle pseudo-randomizes the order of addresses.
-var RandShuffle = rand.Shuffle
+var (
+	// RandShuffle pseudo-randomizes the order of addresses.
+	RandShuffle = rand.Shuffle
+	// TimeAfterFunc allows mocking the timer for testing connection delay
+	// related functionality.
+	TimeAfterFunc = func(d time.Duration, f func()) func() {
+		timer := time.AfterFunc(d, f)
+		return func() { timer.Stop() }
+	}
+)
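The TimeAfterFunc hook added above exists so tests can replace the real timer that paces connection attempts. A hedged, self-contained sketch of that injection pattern (a local variable standing in for the exported hook, not the grpc package itself): the override captures the scheduled callback so it can be fired deterministically.

    package main

    import (
        "fmt"
        "time"
    )

    // timeAfterFunc mirrors the shape of the TimeAfterFunc hook above:
    // schedule f after d and return a cancel function.
    var timeAfterFunc = func(d time.Duration, f func()) func() {
        t := time.AfterFunc(d, f)
        return func() { t.Stop() }
    }

    func main() {
        // Test-style override: capture the callback instead of starting a
        // real timer, so the delay can be triggered manually.
        captured := make(chan func(), 1)
        timeAfterFunc = func(_ time.Duration, f func()) func() {
            captured <- f
            return func() {}
        }

        cancel := timeAfterFunc(250*time.Millisecond, func() {
            fmt.Println("connection delay elapsed")
        })
        defer cancel()

        (<-captured)() // fire the captured callback deterministically
    }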
2  vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go  (generated, vendored)

@@ -23,7 +23,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"math/rand"
+	rand "math/rand/v2"
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer/pickfirst/internal"
470  vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go  (generated, vendored)

@@ -29,11 +29,15 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"net"
+	"net/netip"
 	"sync"
+	"time"
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer/pickfirst/internal"
 	"google.golang.org/grpc/connectivity"
+	expstats "google.golang.org/grpc/experimental/stats"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal/envconfig"
 	internalgrpclog "google.golang.org/grpc/internal/grpclog"
@@ -50,26 +54,68 @@ func init() {
 	balancer.Register(pickfirstBuilder{})
 }
 
+// enableHealthListenerKeyType is a unique key type used in resolver attributes
+// to indicate whether the health listener usage is enabled.
+type enableHealthListenerKeyType struct{}
+
 var (
 	logger = grpclog.Component("pick-first-leaf-lb")
 	// Name is the name of the pick_first_leaf balancer.
 	// It is changed to "pick_first" in init() if this balancer is to be
 	// registered as the default pickfirst.
 	Name = "pick_first_leaf"
+	disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+		Name:        "grpc.lb.pick_first.disconnections",
+		Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
+		Unit:        "disconnection",
+		Labels:      []string{"grpc.target"},
+		Default:     false,
+	})
+	connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+		Name:        "grpc.lb.pick_first.connection_attempts_succeeded",
+		Description: "EXPERIMENTAL. Number of successful connection attempts.",
+		Unit:        "attempt",
+		Labels:      []string{"grpc.target"},
+		Default:     false,
+	})
+	connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+		Name:        "grpc.lb.pick_first.connection_attempts_failed",
+		Description: "EXPERIMENTAL. Number of failed connection attempts.",
+		Unit:        "attempt",
+		Labels:      []string{"grpc.target"},
+		Default:     false,
+	})
 )
 
-// TODO: change to pick-first when this becomes the default pick_first policy.
-const logPrefix = "[pick-first-leaf-lb %p] "
+const (
+	// TODO: change to pick-first when this becomes the default pick_first policy.
+	logPrefix = "[pick-first-leaf-lb %p] "
+	// connectionDelayInterval is the time to wait for during the happy eyeballs
+	// pass before starting the next connection attempt.
+	connectionDelayInterval = 250 * time.Millisecond
+)
+
+type ipAddrFamily int
+
+const (
+	// ipAddrFamilyUnknown represents strings that can't be parsed as an IP
+	// address.
+	ipAddrFamilyUnknown ipAddrFamily = iota
+	ipAddrFamilyV4
+	ipAddrFamilyV6
+)
 
 type pickfirstBuilder struct{}
 
-func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
+func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer {
 	b := &pickfirstBalancer{
-		cc:          cc,
-		addressList: addressList{},
-		subConns:    resolver.NewAddressMap(),
-		state:       connectivity.Connecting,
-		mu:          sync.Mutex{},
+		cc:              cc,
+		target:          bo.Target.String(),
+		metricsRecorder: bo.MetricsRecorder, // ClientConn will always create a Metrics Recorder.
+
+		subConns:              resolver.NewAddressMap(),
+		state:                 connectivity.Connecting,
+		cancelConnectionTimer: func() {},
 	}
 	b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
 	return b
@@ -87,6 +133,13 @@ func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalan
 	return cfg, nil
 }
 
+// EnableHealthListener updates the state to configure pickfirst for using a
+// generic health listener.
+func EnableHealthListener(state resolver.State) resolver.State {
+	state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
+	return state
+}
+
 type pfConfig struct {
 	serviceconfig.LoadBalancingConfig `json:"-"`
 
@@ -104,14 +157,19 @@ type scData struct {
 	subConn balancer.SubConn
 	addr    resolver.Address
 
-	state   connectivity.State
-	lastErr error
+	rawConnectivityState connectivity.State
+	// The effective connectivity state based on raw connectivity, health state
+	// and after following sticky TransientFailure behaviour defined in A62.
+	effectiveState              connectivity.State
+	lastErr                     error
+	connectionFailedInFirstPass bool
 }
 
 func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
 	sd := &scData{
-		state: connectivity.Idle,
-		addr:  addr,
+		rawConnectivityState: connectivity.Idle,
+		effectiveState:       connectivity.Idle,
+		addr:                 addr,
 	}
 	sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
 		StateListener: func(state balancer.SubConnState) {
@@ -128,19 +186,25 @@ func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
 type pickfirstBalancer struct {
 	// The following fields are initialized at build time and read-only after
 	// that and therefore do not need to be guarded by a mutex.
 	logger *internalgrpclog.PrefixLogger
 	cc     balancer.ClientConn
+	target string
+	metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil
+
 	// The mutex is used to ensure synchronization of updates triggered
 	// from the idle picker and the already serialized resolver,
 	// SubConn state updates.
 	mu sync.Mutex
+	// State reported to the channel based on SubConn states and resolver
+	// updates.
 	state connectivity.State
 	// scData for active subonns mapped by address.
 	subConns    *resolver.AddressMap
 	addressList addressList
 	firstPass   bool
 	numTF       int
+	cancelConnectionTimer func()
+	healthCheckingEnabled bool
 }
 
 // ResolverError is called by the ClientConn when the name resolver produces
@@ -166,7 +230,7 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) {
 		return
 	}
 
-	b.cc.UpdateState(balancer.State{
+	b.updateBalancerState(balancer.State{
 		ConnectivityState: connectivity.TransientFailure,
 		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)},
 	})
@@ -175,15 +239,16 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) {
 func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
 	b.mu.Lock()
 	defer b.mu.Unlock()
+	b.cancelConnectionTimer()
 	if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
 		// Cleanup state pertaining to the previous resolver state.
 		// Treat an empty address list like an error by calling b.ResolverError.
-		b.state = connectivity.TransientFailure
 		b.closeSubConnsLocked()
 		b.addressList.updateAddrs(nil)
 		b.resolverErrorLocked(errors.New("produced zero addresses"))
 		return balancer.ErrBadResolverState
 	}
+	b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
 	cfg, ok := state.BalancerConfig.(pfConfig)
 	if state.BalancerConfig != nil && !ok {
 		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
@@ -206,9 +271,6 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
 		// "Flatten the list by concatenating the ordered list of addresses for
 		// each of the endpoints, in order." - A61
 		for _, endpoint := range endpoints {
-			// "In the flattened list, interleave addresses from the two address
-			// families, as per RFC-8305 section 4." - A61
-			// TODO: support the above language.
 			newAddrs = append(newAddrs, endpoint.Addresses...)
 		}
 	} else {
@@ -231,16 +293,17 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
 	// Not de-duplicating would result in attempting to connect to the same
 	// SubConn multiple times in the same pass. We don't want this.
 	newAddrs = deDupAddresses(newAddrs)
+	newAddrs = interleaveAddresses(newAddrs)
 
-	// Since we have a new set of addresses, we are again at first pass.
-	b.firstPass = true
+	prevAddr := b.addressList.currentAddress()
+	prevSCData, found := b.subConns.Get(prevAddr)
+	prevAddrsCount := b.addressList.size()
+	isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready
+	b.addressList.updateAddrs(newAddrs)
 
 	// If the previous ready SubConn exists in new address list,
 	// keep this connection and don't create new SubConns.
-	prevAddr := b.addressList.currentAddress()
-	prevAddrsCount := b.addressList.size()
-	b.addressList.updateAddrs(newAddrs)
-	if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) {
+	if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
 		return nil
 	}
 
@@ -252,18 +315,17 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
 	// we should still enter CONNECTING because the sticky TF behaviour
 	// mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
 	// due to connectivity failures.
-	if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 {
+	if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 {
 		// Start connection attempt at first address.
-		b.state = connectivity.Connecting
-		b.cc.UpdateState(balancer.State{
+		b.forceUpdateConcludedStateLocked(balancer.State{
 			ConnectivityState: connectivity.Connecting,
 			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
 		})
-		b.requestConnectionLocked()
+		b.startFirstPassLocked()
 	} else if b.state == connectivity.TransientFailure {
 		// If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
 		// we're READY. See A62.
-		b.requestConnectionLocked()
+		b.startFirstPassLocked()
 	}
 	return nil
 }
@@ -278,6 +340,7 @@ func (b *pickfirstBalancer) Close() {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 	b.closeSubConnsLocked()
+	b.cancelConnectionTimer()
 	b.state = connectivity.Shutdown
 }
 
@@ -287,12 +350,21 @@ func (b *pickfirstBalancer) Close() {
 func (b *pickfirstBalancer) ExitIdle() {
 	b.mu.Lock()
 	defer b.mu.Unlock()
-	if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() {
-		b.firstPass = true
-		b.requestConnectionLocked()
+	if b.state == connectivity.Idle {
+		b.startFirstPassLocked()
 	}
 }
 
+func (b *pickfirstBalancer) startFirstPassLocked() {
+	b.firstPass = true
+	b.numTF = 0
+	// Reset the connection attempt record for existing SubConns.
+	for _, sd := range b.subConns.Values() {
+		sd.(*scData).connectionFailedInFirstPass = false
+	}
+	b.requestConnectionLocked()
+}
+
 func (b *pickfirstBalancer) closeSubConnsLocked() {
 	for _, sd := range b.subConns.Values() {
 		sd.(*scData).subConn.Shutdown()
@@ -314,6 +386,70 @@ func deDupAddresses(addrs []resolver.Address) []resolver.Address {
 	return retAddrs
 }
 
+// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6)
+// as per RFC-8305 section 4.
+// Whichever address family is first in the list is followed by an address of
+// the other address family; that is, if the first address in the list is IPv6,
+// then the first IPv4 address should be moved up in the list to be second in
+// the list. It doesn't support configuring "First Address Family Count", i.e.
+// there will always be a single member of the first address family at the
+// beginning of the interleaved list.
+// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
+// "unknown" family for interleaving.
+// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
+func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
+	familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
+	interleavingOrder := []ipAddrFamily{}
+	for _, addr := range addrs {
+		family := addressFamily(addr.Addr)
+		if _, found := familyAddrsMap[family]; !found {
+			interleavingOrder = append(interleavingOrder, family)
+		}
+		familyAddrsMap[family] = append(familyAddrsMap[family], addr)
+	}
+
+	interleavedAddrs := make([]resolver.Address, 0, len(addrs))
+
+	for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
+		// Some IP types may have fewer addresses than others, so we look for
+		// the next type that has a remaining member to add to the interleaved
+		// list.
+		family := interleavingOrder[curFamilyIdx]
+		remainingMembers := familyAddrsMap[family]
+		if len(remainingMembers) > 0 {
+			interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
+			familyAddrsMap[family] = remainingMembers[1:]
+		}
+	}
+
+	return interleavedAddrs
+}
+
+// addressFamily returns the ipAddrFamily after parsing the address string.
+// If the address isn't of the format "ip-address:port", it returns
+// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
+// using a resolver like passthrough where the address may be a hostname in
+// some format that the dialer can resolve.
+func addressFamily(address string) ipAddrFamily {
+	// Parse the IP after removing the port.
+	host, _, err := net.SplitHostPort(address)
+	if err != nil {
+		return ipAddrFamilyUnknown
+	}
+	ip, err := netip.ParseAddr(host)
+	if err != nil {
+		return ipAddrFamilyUnknown
+	}
+	switch {
+	case ip.Is4() || ip.Is4In6():
+		return ipAddrFamilyV4
+	case ip.Is6():
+		return ipAddrFamilyV6
+	default:
+		return ipAddrFamilyUnknown
+	}
+}
+
 // reconcileSubConnsLocked updates the active subchannels based on a new address
 // list from the resolver. It does this by:
 //   - closing subchannels: any existing subchannels associated with addresses
@@ -342,6 +478,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address)
 // shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
 // becomes ready, which means that all other subConn must be shutdown.
 func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
+	b.cancelConnectionTimer()
 	for _, v := range b.subConns.Values() {
 		sd := v.(*scData)
 		if sd.subConn != selected.subConn {
@@ -382,46 +519,89 @@ func (b *pickfirstBalancer) requestConnectionLocked() {
 		}
 
 		scd := sd.(*scData)
-		switch scd.state {
+		switch scd.rawConnectivityState {
 		case connectivity.Idle:
 			scd.subConn.Connect()
+			b.scheduleNextConnectionLocked()
+			return
 		case connectivity.TransientFailure:
-			// Try the next address.
+			// The SubConn is being re-used and failed during a previous pass
+			// over the addressList. It has not completed backoff yet.
+			// Mark it as having failed and try the next address.
+			scd.connectionFailedInFirstPass = true
 			lastErr = scd.lastErr
 			continue
-		case connectivity.Ready:
-			// Should never happen.
-			b.logger.Errorf("Requesting a connection even though we have a READY SubConn")
-		case connectivity.Shutdown:
-			// Should never happen.
-			b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map")
 		case connectivity.Connecting:
-			// Wait for the SubConn to report success or failure.
+			// Wait for the connection attempt to complete or the timer to fire
+			// before attempting the next address.
+			b.scheduleNextConnectionLocked()
+			return
+		default:
+			b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState)
+			return
 		}
-		return
 	}
 
-	// All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
-	// first pass.
-	b.endFirstPassLocked(lastErr)
+	// All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
+	// first pass if possible.
+	b.endFirstPassIfPossibleLocked(lastErr)
+}
+
+func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
+	b.cancelConnectionTimer()
+	if !b.addressList.hasNext() {
+		return
+	}
+	curAddr := b.addressList.currentAddress()
+	cancelled := false // Access to this is protected by the balancer's mutex.
+	closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
+		b.mu.Lock()
+		defer b.mu.Unlock()
+		// If the scheduled task is cancelled while acquiring the mutex, return.
+		if cancelled {
+			return
+		}
+		if b.logger.V(2) {
+			b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
+		}
+		if b.addressList.increment() {
+			b.requestConnectionLocked()
+		}
+	})
+	// Access to the cancellation callback held by the balancer is guarded by
+	// the balancer's mutex, so it's safe to set the boolean from the callback.
+	b.cancelConnectionTimer = sync.OnceFunc(func() {
+		cancelled = true
+		closeFn()
+	})
 }
 
 func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
 	b.mu.Lock()
 	defer b.mu.Unlock()
-	oldState := sd.state
-	sd.state = newState.ConnectivityState
+	oldState := sd.rawConnectivityState
+	sd.rawConnectivityState = newState.ConnectivityState
 	// Previously relevant SubConns can still callback with state updates.
 	// To prevent pickers from returning these obsolete SubConns, this logic
 	// is included to check if the current list of active SubConns includes this
 	// SubConn.
-	if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd {
+	if !b.isActiveSCData(sd) {
 		return
 	}
 	if newState.ConnectivityState == connectivity.Shutdown {
+		sd.effectiveState = connectivity.Shutdown
 		return
 	}
 
+	// Record a connection attempt when exiting CONNECTING.
+	if newState.ConnectivityState == connectivity.TransientFailure {
+		sd.connectionFailedInFirstPass = true
+		connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
+	}
+
 	if newState.ConnectivityState == connectivity.Ready {
+		connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
 		b.shutdownRemainingLocked(sd)
 		if !b.addressList.seekTo(sd.addr) {
 			// This should not fail as we should have only one SubConn after
@@ -429,10 +609,30 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub
 			b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses)
 			return
 		}
-		b.state = connectivity.Ready
-		b.cc.UpdateState(balancer.State{
-			ConnectivityState: connectivity.Ready,
-			Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+		if !b.healthCheckingEnabled {
+			if b.logger.V(2) {
+				b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
+			}
+
+			sd.effectiveState = connectivity.Ready
+			b.updateBalancerState(balancer.State{
+				ConnectivityState: connectivity.Ready,
+				Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+			})
+			return
+		}
+		if b.logger.V(2) {
+			b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
+		}
+		// Send a CONNECTING update to take the SubConn out of sticky-TF if
+		// required.
+		sd.effectiveState = connectivity.Connecting
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+		sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
+			b.updateSubConnHealthState(sd, scs)
 		})
 		return
 	}
@@ -443,13 +643,24 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub
 	// a transport is successfully created, but the connection fails
 	// before the SubConn can send the notification for READY. We treat
 	// this as a successful connection and transition to IDLE.
-	if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
+	// TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
+	// part of the if condition below once the issue is fixed.
+	if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
 		// Once a transport fails, the balancer enters IDLE and starts from
 		// the first address when the picker is used.
 		b.shutdownRemainingLocked(sd)
-		b.state = connectivity.Idle
+		sd.effectiveState = newState.ConnectivityState
+		// READY SubConn interspliced in between CONNECTING and IDLE, need to
+		// account for that.
+		if oldState == connectivity.Connecting {
+			// A known issue (https://github.com/grpc/grpc-go/issues/7862)
+			// causes a race that prevents the READY state change notification.
+			// This works around it.
+			connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+		}
+		disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
 		b.addressList.reset()
-		b.cc.UpdateState(balancer.State{
+		b.updateBalancerState(balancer.State{
 			ConnectivityState: connectivity.Idle,
 			Picker:            &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
 		})
@@ -459,32 +670,35 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub
 	if b.firstPass {
 		switch newState.ConnectivityState {
 		case connectivity.Connecting:
-			// The balancer can be in either IDLE, CONNECTING or
-			// TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in
-			// TRANSIENT_FAILURE until it's READY. See A62.
-			// If the balancer is already in CONNECTING, no update is needed.
-			if b.state == connectivity.Idle {
-				b.state = connectivity.Connecting
-				b.cc.UpdateState(balancer.State{
+			// The effective state can be in either IDLE, CONNECTING or
+			// TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in
+			// TRANSIENT_FAILURE until it's READY. See A62.
+			if sd.effectiveState != connectivity.TransientFailure {
+				sd.effectiveState = connectivity.Connecting
+				b.updateBalancerState(balancer.State{
 					ConnectivityState: connectivity.Connecting,
 					Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
 				})
 			}
 		case connectivity.TransientFailure:
 			sd.lastErr = newState.ConnectionError
+			sd.effectiveState = connectivity.TransientFailure
 			// Since we're re-using common SubConns while handling resolver
 			// updates, we could receive an out of turn TRANSIENT_FAILURE from
-			// a pass over the previous address list. We ignore such updates.
-			if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
-				return
-			}
-			if b.addressList.increment() {
-				b.requestConnectionLocked()
-				return
-			}
-			// End of the first pass.
-			b.endFirstPassLocked(newState.ConnectionError)
+			// a pass over the previous address list. Happy Eyeballs will also
+			// cause out of order updates to arrive.
+
+			if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
+				b.cancelConnectionTimer()
+				if b.addressList.increment() {
+					b.requestConnectionLocked()
+					return
+				}
+			}
+
+			// End the first pass if we've seen a TRANSIENT_FAILURE from all
+			// SubConns once.
+			b.endFirstPassIfPossibleLocked(newState.ConnectionError)
 		}
 		return
 	}
@@ -495,7 +709,7 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub
 	b.numTF = (b.numTF + 1) % b.subConns.Len()
 	sd.lastErr = newState.ConnectionError
 	if b.numTF%b.subConns.Len() == 0 {
-		b.cc.UpdateState(balancer.State{
+		b.updateBalancerState(balancer.State{
 			ConnectivityState: connectivity.TransientFailure,
 			Picker:            &picker{err: newState.ConnectionError},
 		})
@@ -509,24 +723,95 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub
 	}
 }
 
-func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) {
+// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the
+// addresses are tried and their SubConns have reported a failure.
+func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
+	// An optimization to avoid iterating over the entire SubConn map.
+	if b.addressList.isValid() {
+		return
+	}
+	// Connect() has been called on all the SubConns. The first pass can be
+	// ended if all the SubConns have reported a failure.
+	for _, v := range b.subConns.Values() {
+		sd := v.(*scData)
+		if !sd.connectionFailedInFirstPass {
+			return
+		}
+	}
 	b.firstPass = false
-	b.numTF = 0
-	b.state = connectivity.TransientFailure
-
-	b.cc.UpdateState(balancer.State{
+	b.updateBalancerState(balancer.State{
 		ConnectivityState: connectivity.TransientFailure,
 		Picker:            &picker{err: lastErr},
 	})
 	// Start re-connecting all the SubConns that are already in IDLE.
 	for _, v := range b.subConns.Values() {
 		sd := v.(*scData)
-		if sd.state == connectivity.Idle {
+		if sd.rawConnectivityState == connectivity.Idle {
 			sd.subConn.Connect()
 		}
 	}
 }
 
+func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
+	activeSD, found := b.subConns.Get(sd.addr)
+	return found && activeSD == sd
+}
+
+func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	// Previously relevant SubConns can still callback with state updates.
+	// To prevent pickers from returning these obsolete SubConns, this logic
+	// is included to check if the current list of active SubConns includes
+	// this SubConn.
+	if !b.isActiveSCData(sd) {
+		return
+	}
+	sd.effectiveState = state.ConnectivityState
+	switch state.ConnectivityState {
+	case connectivity.Ready:
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Ready,
+			Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+		})
+	case connectivity.TransientFailure:
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.TransientFailure,
+			Picker:            &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
+		})
+	case connectivity.Connecting:
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+	default:
+		b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state)
+	}
+}
+
+// updateBalancerState stores the state reported to the channel and calls
+// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
+// updates to the channel.
+func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
+	// In case of TransientFailures allow the picker to be updated to update
+	// the connectivity error, in all other cases don't send duplicate state
+	// updates.
+	if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
+		return
+	}
+	b.forceUpdateConcludedStateLocked(newState)
+}
+
+// forceUpdateConcludedStateLocked stores the state reported to the channel and
+// calls ClientConn.UpdateState().
+// A separate function is defined to force update the ClientConn state since the
+// channel doesn't correctly assume that LB policies start in CONNECTING and
+// relies on LB policy to send an initial CONNECTING update.
+func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
+	b.state = newState.ConnectivityState
+	b.cc.UpdateState(newState)
+}
+
 type picker struct {
 	result balancer.PickResult
 	err    error
@@ -583,15 +868,6 @@ func (al *addressList) currentAddress() resolver.Address {
 	return al.addresses[al.idx]
 }
 
-// first returns the first address in the list. If the list is empty, it returns
-// an empty address instead.
-func (al *addressList) first() resolver.Address {
-	if len(al.addresses) == 0 {
-		return resolver.Address{}
-	}
-	return al.addresses[0]
-}
-
 func (al *addressList) reset() {
 	al.idx = 0
 }
@@ -614,6 +890,16 @@ func (al *addressList) seekTo(needle resolver.Address) bool {
 	return false
 }
 
+// hasNext returns whether incrementing the addressList will result in moving
+// past the end of the list. If the list has already moved past the end, it
+// returns false.
+func (al *addressList) hasNext() bool {
+	if !al.isValid() {
+		return false
+	}
+	return al.idx+1 < len(al.addresses)
+}
+
 // equalAddressIgnoringBalAttributes returns true is a and b are considered
 // equal. This is different from the Equal method on the resolver.Address type
 // which considers all fields to determine equality. Here, we only consider
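The interleaveAddresses helper added above orders addresses per RFC 8305 section 4: group by address family, then round-robin across the families in first-seen order. A standalone sketch of that ordering (illustrative only, using plain strings rather than resolver.Address):

    package main

    import (
        "fmt"
        "net"
        "net/netip"
    )

    // family classifies a "host:port" string, loosely mirroring the vendored
    // addressFamily helper.
    func family(hostport string) string {
        host, _, err := net.SplitHostPort(hostport)
        if err != nil {
            return "unknown"
        }
        ip, err := netip.ParseAddr(host)
        switch {
        case err != nil:
            return "unknown"
        case ip.Is4() || ip.Is4In6():
            return "v4"
        case ip.Is6():
            return "v6"
        default:
            return "unknown"
        }
    }

    func main() {
        addrs := []string{"10.0.0.1:443", "10.0.0.2:443", "[2001:db8::1]:443", "[2001:db8::2]:443"}

        // Group by family, remembering the order in which families first appear.
        groups := map[string][]string{}
        order := []string{}
        for _, a := range addrs {
            f := family(a)
            if _, ok := groups[f]; !ok {
                order = append(order, f)
            }
            groups[f] = append(groups[f], a)
        }

        // Round-robin across families, RFC 8305 style.
        out := make([]string, 0, len(addrs))
        for i := 0; len(out) < len(addrs); i = (i + 1) % len(order) {
            f := order[i]
            if len(groups[f]) > 0 {
                out = append(out, groups[f][0])
                groups[f] = groups[f][1:]
            }
        }
        fmt.Println(out)
        // Prints: [10.0.0.1:443 [2001:db8::1]:443 10.0.0.2:443 [2001:db8::2]:443]
    }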
4  vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go  (generated, vendored)

@@ -22,7 +22,7 @@
 package roundrobin
 
 import (
-	"math/rand"
+	rand "math/rand/v2"
 	"sync/atomic"
 
 	"google.golang.org/grpc/balancer"
@@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
 		// Start at a random index, as the same RR balancer rebuilds a new
 		// picker when SubConn states change, and we don't want to apply excess
 		// load to the first server in the list.
-		next: uint32(rand.Intn(len(scs))),
+		next: uint32(rand.IntN(len(scs))),
 	}
 }
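The import swaps in the two balancers above are the whole migration to math/rand/v2: the v2 package seeds its global generator automatically and renames Intn to IntN. A small comparison sketch (requires Go 1.22+):

    package main

    import (
        "fmt"
        randv1 "math/rand"
        rand "math/rand/v2"
    )

    func main() {
        fmt.Println(randv1.Intn(10)) // old API used before this bump
        fmt.Println(rand.IntN(10))   // new API used by the vendored balancers
    }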
134  vendor/google.golang.org/grpc/balancer/subconn.go  (generated, vendored, new file)

@@ -0,0 +1,134 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package balancer
+
+import (
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/resolver"
+)
+
+// A SubConn represents a single connection to a gRPC backend service.
+//
+// All SubConns start in IDLE, and will not try to connect. To trigger a
+// connection attempt, Balancers must call Connect.
+//
+// If the connection attempt fails, the SubConn will transition to
+// TRANSIENT_FAILURE for a backoff period, and then return to IDLE. If the
+// connection attempt succeeds, it will transition to READY.
+//
+// If a READY SubConn becomes disconnected, the SubConn will transition to IDLE.
+//
+// If a connection re-enters IDLE, Balancers must call Connect again to trigger
+// a new connection attempt.
+//
+// Each SubConn contains a list of addresses. gRPC will try to connect to the
+// addresses in sequence, and stop trying the remainder once the first
+// connection is successful. However, this behavior is deprecated. SubConns
+// should only use a single address.
+//
+// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
+// by custom load balancing poilices. Users should not need their own complete
+// implementation of this interface -- they should always delegate to a SubConn
+// returned by ClientConn.NewSubConn() by embedding it in their implementations.
+// An embedded SubConn must never be nil, or runtime panics will occur.
+type SubConn interface {
+	// UpdateAddresses updates the addresses used in this SubConn.
+	// gRPC checks if currently-connected address is still in the new list.
+	// If it's in the list, the connection will be kept.
+	// If it's not in the list, the connection will gracefully close, and
+	// a new connection will be created.
+	//
+	// This will trigger a state transition for the SubConn.
+	//
+	// Deprecated: this method will be removed. Create new SubConns for new
+	// addresses instead.
+	UpdateAddresses([]resolver.Address)
+	// Connect starts the connecting for this SubConn.
+	Connect()
+	// GetOrBuildProducer returns a reference to the existing Producer for this
+	// ProducerBuilder in this SubConn, or, if one does not currently exist,
+	// creates a new one and returns it. Returns a close function which may be
+	// called when the Producer is no longer needed. Otherwise the producer
+	// will automatically be closed upon connection loss or subchannel close.
+	// Should only be called on a SubConn in state Ready. Otherwise the
+	// producer will be unable to create streams.
+	GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
+	// Shutdown shuts down the SubConn gracefully. Any started RPCs will be
+	// allowed to complete. No future calls should be made on the SubConn.
+	// One final state update will be delivered to the StateListener (or
+	// UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
+	// indicate the shutdown operation. This may be delivered before
+	// in-progress RPCs are complete and the actual connection is closed.
+	Shutdown()
+	// RegisterHealthListener registers a health listener that receives health
+	// updates for a Ready SubConn. Only one health listener can be registered
+	// at a time. A health listener should be registered each time the SubConn's
+	// connectivity state changes to READY. Registering a health listener when
+	// the connectivity state is not READY may result in undefined behaviour.
+	// This method must not be called synchronously while handling an update
+	// from a previously registered health listener.
+	RegisterHealthListener(func(SubConnState))
+	// EnforceSubConnEmbedding is included to force implementers to embed
+	// another implementation of this interface, allowing gRPC to add methods
+	// without breaking users.
+	internal.EnforceSubConnEmbedding
+}
+
+// A ProducerBuilder is a simple constructor for a Producer. It is used by the
+// SubConn to create producers when needed.
+type ProducerBuilder interface {
+	// Build creates a Producer. The first parameter is always a
+	// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
+	// associated SubConn), but is declared as `any` to avoid a dependency
+	// cycle. Build also returns a close function that will be called when all
+	// references to the Producer have been given up for a SubConn, or when a
+	// connectivity state change occurs on the SubConn. The close function
+	// should always block until all asynchronous cleanup work is completed.
+	Build(grpcClientConnInterface any) (p Producer, close func())
+}
+
+// SubConnState describes the state of a SubConn.
+type SubConnState struct {
+	// ConnectivityState is the connectivity state of the SubConn.
+	ConnectivityState connectivity.State
+	// ConnectionError is set if the ConnectivityState is TransientFailure,
+	// describing the reason the SubConn failed. Otherwise, it is nil.
+	ConnectionError error
+	// connectedAddr contains the connected address when ConnectivityState is
+	// Ready. Otherwise, it is indeterminate.
+	connectedAddress resolver.Address
+}
+
+// connectedAddress returns the connected address for a SubConnState. The
+// address is only valid if the state is READY.
+func connectedAddress(scs SubConnState) resolver.Address {
+	return scs.connectedAddress
+}
+
+// setConnectedAddress sets the connected address for a SubConnState.
+func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
+	scs.connectedAddress = addr
+}
+
+// A Producer is a type shared among potentially many consumers. It is
+// associated with a SubConn, and an implementation will typically contain
+// other methods to provide additional functionality, e.g. configuration or
+// subscription registration.
+type Producer any
vendor/google.golang.org/grpc/balancer_wrapper.go
generated
vendored
77
vendor/google.golang.org/grpc/balancer_wrapper.go
generated
vendored
@ -189,6 +189,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
|
|||||||
ac: ac,
|
ac: ac,
|
||||||
producers: make(map[balancer.ProducerBuilder]*refCountedProducer),
|
producers: make(map[balancer.ProducerBuilder]*refCountedProducer),
|
||||||
stateListener: opts.StateListener,
|
stateListener: opts.StateListener,
|
||||||
|
healthData: newHealthData(connectivity.Idle),
|
||||||
}
|
}
|
||||||
ac.acbw = acbw
|
ac.acbw = acbw
|
||||||
return acbw, nil
|
return acbw, nil
|
||||||
@ -254,12 +255,32 @@ func (ccb *ccBalancerWrapper) Target() string {
|
|||||||
// acBalancerWrapper is a wrapper on top of ac for balancers.
|
// acBalancerWrapper is a wrapper on top of ac for balancers.
|
||||||
// It implements balancer.SubConn interface.
|
// It implements balancer.SubConn interface.
|
||||||
type acBalancerWrapper struct {
|
type acBalancerWrapper struct {
|
||||||
|
internal.EnforceSubConnEmbedding
|
||||||
ac *addrConn // read-only
|
ac *addrConn // read-only
|
||||||
ccb *ccBalancerWrapper // read-only
|
ccb *ccBalancerWrapper // read-only
|
||||||
stateListener func(balancer.SubConnState)
|
stateListener func(balancer.SubConnState)
|
||||||
|
|
||||||
producersMu sync.Mutex
|
producersMu sync.Mutex
|
||||||
producers map[balancer.ProducerBuilder]*refCountedProducer
|
producers map[balancer.ProducerBuilder]*refCountedProducer
|
||||||
|
|
||||||
|
// Access to healthData is protected by healthMu.
|
||||||
|
healthMu sync.Mutex
|
||||||
|
// healthData is stored as a pointer to detect when the health listener is
|
||||||
|
// dropped or updated. This is required as closures can't be compared for
|
||||||
|
// equality.
|
||||||
|
healthData *healthData
|
||||||
|
}
|
||||||
|
|
||||||
|
// healthData holds data related to health state reporting.
|
||||||
|
type healthData struct {
|
||||||
|
// connectivityState stores the most recent connectivity state delivered
|
||||||
|
// to the LB policy. This is stored to avoid sending updates when the
|
||||||
|
// SubConn has already exited connectivity state READY.
|
||||||
|
connectivityState connectivity.State
|
||||||
|
}
|
||||||
|
|
||||||
|
func newHealthData(s connectivity.State) *healthData {
|
||||||
|
return &healthData{connectivityState: s}
|
||||||
}
|
}
|
||||||
|
|
||||||
// updateState is invoked by grpc to push a subConn state update to the
|
// updateState is invoked by grpc to push a subConn state update to the
|
||||||
@ -279,6 +300,24 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve
|
|||||||
if s == connectivity.Ready {
|
if s == connectivity.Ready {
|
||||||
setConnectedAddress(&scs, curAddr)
|
setConnectedAddress(&scs, curAddr)
|
||||||
}
|
}
|
||||||
|
// Invalidate the health listener by updating the healthData.
|
||||||
|
acbw.healthMu.Lock()
|
||||||
|
// A race may occur if a health listener is registered soon after the
|
||||||
|
// connectivity state is set but before the stateListener is called.
|
||||||
|
// Two cases may arise:
|
||||||
|
// 1. The new state is not READY: RegisterHealthListener has checks to
|
||||||
|
// ensure no updates are sent when the connectivity state is not
|
||||||
|
// READY.
|
||||||
|
// 2. The new state is READY: This means that the old state wasn't Ready.
|
||||||
|
// The RegisterHealthListener API mentions that a health listener
|
||||||
|
// must not be registered when a SubConn is not ready to avoid such
|
||||||
|
// races. When this happens, the LB policy would get health updates
|
||||||
|
// on the old listener. When the LB policy registers a new listener
|
||||||
|
// on receiving the connectivity update, the health updates will be
|
||||||
|
// sent to the new health listener.
|
||||||
|
acbw.healthData = newHealthData(scs.ConnectivityState)
|
||||||
|
acbw.healthMu.Unlock()
|
||||||
|
|
||||||
acbw.stateListener(scs)
|
acbw.stateListener(scs)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -373,3 +412,41 @@ func (acbw *acBalancerWrapper) closeProducers() {
|
|||||||
delete(acbw.producers, pb)
|
delete(acbw.producers, pb)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// RegisterHealthListener accepts a health listener from the LB policy. It sends
|
||||||
|
// updates to the health listener as long as the SubConn's connectivity state
|
||||||
|
// doesn't change and a new health listener is not registered. To invalidate
|
||||||
|
// the currently registered health listener, acbw updates the healthData. If a
|
||||||
|
// nil listener is registered, the active health listener is dropped.
|
||||||
|
func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) {
|
||||||
|
acbw.healthMu.Lock()
|
||||||
|
defer acbw.healthMu.Unlock()
|
||||||
|
// listeners should not be registered when the connectivity state
|
||||||
|
// isn't Ready. This may happen when the balancer registers a listener
|
||||||
|
// after the connectivityState is updated, but before it is notified
|
||||||
|
// of the update.
|
||||||
|
if acbw.healthData.connectivityState != connectivity.Ready {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Replace the health data to stop sending updates to any previously
|
||||||
|
// registered health listeners.
|
||||||
|
hd := newHealthData(connectivity.Ready)
|
||||||
|
acbw.healthData = hd
|
||||||
|
if listener == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
|
||||||
|
if ctx.Err() != nil || acbw.ccb.balancer == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Don't send updates if a new listener is registered.
|
||||||
|
acbw.healthMu.Lock()
|
||||||
|
defer acbw.healthMu.Unlock()
|
||||||
|
curHD := acbw.healthData
|
||||||
|
if curHD != hd {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
listener(balancer.SubConnState{ConnectivityState: connectivity.Ready})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
180
vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
generated
vendored
180
vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
generated
vendored
@ -18,7 +18,7 @@
|
|||||||
|
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
// versions:
|
// versions:
|
||||||
// protoc-gen-go v1.34.2
|
// protoc-gen-go v1.35.1
|
||||||
// protoc v5.27.1
|
// protoc v5.27.1
|
||||||
// source: grpc/binlog/v1/binarylog.proto
|
// source: grpc/binlog/v1/binarylog.proto
|
||||||
|
|
||||||
@ -274,11 +274,9 @@ type GrpcLogEntry struct {
|
|||||||
|
|
||||||
func (x *GrpcLogEntry) Reset() {
|
func (x *GrpcLogEntry) Reset() {
|
||||||
*x = GrpcLogEntry{}
|
*x = GrpcLogEntry{}
|
||||||
if protoimpl.UnsafeEnabled {
|
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
|
||||||
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms.StoreMessageInfo(mi)
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *GrpcLogEntry) String() string {
|
func (x *GrpcLogEntry) String() string {
|
||||||
@ -289,7 +287,7 @@ func (*GrpcLogEntry) ProtoMessage() {}
|
|||||||
|
|
||||||
func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message {
|
func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message {
|
||||||
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
|
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
if x != nil {
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
if ms.LoadMessageInfo() == nil {
|
if ms.LoadMessageInfo() == nil {
|
||||||
ms.StoreMessageInfo(mi)
|
ms.StoreMessageInfo(mi)
|
||||||
@ -440,11 +438,9 @@ type ClientHeader struct {
|
|||||||
|
|
||||||
func (x *ClientHeader) Reset() {
|
func (x *ClientHeader) Reset() {
|
||||||
*x = ClientHeader{}
|
*x = ClientHeader{}
|
||||||
if protoimpl.UnsafeEnabled {
|
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
|
||||||
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms.StoreMessageInfo(mi)
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *ClientHeader) String() string {
|
func (x *ClientHeader) String() string {
|
||||||
@ -455,7 +451,7 @@ func (*ClientHeader) ProtoMessage() {}
|
|||||||
|
|
||||||
func (x *ClientHeader) ProtoReflect() protoreflect.Message {
|
func (x *ClientHeader) ProtoReflect() protoreflect.Message {
|
||||||
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
|
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
if x != nil {
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
if ms.LoadMessageInfo() == nil {
|
if ms.LoadMessageInfo() == nil {
|
||||||
ms.StoreMessageInfo(mi)
|
ms.StoreMessageInfo(mi)
|
||||||
@ -509,11 +505,9 @@ type ServerHeader struct {
|
|||||||
|
|
||||||
func (x *ServerHeader) Reset() {
|
func (x *ServerHeader) Reset() {
|
||||||
*x = ServerHeader{}
|
*x = ServerHeader{}
|
||||||
if protoimpl.UnsafeEnabled {
|
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
|
||||||
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms.StoreMessageInfo(mi)
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *ServerHeader) String() string {
|
func (x *ServerHeader) String() string {
|
||||||
@ -524,7 +518,7 @@ func (*ServerHeader) ProtoMessage() {}
|
|||||||
|
|
||||||
func (x *ServerHeader) ProtoReflect() protoreflect.Message {
|
func (x *ServerHeader) ProtoReflect() protoreflect.Message {
|
||||||
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
|
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
if x != nil {
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
if ms.LoadMessageInfo() == nil {
|
if ms.LoadMessageInfo() == nil {
|
||||||
ms.StoreMessageInfo(mi)
|
ms.StoreMessageInfo(mi)
|
||||||
@ -565,11 +559,9 @@ type Trailer struct {
|
|||||||
|
|
||||||
func (x *Trailer) Reset() {
|
func (x *Trailer) Reset() {
|
||||||
*x = Trailer{}
|
*x = Trailer{}
|
||||||
if protoimpl.UnsafeEnabled {
|
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
|
||||||
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms.StoreMessageInfo(mi)
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *Trailer) String() string {
|
func (x *Trailer) String() string {
|
||||||
@ -580,7 +572,7 @@ func (*Trailer) ProtoMessage() {}
|
|||||||
|
|
||||||
func (x *Trailer) ProtoReflect() protoreflect.Message {
|
func (x *Trailer) ProtoReflect() protoreflect.Message {
|
||||||
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
|
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
if x != nil {
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
if ms.LoadMessageInfo() == nil {
|
if ms.LoadMessageInfo() == nil {
|
||||||
ms.StoreMessageInfo(mi)
|
ms.StoreMessageInfo(mi)
|
||||||
@ -638,11 +630,9 @@ type Message struct {
|
|||||||
|
|
||||||
func (x *Message) Reset() {
|
func (x *Message) Reset() {
|
||||||
*x = Message{}
|
*x = Message{}
|
||||||
if protoimpl.UnsafeEnabled {
|
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
|
||||||
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms.StoreMessageInfo(mi)
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *Message) String() string {
|
func (x *Message) String() string {
|
||||||
@ -653,7 +643,7 @@ func (*Message) ProtoMessage() {}
|
|||||||
|
|
||||||
func (x *Message) ProtoReflect() protoreflect.Message {
|
func (x *Message) ProtoReflect() protoreflect.Message {
|
||||||
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
|
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
if x != nil {
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
if ms.LoadMessageInfo() == nil {
|
if ms.LoadMessageInfo() == nil {
|
||||||
ms.StoreMessageInfo(mi)
|
ms.StoreMessageInfo(mi)
|
||||||
@ -713,11 +703,9 @@ type Metadata struct {
|
|||||||
|
|
||||||
func (x *Metadata) Reset() {
|
func (x *Metadata) Reset() {
|
||||||
*x = Metadata{}
|
*x = Metadata{}
|
||||||
if protoimpl.UnsafeEnabled {
|
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
|
||||||
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms.StoreMessageInfo(mi)
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *Metadata) String() string {
|
func (x *Metadata) String() string {
|
||||||
@ -728,7 +716,7 @@ func (*Metadata) ProtoMessage() {}
|
|||||||
|
|
||||||
func (x *Metadata) ProtoReflect() protoreflect.Message {
|
func (x *Metadata) ProtoReflect() protoreflect.Message {
|
||||||
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
|
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
if x != nil {
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
if ms.LoadMessageInfo() == nil {
|
if ms.LoadMessageInfo() == nil {
|
||||||
ms.StoreMessageInfo(mi)
|
ms.StoreMessageInfo(mi)
|
||||||
@ -762,11 +750,9 @@ type MetadataEntry struct {
|
|||||||
|
|
||||||
func (x *MetadataEntry) Reset() {
|
func (x *MetadataEntry) Reset() {
|
||||||
*x = MetadataEntry{}
|
*x = MetadataEntry{}
|
||||||
if protoimpl.UnsafeEnabled {
|
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
|
||||||
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms.StoreMessageInfo(mi)
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *MetadataEntry) String() string {
|
func (x *MetadataEntry) String() string {
|
||||||
@ -777,7 +763,7 @@ func (*MetadataEntry) ProtoMessage() {}
|
|||||||
|
|
||||||
func (x *MetadataEntry) ProtoReflect() protoreflect.Message {
|
func (x *MetadataEntry) ProtoReflect() protoreflect.Message {
|
||||||
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
|
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
if x != nil {
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
if ms.LoadMessageInfo() == nil {
|
if ms.LoadMessageInfo() == nil {
|
||||||
ms.StoreMessageInfo(mi)
|
ms.StoreMessageInfo(mi)
|
||||||
@ -820,11 +806,9 @@ type Address struct {
|
|||||||
|
|
||||||
func (x *Address) Reset() {
|
func (x *Address) Reset() {
|
||||||
*x = Address{}
|
*x = Address{}
|
||||||
if protoimpl.UnsafeEnabled {
|
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
|
||||||
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms.StoreMessageInfo(mi)
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *Address) String() string {
|
func (x *Address) String() string {
|
||||||
@ -835,7 +819,7 @@ func (*Address) ProtoMessage() {}
|
|||||||
|
|
||||||
func (x *Address) ProtoReflect() protoreflect.Message {
|
func (x *Address) ProtoReflect() protoreflect.Message {
|
||||||
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
|
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
if x != nil {
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
if ms.LoadMessageInfo() == nil {
|
if ms.LoadMessageInfo() == nil {
|
||||||
ms.StoreMessageInfo(mi)
|
ms.StoreMessageInfo(mi)
|
||||||
@ -1057,104 +1041,6 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
|
|||||||
if File_grpc_binlog_v1_binarylog_proto != nil {
|
if File_grpc_binlog_v1_binarylog_proto != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if !protoimpl.UnsafeEnabled {
|
|
||||||
file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any {
|
|
||||||
switch v := v.(*GrpcLogEntry); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any {
|
|
||||||
switch v := v.(*ClientHeader); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any {
|
|
||||||
switch v := v.(*ServerHeader); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any {
|
|
||||||
switch v := v.(*Trailer); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any {
|
|
||||||
switch v := v.(*Message); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any {
|
|
||||||
switch v := v.(*Metadata); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any {
|
|
||||||
switch v := v.(*MetadataEntry); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any {
|
|
||||||
switch v := v.(*Address); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{
|
file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{
|
||||||
(*GrpcLogEntry_ClientHeader)(nil),
|
(*GrpcLogEntry_ClientHeader)(nil),
|
||||||
(*GrpcLogEntry_ServerHeader)(nil),
|
(*GrpcLogEntry_ServerHeader)(nil),
|
||||||
|
11
vendor/google.golang.org/grpc/clientconn.go
generated
vendored
11
vendor/google.golang.org/grpc/clientconn.go
generated
vendored
@ -775,10 +775,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var balCfg serviceconfig.LoadBalancingConfig
|
balCfg := cc.sc.lbConfig
|
||||||
if cc.sc != nil && cc.sc.lbConfig != nil {
|
|
||||||
balCfg = cc.sc.lbConfig
|
|
||||||
}
|
|
||||||
bw := cc.balancerWrapper
|
bw := cc.balancerWrapper
|
||||||
cc.mu.Unlock()
|
cc.mu.Unlock()
|
||||||
|
|
||||||
@ -1374,7 +1371,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address,
|
|||||||
defer cancel()
|
defer cancel()
|
||||||
copts.ChannelzParent = ac.channelz
|
copts.ChannelzParent = ac.channelz
|
||||||
|
|
||||||
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose)
|
newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if logger.V(2) {
|
if logger.V(2) {
|
||||||
logger.Infof("Creating new client transport to %q: %v", addr, err)
|
logger.Infof("Creating new client transport to %q: %v", addr, err)
|
||||||
@ -1448,7 +1445,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
|
|||||||
if !ac.scopts.HealthCheckEnabled {
|
if !ac.scopts.HealthCheckEnabled {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
healthCheckFunc := ac.cc.dopts.healthCheckFunc
|
healthCheckFunc := internal.HealthCheckFunc
|
||||||
if healthCheckFunc == nil {
|
if healthCheckFunc == nil {
|
||||||
// The health package is not imported to set health check function.
|
// The health package is not imported to set health check function.
|
||||||
//
|
//
|
||||||
@ -1480,7 +1477,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
|
|||||||
}
|
}
|
||||||
// Start the health checking stream.
|
// Start the health checking stream.
|
||||||
go func() {
|
go func() {
|
||||||
err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
|
err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if status.Code(err) == codes.Unimplemented {
|
if status.Code(err) == codes.Unimplemented {
|
||||||
channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled")
|
channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled")
|
||||||
|
2
vendor/google.golang.org/grpc/codec.go
generated
vendored
2
vendor/google.golang.org/grpc/codec.go
generated
vendored
@ -71,7 +71,7 @@ func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil
|
return mem.BufferSlice{mem.SliceBuffer(data)}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) {
|
func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) {
|
||||||
|
16
vendor/google.golang.org/grpc/dialoptions.go
generated
vendored
16
vendor/google.golang.org/grpc/dialoptions.go
generated
vendored
@ -87,7 +87,6 @@ type dialOptions struct {
|
|||||||
disableServiceConfig bool
|
disableServiceConfig bool
|
||||||
disableRetry bool
|
disableRetry bool
|
||||||
disableHealthCheck bool
|
disableHealthCheck bool
|
||||||
healthCheckFunc internal.HealthChecker
|
|
||||||
minConnectTimeout func() time.Duration
|
minConnectTimeout func() time.Duration
|
||||||
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
|
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
|
||||||
defaultServiceConfigRawJSON *string
|
defaultServiceConfigRawJSON *string
|
||||||
@ -445,10 +444,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
|
||||||
internal.WithHealthCheckFunc = withHealthCheckFunc
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithDialer returns a DialOption that specifies a function to use for dialing
|
// WithDialer returns a DialOption that specifies a function to use for dialing
|
||||||
// network addresses. If FailOnNonTempDialError() is set to true, and an error
|
// network addresses. If FailOnNonTempDialError() is set to true, and an error
|
||||||
// is returned by f, gRPC checks the error's Temporary() method to decide if it
|
// is returned by f, gRPC checks the error's Temporary() method to decide if it
|
||||||
@ -662,16 +657,6 @@ func WithDisableHealthCheck() DialOption {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// withHealthCheckFunc replaces the default health check function with the
|
|
||||||
// provided one. It makes tests easier to change the health check function.
|
|
||||||
//
|
|
||||||
// For testing purpose only.
|
|
||||||
func withHealthCheckFunc(f internal.HealthChecker) DialOption {
|
|
||||||
return newFuncDialOption(func(o *dialOptions) {
|
|
||||||
o.healthCheckFunc = f
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func defaultDialOptions() dialOptions {
|
func defaultDialOptions() dialOptions {
|
||||||
return dialOptions{
|
return dialOptions{
|
||||||
copts: transport.ConnectOptions{
|
copts: transport.ConnectOptions{
|
||||||
@ -682,7 +667,6 @@ func defaultDialOptions() dialOptions {
|
|||||||
BufferPool: mem.DefaultBufferPool(),
|
BufferPool: mem.DefaultBufferPool(),
|
||||||
},
|
},
|
||||||
bs: internalbackoff.DefaultExponential,
|
bs: internalbackoff.DefaultExponential,
|
||||||
healthCheckFunc: internal.HealthCheckFunc,
|
|
||||||
idleTimeout: 30 * time.Minute,
|
idleTimeout: 30 * time.Minute,
|
||||||
defaultScheme: "dns",
|
defaultScheme: "dns",
|
||||||
maxCallAttempts: defaultMaxCallAttempts,
|
maxCallAttempts: defaultMaxCallAttempts,
|
||||||
|
27
vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
generated
vendored
27
vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
generated
vendored
@ -23,6 +23,7 @@ import (
|
|||||||
|
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
"google.golang.org/grpc/internal"
|
"google.golang.org/grpc/internal"
|
||||||
|
"google.golang.org/grpc/stats"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@ -34,7 +35,7 @@ var logger = grpclog.Component("metrics-registry")
|
|||||||
// DefaultMetrics are the default metrics registered through global metrics
|
// DefaultMetrics are the default metrics registered through global metrics
|
||||||
// registry. This is written to at initialization time only, and is read only
|
// registry. This is written to at initialization time only, and is read only
|
||||||
// after initialization.
|
// after initialization.
|
||||||
var DefaultMetrics = NewMetrics()
|
var DefaultMetrics = stats.NewMetricSet()
|
||||||
|
|
||||||
// MetricDescriptor is the data for a registered metric.
|
// MetricDescriptor is the data for a registered metric.
|
||||||
type MetricDescriptor struct {
|
type MetricDescriptor struct {
|
||||||
@ -42,7 +43,7 @@ type MetricDescriptor struct {
|
|||||||
// (including any per call metrics). See
|
// (including any per call metrics). See
|
||||||
// https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
|
// https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
|
||||||
// for metric naming conventions.
|
// for metric naming conventions.
|
||||||
Name Metric
|
Name string
|
||||||
// The description of this metric.
|
// The description of this metric.
|
||||||
Description string
|
Description string
|
||||||
// The unit (e.g. entries, seconds) of this metric.
|
// The unit (e.g. entries, seconds) of this metric.
|
||||||
@ -154,27 +155,27 @@ func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels .
|
|||||||
}
|
}
|
||||||
|
|
||||||
// registeredMetrics are the registered metric descriptor names.
|
// registeredMetrics are the registered metric descriptor names.
|
||||||
var registeredMetrics = make(map[Metric]bool)
|
var registeredMetrics = make(map[string]bool)
|
||||||
|
|
||||||
// metricsRegistry contains all of the registered metrics.
|
// metricsRegistry contains all of the registered metrics.
|
||||||
//
|
//
|
||||||
// This is written to only at init time, and read only after that.
|
// This is written to only at init time, and read only after that.
|
||||||
var metricsRegistry = make(map[Metric]*MetricDescriptor)
|
var metricsRegistry = make(map[string]*MetricDescriptor)
|
||||||
|
|
||||||
// DescriptorForMetric returns the MetricDescriptor from the global registry.
|
// DescriptorForMetric returns the MetricDescriptor from the global registry.
|
||||||
//
|
//
|
||||||
// Returns nil if MetricDescriptor not present.
|
// Returns nil if MetricDescriptor not present.
|
||||||
func DescriptorForMetric(metric Metric) *MetricDescriptor {
|
func DescriptorForMetric(metricName string) *MetricDescriptor {
|
||||||
return metricsRegistry[metric]
|
return metricsRegistry[metricName]
|
||||||
}
|
}
|
||||||
|
|
||||||
func registerMetric(name Metric, def bool) {
|
func registerMetric(metricName string, def bool) {
|
||||||
if registeredMetrics[name] {
|
if registeredMetrics[metricName] {
|
||||||
logger.Fatalf("metric %v already registered", name)
|
logger.Fatalf("metric %v already registered", metricName)
|
||||||
}
|
}
|
||||||
registeredMetrics[name] = true
|
registeredMetrics[metricName] = true
|
||||||
if def {
|
if def {
|
||||||
DefaultMetrics = DefaultMetrics.Add(name)
|
DefaultMetrics = DefaultMetrics.Add(metricName)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -256,8 +257,8 @@ func snapshotMetricsRegistryForTesting() func() {
|
|||||||
oldRegisteredMetrics := registeredMetrics
|
oldRegisteredMetrics := registeredMetrics
|
||||||
oldMetricsRegistry := metricsRegistry
|
oldMetricsRegistry := metricsRegistry
|
||||||
|
|
||||||
registeredMetrics = make(map[Metric]bool)
|
registeredMetrics = make(map[string]bool)
|
||||||
metricsRegistry = make(map[Metric]*MetricDescriptor)
|
metricsRegistry = make(map[string]*MetricDescriptor)
|
||||||
maps.Copy(registeredMetrics, registeredMetrics)
|
maps.Copy(registeredMetrics, registeredMetrics)
|
||||||
maps.Copy(metricsRegistry, metricsRegistry)
|
maps.Copy(metricsRegistry, metricsRegistry)
|
||||||
|
|
||||||
|
75
vendor/google.golang.org/grpc/experimental/stats/metrics.go
generated
vendored
75
vendor/google.golang.org/grpc/experimental/stats/metrics.go
generated
vendored
@ -19,8 +19,6 @@
|
|||||||
// Package stats contains experimental metrics/stats API's.
|
// Package stats contains experimental metrics/stats API's.
|
||||||
package stats
|
package stats
|
||||||
|
|
||||||
import "maps"
|
|
||||||
|
|
||||||
// MetricsRecorder records on metrics derived from metric registry.
|
// MetricsRecorder records on metrics derived from metric registry.
|
||||||
type MetricsRecorder interface {
|
type MetricsRecorder interface {
|
||||||
// RecordInt64Count records the measurement alongside labels on the int
|
// RecordInt64Count records the measurement alongside labels on the int
|
||||||
@ -39,76 +37,3 @@ type MetricsRecorder interface {
|
|||||||
// gauge associated with the provided handle.
|
// gauge associated with the provided handle.
|
||||||
RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
|
RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Metric is an identifier for a metric.
|
|
||||||
type Metric string
|
|
||||||
|
|
||||||
// Metrics is a set of metrics to record. Once created, Metrics is immutable,
|
|
||||||
// however Add and Remove can make copies with specific metrics added or
|
|
||||||
// removed, respectively.
|
|
||||||
//
|
|
||||||
// Do not construct directly; use NewMetrics instead.
|
|
||||||
type Metrics struct {
|
|
||||||
// metrics are the set of metrics to initialize.
|
|
||||||
metrics map[Metric]bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMetrics returns a Metrics containing Metrics.
|
|
||||||
func NewMetrics(metrics ...Metric) *Metrics {
|
|
||||||
newMetrics := make(map[Metric]bool)
|
|
||||||
for _, metric := range metrics {
|
|
||||||
newMetrics[metric] = true
|
|
||||||
}
|
|
||||||
return &Metrics{
|
|
||||||
metrics: newMetrics,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Metrics returns the metrics set. The returned map is read-only and must not
|
|
||||||
// be modified.
|
|
||||||
func (m *Metrics) Metrics() map[Metric]bool {
|
|
||||||
return m.metrics
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add adds the metrics to the metrics set and returns a new copy with the
|
|
||||||
// additional metrics.
|
|
||||||
func (m *Metrics) Add(metrics ...Metric) *Metrics {
|
|
||||||
newMetrics := make(map[Metric]bool)
|
|
||||||
for metric := range m.metrics {
|
|
||||||
newMetrics[metric] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, metric := range metrics {
|
|
||||||
newMetrics[metric] = true
|
|
||||||
}
|
|
||||||
return &Metrics{
|
|
||||||
metrics: newMetrics,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Join joins the metrics passed in with the metrics set, and returns a new copy
|
|
||||||
// with the merged metrics.
|
|
||||||
func (m *Metrics) Join(metrics *Metrics) *Metrics {
|
|
||||||
newMetrics := make(map[Metric]bool)
|
|
||||||
maps.Copy(newMetrics, m.metrics)
|
|
||||||
maps.Copy(newMetrics, metrics.metrics)
|
|
||||||
return &Metrics{
|
|
||||||
metrics: newMetrics,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove removes the metrics from the metrics set and returns a new copy with
|
|
||||||
// the metrics removed.
|
|
||||||
func (m *Metrics) Remove(metrics ...Metric) *Metrics {
|
|
||||||
newMetrics := make(map[Metric]bool)
|
|
||||||
for metric := range m.metrics {
|
|
||||||
newMetrics[metric] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, metric := range metrics {
|
|
||||||
delete(newMetrics, metric)
|
|
||||||
}
|
|
||||||
return &Metrics{
|
|
||||||
metrics: newMetrics,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
107
vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
generated
vendored
107
vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
generated
vendored
@ -101,6 +101,22 @@ var severityName = []string{
|
|||||||
fatalLog: "FATAL",
|
fatalLog: "FATAL",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// sprintf is fmt.Sprintf.
|
||||||
|
// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
|
||||||
|
var sprintf = fmt.Sprintf
|
||||||
|
|
||||||
|
// sprint is fmt.Sprint.
|
||||||
|
// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
|
||||||
|
var sprint = fmt.Sprint
|
||||||
|
|
||||||
|
// sprintln is fmt.Sprintln.
|
||||||
|
// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
|
||||||
|
var sprintln = fmt.Sprintln
|
||||||
|
|
||||||
|
// exit is os.Exit.
|
||||||
|
// This var exists to make it possible to test functions calling os.Exit.
|
||||||
|
var exit = os.Exit
|
||||||
|
|
||||||
// loggerT is the default logger used by grpclog.
|
// loggerT is the default logger used by grpclog.
|
||||||
type loggerT struct {
|
type loggerT struct {
|
||||||
m []*log.Logger
|
m []*log.Logger
|
||||||
@ -111,7 +127,7 @@ type loggerT struct {
|
|||||||
func (g *loggerT) output(severity int, s string) {
|
func (g *loggerT) output(severity int, s string) {
|
||||||
sevStr := severityName[severity]
|
sevStr := severityName[severity]
|
||||||
if !g.jsonFormat {
|
if !g.jsonFormat {
|
||||||
g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
|
g.m[severity].Output(2, sevStr+": "+s)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// TODO: we can also include the logging component, but that needs more
|
// TODO: we can also include the logging component, but that needs more
|
||||||
@ -123,55 +139,79 @@ func (g *loggerT) output(severity int, s string) {
|
|||||||
g.m[severity].Output(2, string(b))
|
g.m[severity].Output(2, string(b))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (g *loggerT) printf(severity int, format string, args ...any) {
|
||||||
|
// Note the discard check is duplicated in each print func, rather than in
|
||||||
|
// output, to avoid the expensive Sprint calls.
|
||||||
|
// De-duplicating this by moving to output would be a significant performance regression!
|
||||||
|
if lg := g.m[severity]; lg.Writer() == io.Discard {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
g.output(severity, sprintf(format, args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *loggerT) print(severity int, v ...any) {
|
||||||
|
if lg := g.m[severity]; lg.Writer() == io.Discard {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
g.output(severity, sprint(v...))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *loggerT) println(severity int, v ...any) {
|
||||||
|
if lg := g.m[severity]; lg.Writer() == io.Discard {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
g.output(severity, sprintln(v...))
|
||||||
|
}
|
||||||
|
|
||||||
func (g *loggerT) Info(args ...any) {
|
func (g *loggerT) Info(args ...any) {
|
||||||
g.output(infoLog, fmt.Sprint(args...))
|
g.print(infoLog, args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *loggerT) Infoln(args ...any) {
|
func (g *loggerT) Infoln(args ...any) {
|
||||||
g.output(infoLog, fmt.Sprintln(args...))
|
g.println(infoLog, args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *loggerT) Infof(format string, args ...any) {
|
func (g *loggerT) Infof(format string, args ...any) {
|
||||||
g.output(infoLog, fmt.Sprintf(format, args...))
|
g.printf(infoLog, format, args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *loggerT) Warning(args ...any) {
|
func (g *loggerT) Warning(args ...any) {
|
||||||
g.output(warningLog, fmt.Sprint(args...))
|
g.print(warningLog, args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *loggerT) Warningln(args ...any) {
|
func (g *loggerT) Warningln(args ...any) {
|
||||||
g.output(warningLog, fmt.Sprintln(args...))
|
g.println(warningLog, args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *loggerT) Warningf(format string, args ...any) {
|
func (g *loggerT) Warningf(format string, args ...any) {
|
||||||
g.output(warningLog, fmt.Sprintf(format, args...))
|
g.printf(warningLog, format, args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *loggerT) Error(args ...any) {
|
func (g *loggerT) Error(args ...any) {
|
||||||
g.output(errorLog, fmt.Sprint(args...))
|
g.print(errorLog, args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *loggerT) Errorln(args ...any) {
|
func (g *loggerT) Errorln(args ...any) {
|
||||||
g.output(errorLog, fmt.Sprintln(args...))
|
g.println(errorLog, args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *loggerT) Errorf(format string, args ...any) {
|
func (g *loggerT) Errorf(format string, args ...any) {
|
||||||
g.output(errorLog, fmt.Sprintf(format, args...))
|
g.printf(errorLog, format, args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *loggerT) Fatal(args ...any) {
|
func (g *loggerT) Fatal(args ...any) {
|
||||||
g.output(fatalLog, fmt.Sprint(args...))
|
g.print(fatalLog, args...)
|
||||||
os.Exit(1)
|
exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *loggerT) Fatalln(args ...any) {
|
func (g *loggerT) Fatalln(args ...any) {
|
||||||
g.output(fatalLog, fmt.Sprintln(args...))
|
g.println(fatalLog, args...)
|
||||||
os.Exit(1)
|
exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *loggerT) Fatalf(format string, args ...any) {
|
func (g *loggerT) Fatalf(format string, args ...any) {
|
||||||
g.output(fatalLog, fmt.Sprintf(format, args...))
|
g.printf(fatalLog, format, args...)
|
||||||
os.Exit(1)
|
exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *loggerT) V(l int) bool {
|
func (g *loggerT) V(l int) bool {
|
||||||
@ -186,19 +226,42 @@ type LoggerV2Config struct {
|
|||||||
FormatJSON bool
|
FormatJSON bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// combineLoggers returns a combined logger for both higher & lower severity logs,
|
||||||
|
// or only one if the other is io.Discard.
|
||||||
|
//
|
||||||
|
// This uses io.Discard instead of io.MultiWriter when all loggers
|
||||||
|
// are set to io.Discard. Both this package and the standard log package have
|
||||||
|
// significant optimizations for io.Discard, which io.MultiWriter lacks (as of
|
||||||
|
// this writing).
|
||||||
|
func combineLoggers(lower, higher io.Writer) io.Writer {
|
||||||
|
if lower == io.Discard {
|
||||||
|
return higher
|
||||||
|
}
|
||||||
|
if higher == io.Discard {
|
||||||
|
return lower
|
||||||
|
}
|
||||||
|
return io.MultiWriter(lower, higher)
|
||||||
|
}
|
||||||
|
|
||||||
// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration.
|
// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration.
|
||||||
// The infoW, warningW, and errorW writers are used to write log messages of
|
// The infoW, warningW, and errorW writers are used to write log messages of
|
||||||
// different severity levels.
|
// different severity levels.
|
||||||
func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 {
|
func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 {
|
||||||
var m []*log.Logger
|
|
||||||
flag := log.LstdFlags
|
flag := log.LstdFlags
|
||||||
if c.FormatJSON {
|
if c.FormatJSON {
|
||||||
flag = 0
|
flag = 0
|
||||||
}
|
}
|
||||||
m = append(m, log.New(infoW, "", flag))
|
|
||||||
m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
|
warningW = combineLoggers(infoW, warningW)
|
||||||
ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
|
errorW = combineLoggers(errorW, warningW)
|
||||||
m = append(m, log.New(ew, "", flag))
|
|
||||||
m = append(m, log.New(ew, "", flag))
|
fatalW := errorW
|
||||||
|
|
||||||
|
m := []*log.Logger{
|
||||||
|
log.New(infoW, "", flag),
|
||||||
|
log.New(warningW, "", flag),
|
||||||
|
log.New(errorW, "", flag),
|
||||||
|
log.New(fatalW, "", flag),
|
||||||
|
}
|
||||||
return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON}
|
return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON}
|
||||||
}
|
}
|
||||||
|
48
vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
generated
vendored
48
vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
generated
vendored
@ -17,7 +17,7 @@
|
|||||||
|
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
// versions:
|
// versions:
|
||||||
// protoc-gen-go v1.34.2
|
// protoc-gen-go v1.35.1
|
||||||
// protoc v5.27.1
|
// protoc v5.27.1
|
||||||
// source: grpc/health/v1/health.proto
|
// source: grpc/health/v1/health.proto
|
||||||
|
|
||||||
@ -99,11 +99,9 @@ type HealthCheckRequest struct {
|
|||||||
|
|
||||||
func (x *HealthCheckRequest) Reset() {
|
func (x *HealthCheckRequest) Reset() {
|
||||||
*x = HealthCheckRequest{}
|
*x = HealthCheckRequest{}
|
||||||
if protoimpl.UnsafeEnabled {
|
mi := &file_grpc_health_v1_health_proto_msgTypes[0]
|
||||||
mi := &file_grpc_health_v1_health_proto_msgTypes[0]
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms.StoreMessageInfo(mi)
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *HealthCheckRequest) String() string {
|
func (x *HealthCheckRequest) String() string {
|
||||||
@ -114,7 +112,7 @@ func (*HealthCheckRequest) ProtoMessage() {}
|
|||||||
|
|
||||||
func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message {
|
func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message {
|
||||||
mi := &file_grpc_health_v1_health_proto_msgTypes[0]
|
mi := &file_grpc_health_v1_health_proto_msgTypes[0]
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
if x != nil {
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
if ms.LoadMessageInfo() == nil {
|
if ms.LoadMessageInfo() == nil {
|
||||||
ms.StoreMessageInfo(mi)
|
ms.StoreMessageInfo(mi)
|
||||||
@ -146,11 +144,9 @@ type HealthCheckResponse struct {
|
|||||||
|
|
||||||
func (x *HealthCheckResponse) Reset() {
|
func (x *HealthCheckResponse) Reset() {
|
||||||
*x = HealthCheckResponse{}
|
*x = HealthCheckResponse{}
|
||||||
if protoimpl.UnsafeEnabled {
|
mi := &file_grpc_health_v1_health_proto_msgTypes[1]
|
||||||
mi := &file_grpc_health_v1_health_proto_msgTypes[1]
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms.StoreMessageInfo(mi)
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *HealthCheckResponse) String() string {
|
func (x *HealthCheckResponse) String() string {
|
||||||
@ -161,7 +157,7 @@ func (*HealthCheckResponse) ProtoMessage() {}
|
|||||||
|
|
||||||
func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message {
|
func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message {
|
||||||
mi := &file_grpc_health_v1_health_proto_msgTypes[1]
|
mi := &file_grpc_health_v1_health_proto_msgTypes[1]
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
if x != nil {
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
if ms.LoadMessageInfo() == nil {
|
if ms.LoadMessageInfo() == nil {
|
||||||
ms.StoreMessageInfo(mi)
|
ms.StoreMessageInfo(mi)
|
||||||
@ -260,32 +256,6 @@ func file_grpc_health_v1_health_proto_init() {
|
|||||||
if File_grpc_health_v1_health_proto != nil {
|
if File_grpc_health_v1_health_proto != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if !protoimpl.UnsafeEnabled {
|
|
||||||
file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any {
|
|
||||||
switch v := v.(*HealthCheckRequest); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any {
|
|
||||||
switch v := v.(*HealthCheckResponse); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
type x struct{}
|
type x struct{}
|
||||||
out := protoimpl.TypeBuilder{
|
out := protoimpl.TypeBuilder{
|
||||||
File: protoimpl.DescBuilder{
|
File: protoimpl.DescBuilder{
|
||||||
|
2
vendor/google.golang.org/grpc/internal/backoff/backoff.go
generated
vendored
2
vendor/google.golang.org/grpc/internal/backoff/backoff.go
generated
vendored
@ -25,7 +25,7 @@ package backoff
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"math/rand"
|
rand "math/rand/v2"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
grpcbackoff "google.golang.org/grpc/backoff"
|
grpcbackoff "google.golang.org/grpc/backoff"
|
||||||
|
22
vendor/google.golang.org/grpc/internal/internal.go
generated
vendored
22
vendor/google.golang.org/grpc/internal/internal.go
generated
vendored
@ -29,8 +29,6 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// WithHealthCheckFunc is set by dialoptions.go
|
|
||||||
WithHealthCheckFunc any // func (HealthChecker) DialOption
|
|
||||||
// HealthCheckFunc is used to provide client-side LB channel health checking
|
// HealthCheckFunc is used to provide client-side LB channel health checking
|
||||||
HealthCheckFunc HealthChecker
|
HealthCheckFunc HealthChecker
|
||||||
// BalancerUnregister is exported by package balancer to unregister a balancer.
|
// BalancerUnregister is exported by package balancer to unregister a balancer.
|
||||||
@ -149,6 +147,20 @@ var (
|
|||||||
// other features, including the CSDS service.
|
// other features, including the CSDS service.
|
||||||
NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error)
|
NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error)
|
||||||
|
|
||||||
|
// NewXDSResolverWithClientForTesting creates a new xDS resolver builder
|
||||||
|
// using the provided xDS client instead of creating a new one using the
|
||||||
|
// bootstrap configuration specified by the supported environment variables.
|
||||||
|
// The resolver.Builder is meant to be used in conjunction with the
|
||||||
|
// grpc.WithResolvers DialOption. The resolver.Builder does not take
|
||||||
|
// ownership of the provided xDS client and it is the responsibility of the
|
||||||
|
// caller to close the client when no longer required.
|
||||||
|
//
|
||||||
|
// Testing Only
|
||||||
|
//
|
||||||
|
// This function should ONLY be used for testing and may not work with some
|
||||||
|
// other features, including the CSDS service.
|
||||||
|
NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error)
|
||||||
|
|
||||||
// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
|
// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
|
||||||
// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
|
// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
|
||||||
// variable.
|
// variable.
|
||||||
@ -255,3 +267,9 @@ const (
|
|||||||
// It currently has an experimental suffix which would be removed once
|
// It currently has an experimental suffix which would be removed once
|
||||||
// end-to-end testing of the policy is completed.
|
// end-to-end testing of the policy is completed.
|
||||||
const RLSLoadBalancingPolicyName = "rls_experimental"
|
const RLSLoadBalancingPolicyName = "rls_experimental"
|
||||||
|
|
||||||
|
// EnforceSubConnEmbedding is used to enforce proper SubConn implementation
|
||||||
|
// embedding.
|
||||||
|
type EnforceSubConnEmbedding interface {
|
||||||
|
enforceSubConnEmbedding()
|
||||||
|
}
|
||||||
|
41
vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
generated
vendored
41
vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
generated
vendored
@ -24,8 +24,9 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math/rand"
|
rand "math/rand/v2"
|
||||||
"net"
|
"net"
|
||||||
|
"net/netip"
|
||||||
"os"
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@ -122,7 +123,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
|
|||||||
}
|
}
|
||||||
|
|
||||||
// IP address.
|
// IP address.
|
||||||
-	if ipAddr, ok := formatIP(host); ok {
+	if ipAddr, err := formatIP(host); err == nil {
 		addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
 		cc.UpdateState(resolver.State{Addresses: addr})
 		return deadResolver{}, nil
@@ -260,9 +261,9 @@ func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error)
 		return nil, err
 	}
 	for _, a := range lbAddrs {
-		ip, ok := formatIP(a)
-		if !ok {
-			return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
+		ip, err := formatIP(a)
+		if err != nil {
+			return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err)
 		}
 		addr := ip + ":" + strconv.Itoa(int(s.Port))
 		newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
@@ -322,9 +323,9 @@ func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error
 	}
 	newAddrs := make([]resolver.Address, 0, len(addrs))
 	for _, a := range addrs {
-		ip, ok := formatIP(a)
-		if !ok {
-			return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
+		ip, err := formatIP(a)
+		if err != nil {
+			return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err)
 		}
 		addr := ip + ":" + d.port
 		newAddrs = append(newAddrs, resolver.Address{Addr: addr})
@@ -351,19 +352,19 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
 	return &state, nil
 }
 
-// formatIP returns ok = false if addr is not a valid textual representation of
-// an IP address. If addr is an IPv4 address, return the addr and ok = true.
+// formatIP returns an error if addr is not a valid textual representation of
+// an IP address. If addr is an IPv4 address, return the addr and error = nil.
 // If addr is an IPv6 address, return the addr enclosed in square brackets and
-// ok = true.
-func formatIP(addr string) (addrIP string, ok bool) {
-	ip := net.ParseIP(addr)
-	if ip == nil {
-		return "", false
+// error = nil.
+func formatIP(addr string) (string, error) {
+	ip, err := netip.ParseAddr(addr)
+	if err != nil {
+		return "", err
 	}
-	if ip.To4() != nil {
-		return addr, true
+	if ip.Is4() {
+		return addr, nil
 	}
-	return "[" + addr + "]", true
+	return "[" + addr + "]", nil
 }
 
 // parseTarget takes the user input target string and default port, returns
@@ -379,7 +380,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) {
 	if target == "" {
 		return "", "", internal.ErrMissingAddr
 	}
-	if ip := net.ParseIP(target); ip != nil {
+	if _, err := netip.ParseAddr(target); err == nil {
 		// target is an IPv4 or IPv6(without brackets) address
 		return target, defaultPort, nil
 	}
@@ -427,7 +428,7 @@ func chosenByPercentage(a *int) bool {
 	if a == nil {
 		return true
 	}
-	return rand.Intn(100)+1 <= *a
+	return rand.IntN(100)+1 <= *a
 }
 
 func canaryingSC(js string) string {
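The resolver's formatIP now reports failures as the error from netip.ParseAddr instead of an ok bool, and the IPv4 check moves from net.IP.To4 to netip.Addr.Is4. A minimal standalone sketch of the same contract, using an illustrative name (formatAddr) rather than the vendored function:

package main

import (
	"fmt"
	"net/netip"
)

// formatAddr mirrors the new formatIP contract: return the input unchanged
// for IPv4, the bracketed form for IPv6, and the parse error otherwise.
func formatAddr(addr string) (string, error) {
	ip, err := netip.ParseAddr(addr)
	if err != nil {
		return "", err
	}
	if ip.Is4() {
		return addr, nil
	}
	return "[" + addr + "]", nil
}

func main() {
	for _, in := range []string{"10.0.0.1", "2001:db8::1", "not-an-ip"} {
		out, err := formatAddr(in)
		fmt.Println(in, "->", out, err)
	}
}

netip.Addr is a value type, so callers check a parse error rather than a nil pointer, which is what makes the err-based signature the natural shape here.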
144 vendor/google.golang.org/grpc/internal/transport/client_stream.go generated vendored Normal file
@@ -0,0 +1,144 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"sync/atomic"
+
+	"golang.org/x/net/http2"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// ClientStream implements streaming functionality for a gRPC client.
+type ClientStream struct {
+	*Stream // Embed for common stream functionality.
+
+	ct       *http2Client
+	done     chan struct{} // closed at the end of stream to unblock writers.
+	doneFunc func()        // invoked at the end of stream.
+
+	headerChan       chan struct{} // closed to indicate the end of header metadata.
+	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+	// headerValid indicates whether a valid header was received. Only
+	// meaningful after headerChan is closed (always call waitOnHeader() before
+	// reading its value).
+	headerValid bool
+	header      metadata.MD // the received header metadata
+	noHeaders   bool        // set if the client never received headers (set only after the stream is done).
+
+	bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
+	unprocessed   atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
+
+	status *status.Status // the status error received from the server
+}
+
+// Read reads an n byte message from the input stream.
+func (s *ClientStream) Read(n int) (mem.BufferSlice, error) {
+	b, err := s.Stream.read(n)
+	if err == nil {
+		s.ct.incrMsgRecv()
+	}
+	return b, err
+}
+
+// Close closes the stream and popagates err to any readers.
+func (s *ClientStream) Close(err error) {
+	var (
+		rst     bool
+		rstCode http2.ErrCode
+	)
+	if err != nil {
+		rst = true
+		rstCode = http2.ErrCodeCancel
+	}
+	s.ct.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
+}
+
+// Write writes the hdr and data bytes to the output stream.
+func (s *ClientStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
+	return s.ct.write(s, hdr, data, opts)
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *ClientStream) BytesReceived() bool {
+	return s.bytesReceived.Load()
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *ClientStream) Unprocessed() bool {
+	return s.unprocessed.Load()
+}
+
+func (s *ClientStream) waitOnHeader() {
+	select {
+	case <-s.ctx.Done():
+		// Close the stream to prevent headers/trailers from changing after
+		// this function returns.
+		s.Close(ContextErr(s.ctx.Err()))
+		// headerChan could possibly not be closed yet if closeStream raced
+		// with operateHeaders; wait until it is closed explicitly here.
+		<-s.headerChan
+	case <-s.headerChan:
+	}
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is empty string if there is no compression applied.
+func (s *ClientStream) RecvCompress() string {
+	s.waitOnHeader()
+	return s.recvCompress
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *ClientStream) Done() <-chan struct{} {
+	return s.done
+}
+
+// Header returns the header metadata of the stream. Acquires the key-value
+// pairs of header metadata once it is available. It blocks until i) the
+// metadata is ready or ii) there is no header metadata or iii) the stream is
+// canceled/expired.
+func (s *ClientStream) Header() (metadata.MD, error) {
+	s.waitOnHeader()
+
+	if !s.headerValid || s.noHeaders {
+		return nil, s.status.Err()
+	}
+
+	return s.header.Copy(), nil
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only. If the stream ends
+// before headers are received, returns true, nil.
+func (s *ClientStream) TrailersOnly() bool {
+	s.waitOnHeader()
+	return s.noHeaders
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *ClientStream) Status() *status.Status {
+	return s.status
+}
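The new ClientStream declares bytesReceived and unprocessed as sync/atomic.Bool rather than uint32 flags driven through atomic.StoreUint32/LoadUint32, which is why call sites elsewhere in this diff change to Store(true)/Load(). A small standalone comparison of the two styles; the type names here are illustrative, not part of the vendored code:

package main

import (
	"fmt"
	"sync/atomic"
)

// oldFlags shows the pre-1.69 style: a uint32 used as a boolean flag.
type oldFlags struct{ unprocessed uint32 }

func (f *oldFlags) set()      { atomic.StoreUint32(&f.unprocessed, 1) }
func (f *oldFlags) get() bool { return atomic.LoadUint32(&f.unprocessed) == 1 }

// newFlags shows the atomic.Bool style, which states the intent directly
// and removes the manual 0/1 encoding.
type newFlags struct{ unprocessed atomic.Bool }

func (f *newFlags) set()      { f.unprocessed.Store(true) }
func (f *newFlags) get() bool { return f.unprocessed.Load() }

func main() {
	var o oldFlags
	var n newFlags
	o.set()
	n.set()
	fmt.Println(o.get(), n.get()) // true true
}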
9 vendor/google.golang.org/grpc/internal/transport/flowcontrol.go generated vendored
@@ -92,14 +92,11 @@ func (f *trInFlow) newLimit(n uint32) uint32 {
 
 func (f *trInFlow) onData(n uint32) uint32 {
 	f.unacked += n
-	if f.unacked >= f.limit/4 {
-		w := f.unacked
-		f.unacked = 0
+	if f.unacked < f.limit/4 {
 		f.updateEffectiveWindowSize()
-		return w
+		return 0
 	}
-	f.updateEffectiveWindowSize()
-	return 0
+	return f.reset()
 }
 
 func (f *trInFlow) reset() uint32 {
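The rewritten trInFlow.onData inverts the threshold check and hands the flush to reset() instead of zeroing the counter inline. A self-contained toy version of the same accounting, with assumed field names and without the effective-window bookkeeping of the real type:

package main

import "fmt"

// inFlow is a toy version of the connection-level flow control counter:
// received bytes are acknowledged in one batch once a quarter of the
// window limit has accumulated.
type inFlow struct {
	limit   uint32
	unacked uint32
}

// reset flushes and returns everything that is pending.
func (f *inFlow) reset() uint32 {
	w := f.unacked
	f.unacked = 0
	return w
}

// onData mirrors the new shape: early return while below the threshold,
// otherwise flush the whole pending batch via reset().
func (f *inFlow) onData(n uint32) uint32 {
	f.unacked += n
	if f.unacked < f.limit/4 {
		return 0
	}
	return f.reset()
}

func main() {
	f := &inFlow{limit: 100}
	fmt.Println(f.onData(10)) // 0: still below the 25-byte threshold
	fmt.Println(f.onData(20)) // 30: threshold crossed, whole batch returned
}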
36 vendor/google.golang.org/grpc/internal/transport/handler_server.go generated vendored
@@ -225,7 +225,7 @@ func (ht *serverHandlerTransport) do(fn func()) error {
 	}
 }
 
-func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
+func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error {
 	ht.writeStatusMu.Lock()
 	defer ht.writeStatusMu.Unlock()
 
@@ -289,14 +289,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
 
 // writePendingHeaders sets common and custom headers on the first
 // write call (Write, WriteHeader, or WriteStatus)
-func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) {
+func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) {
 	ht.writeCommonHeaders(s)
 	ht.writeCustomHeaders(s)
 }
 
 // writeCommonHeaders sets common headers on the first write
 // call (Write, WriteHeader, or WriteStatus).
-func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
+func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) {
 	h := ht.rw.Header()
 	h["Date"] = nil // suppress Date to make tests happy; TODO: restore
 	h.Set("Content-Type", ht.contentType)
@@ -317,7 +317,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
 
 // writeCustomHeaders sets custom headers set on the stream via SetHeader
 // on the first write call (Write, WriteHeader, or WriteStatus)
-func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
+func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) {
 	h := ht.rw.Header()
 
 	s.hdrMu.Lock()
@@ -333,7 +333,7 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
 	s.hdrMu.Unlock()
 }
 
-func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
+func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
 	// Always take a reference because otherwise there is no guarantee the data will
 	// be available after this function returns. This is what callers to Write
 	// expect.
@@ -357,7 +357,7 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSl
 	return nil
 }
 
-func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
+func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error {
 	if err := s.SetHeader(md); err != nil {
 		return err
 	}
@@ -385,7 +385,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
 	return err
 }
 
-func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) {
+func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) {
 	// With this transport type there will be exactly 1 stream: this HTTP request.
 	var cancel context.CancelFunc
 	if ht.timeoutSet {
@@ -408,16 +408,18 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
 
 	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
 	req := ht.req
-	s := &Stream{
-		id:             0, // irrelevant
-		ctx:            ctx,
-		requestRead:    func(int) {},
+	s := &ServerStream{
+		Stream: &Stream{
+			id:             0, // irrelevant
+			ctx:            ctx,
+			requestRead:    func(int) {},
+			buf:            newRecvBuffer(),
+			method:         req.URL.Path,
+			recvCompress:   req.Header.Get("grpc-encoding"),
+			contentSubtype: ht.contentSubtype,
+		},
 		cancel:           cancel,
-		buf:              newRecvBuffer(),
 		st:               ht,
-		method:           req.URL.Path,
-		recvCompress:     req.Header.Get("grpc-encoding"),
-		contentSubtype:   ht.contentSubtype,
 		headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
 	}
 	s.trReader = &transportReader{
@@ -471,9 +473,7 @@ func (ht *serverHandlerTransport) runStream() {
 	}
 }
 
-func (ht *serverHandlerTransport) IncrMsgSent() {}
-
-func (ht *serverHandlerTransport) IncrMsgRecv() {}
+func (ht *serverHandlerTransport) incrMsgRecv() {}
 
 func (ht *serverHandlerTransport) Drain(string) {
 	panic("Drain() is not implemented")
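The handler transport now constructs a ServerStream whose common fields live in an embedded *Stream, so per-side state (st, cancel, headerWireLength) sits next to shared state (id, ctx, buf) without duplicating it. A minimal sketch of that embedding pattern, with stand-in fields rather than the real transport types:

package main

import "fmt"

// Stream holds state common to both ends of an RPC.
type Stream struct {
	id     uint32
	method string
}

// ServerStream layers server-only state on top of the shared Stream by
// embedding a pointer to it; Stream's fields are promoted onto ServerStream.
type ServerStream struct {
	*Stream
	headerWireLength int
}

func main() {
	s := &ServerStream{
		Stream:           &Stream{id: 0, method: "/echo.Echo/Ping"},
		headerWireLength: 0,
	}
	// s.method resolves through the embedded *Stream.
	fmt.Println(s.method, s.headerWireLength)
}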
91 vendor/google.golang.org/grpc/internal/transport/http2_client.go generated vendored
@@ -123,7 +123,7 @@ type http2Client struct {
 	mu            sync.Mutex // guard the following variables
 	nextID        uint32
 	state         transportState
-	activeStreams map[uint32]*Stream
+	activeStreams map[uint32]*ClientStream
 	// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
 	prevGoAwayID uint32
 	// goAwayReason records the http2.ErrCode and debug data received with the
@@ -199,10 +199,10 @@ func isTemporary(err error) bool {
 	return true
 }
 
-// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
+// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
 // and starts to receive messages on it. Non-nil error returns if construction
 // fails.
-func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) {
+func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) {
 	scheme := "http"
 	ctx, cancel := context.WithCancel(ctx)
 	defer func() {
@@ -339,7 +339,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
 		framer:        newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
 		fc:            &trInFlow{limit: uint32(icwz)},
 		scheme:        scheme,
-		activeStreams: make(map[uint32]*Stream),
+		activeStreams: make(map[uint32]*ClientStream),
 		isSecure:      isSecure,
 		perRPCCreds:   perRPCCreds,
 		kp:            kp,
@@ -480,17 +480,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
 	return t, nil
 }
 
-func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
+func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream {
 	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
-	s := &Stream{
-		ct:             t,
-		done:           make(chan struct{}),
-		method:         callHdr.Method,
-		sendCompress:   callHdr.SendCompress,
-		buf:            newRecvBuffer(),
-		headerChan:     make(chan struct{}),
-		contentSubtype: callHdr.ContentSubtype,
-		doneFunc:       callHdr.DoneFunc,
+	s := &ClientStream{
+		Stream: &Stream{
+			method:         callHdr.Method,
+			sendCompress:   callHdr.SendCompress,
+			buf:            newRecvBuffer(),
+			contentSubtype: callHdr.ContentSubtype,
+		},
+		ct:         t,
+		done:       make(chan struct{}),
+		headerChan: make(chan struct{}),
+		doneFunc:   callHdr.DoneFunc,
 	}
 	s.wq = newWriteQuota(defaultWriteQuota, s.done)
 	s.requestRead = func(n int) {
@@ -506,7 +508,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
 			ctxDone: s.ctx.Done(),
 			recv:    s.buf,
 			closeStream: func(err error) {
-				t.CloseStream(s, err)
+				s.Close(err)
 			},
 		},
 		windowHandler: func(n int) {
@@ -597,12 +599,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
 	for k, v := range callAuthData {
 		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
 	}
-	if b := stats.OutgoingTags(ctx); b != nil {
-		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
-	}
-	if b := stats.OutgoingTrace(ctx); b != nil {
-		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
-	}
 
 	if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
 		var k string
@@ -738,7 +734,7 @@ func (e NewStreamError) Error() string {
 
 // NewStream creates a stream and registers it into the transport as "active"
 // streams. All non-nil errors returned will be *NewStreamError.
-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
+func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) {
 	ctx = peer.NewContext(ctx, t.getPeer())
 
 	// ServerName field of the resolver returned address takes precedence over
@@ -763,7 +759,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
 			return
 		}
 		// The stream was unprocessed by the server.
-		atomic.StoreUint32(&s.unprocessed, 1)
+		s.unprocessed.Store(true)
 		s.write(recvMsg{err: err})
 		close(s.done)
 		// If headerChan isn't closed, then close it.
@@ -908,21 +904,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
 	return s, nil
 }
 
-// CloseStream clears the footprint of a stream when the stream is not needed any more.
-// This must not be executed in reader's goroutine.
-func (t *http2Client) CloseStream(s *Stream, err error) {
-	var (
-		rst     bool
-		rstCode http2.ErrCode
-	)
-	if err != nil {
-		rst = true
-		rstCode = http2.ErrCodeCancel
-	}
-	t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
-}
-
-func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
+func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
 	// Set stream status to done.
 	if s.swapState(streamDone) == streamDone {
 		// If it was already done, return. If multiple closeStream calls
@@ -1085,7 +1067,7 @@ func (t *http2Client) GracefulClose() {
 
 // Write formats the data into HTTP2 data frame(s) and sends it out. The caller
 // should proceed only if Write returns nil.
-func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error {
+func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
 	reader := data.Reader()
 
 	if opts.Last {
@@ -1114,10 +1096,11 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *O
 		_ = reader.Close()
 		return err
 	}
+	t.incrMsgSent()
 	return nil
 }
 
-func (t *http2Client) getStream(f http2.Frame) *Stream {
+func (t *http2Client) getStream(f http2.Frame) *ClientStream {
 	t.mu.Lock()
 	s := t.activeStreams[f.Header().StreamID]
 	t.mu.Unlock()
@@ -1127,7 +1110,7 @@ func (t *http2Client) getStream(f http2.Frame) *Stream {
 // adjustWindow sends out extra window update over the initial window size
 // of stream if the application is requesting data larger in size than
 // the window.
-func (t *http2Client) adjustWindow(s *Stream, n uint32) {
+func (t *http2Client) adjustWindow(s *ClientStream, n uint32) {
 	if w := s.fc.maybeAdjust(n); w > 0 {
 		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
 	}
@@ -1136,7 +1119,7 @@ func (t *http2Client) adjustWindow(s *Stream, n uint32) {
 // updateWindow adjusts the inbound quota for the stream.
 // Window updates will be sent out when the cumulative quota
 // exceeds the corresponding threshold.
-func (t *http2Client) updateWindow(s *Stream, n uint32) {
+func (t *http2Client) updateWindow(s *ClientStream, n uint32) {
 	if w := s.fc.onRead(n); w > 0 {
 		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
 	}
@@ -1242,7 +1225,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
 	}
 	if f.ErrCode == http2.ErrCodeRefusedStream {
 		// The stream was unprocessed by the server.
-		atomic.StoreUint32(&s.unprocessed, 1)
+		s.unprocessed.Store(true)
 	}
 	statusCode, ok := http2ErrConvTab[f.ErrCode]
 	if !ok {
@@ -1383,11 +1366,11 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error {
 		return connectionErrorf(true, nil, "received goaway and there are no active streams")
 	}
 
-	streamsToClose := make([]*Stream, 0)
+	streamsToClose := make([]*ClientStream, 0)
 	for streamID, stream := range t.activeStreams {
 		if streamID > id && streamID <= upperLimit {
 			// The stream was unprocessed by the server.
-			atomic.StoreUint32(&stream.unprocessed, 1)
+			stream.unprocessed.Store(true)
 			streamsToClose = append(streamsToClose, stream)
 		}
 	}
@@ -1439,7 +1422,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 		return
 	}
 	endStream := frame.StreamEnded()
-	atomic.StoreUint32(&s.bytesReceived, 1)
+	s.bytesReceived.Store(true)
 	initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0
 
 	if !initialHeader && !endStream {
@@ -1809,14 +1792,18 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics {
 
 func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
 
-func (t *http2Client) IncrMsgSent() {
-	t.channelz.SocketMetrics.MessagesSent.Add(1)
-	t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano())
+func (t *http2Client) incrMsgSent() {
+	if channelz.IsOn() {
+		t.channelz.SocketMetrics.MessagesSent.Add(1)
+		t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano())
+	}
}
 
-func (t *http2Client) IncrMsgRecv() {
-	t.channelz.SocketMetrics.MessagesReceived.Add(1)
-	t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano())
+func (t *http2Client) incrMsgRecv() {
+	if channelz.IsOn() {
+		t.channelz.SocketMetrics.MessagesReceived.Add(1)
+		t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano())
+	}
 }
 
 func (t *http2Client) getOutFlowWindow() int64 {
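The message counters move from exported IncrMsgSent/IncrMsgRecv to unexported helpers that only touch the channelz socket metrics when channelz is enabled. A sketch of that gating pattern with a stand-in metrics type and a stand-in for channelz.IsOn(); names here are illustrative:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// metrics is a stand-in for the channelz socket metrics the transport updates.
type metrics struct {
	messagesSent     atomic.Int64
	lastSentUnixNano atomic.Int64
}

var channelzOn atomic.Bool // stand-in for channelz.IsOn()

// incrMsgSent pays for the atomic updates only when channelz is turned on,
// mirroring the guard added in the vendored transports.
func incrMsgSent(m *metrics) {
	if channelzOn.Load() {
		m.messagesSent.Add(1)
		m.lastSentUnixNano.Store(time.Now().UnixNano())
	}
}

func main() {
	var m metrics
	incrMsgSent(&m) // channelz off: no-op
	channelzOn.Store(true)
	incrMsgSent(&m)
	fmt.Println(m.messagesSent.Load()) // 1
}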
69 vendor/google.golang.org/grpc/internal/transport/http2_server.go generated vendored
@@ -25,7 +25,7 @@ import (
 	"fmt"
 	"io"
 	"math"
-	"math/rand"
+	rand "math/rand/v2"
 	"net"
 	"net/http"
 	"strconv"
@@ -111,7 +111,7 @@ type http2Server struct {
 	// already initialized since draining is already underway.
 	drainEvent    *grpcsync.Event
 	state         transportState
-	activeStreams map[uint32]*Stream
+	activeStreams map[uint32]*ServerStream
 	// idle is the time instant when the connection went idle.
 	// This is either the beginning of the connection or when the number of
 	// RPCs go down to 0.
@@ -256,7 +256,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
 		inTapHandle:   config.InTapHandle,
 		fc:            &trInFlow{limit: uint32(icwz)},
 		state:         reachable,
-		activeStreams: make(map[uint32]*Stream),
+		activeStreams: make(map[uint32]*ServerStream),
 		stats:         config.StatsHandlers,
 		kp:            kp,
 		idle:          time.Now(),
@@ -359,7 +359,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
 
 // operateHeaders takes action on the decoded headers. Returns an error if fatal
 // error encountered and transport needs to close, otherwise returns nil.
-func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error {
+func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error {
 	// Acquire max stream ID lock for entire duration
 	t.maxStreamMu.Lock()
 	defer t.maxStreamMu.Unlock()
@@ -385,11 +385,13 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
 	t.maxStreamID = streamID
 
 	buf := newRecvBuffer()
-	s := &Stream{
-		id:  streamID,
+	s := &ServerStream{
+		Stream: &Stream{
+			id:  streamID,
+			buf: buf,
+			fc:  &inFlow{limit: uint32(t.initialWindowSize)},
+		},
 		st:               t,
-		buf:              buf,
-		fc:               &inFlow{limit: uint32(t.initialWindowSize)},
 		headerWireLength: int(frame.Header().Length),
 	}
 	var (
@@ -537,12 +539,6 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
 	// Attach the received metadata to the context.
 	if len(mdata) > 0 {
 		s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
-		if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 {
-			s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1]))
-		}
-		if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 {
-			s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1]))
-		}
 	}
 	t.mu.Lock()
 	if t.state != reachable {
@@ -634,7 +630,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
 // HandleStreams receives incoming streams using the given handler. This is
 // typically run in a separate goroutine.
 // traceCtx attaches trace to ctx and returns the new context.
-func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) {
+func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) {
 	defer func() {
 		close(t.readerDone)
 		<-t.loopyWriterDone
@@ -698,7 +694,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) {
 		}
 	}
 
-func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
+func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) {
 	t.mu.Lock()
 	defer t.mu.Unlock()
 	if t.activeStreams == nil {
@@ -716,7 +712,7 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
 // adjustWindow sends out extra window update over the initial window size
 // of stream if the application is requesting data larger in size than
 // the window.
-func (t *http2Server) adjustWindow(s *Stream, n uint32) {
+func (t *http2Server) adjustWindow(s *ServerStream, n uint32) {
 	if w := s.fc.maybeAdjust(n); w > 0 {
 		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
 	}
@@ -726,7 +722,7 @@ func (t *http2Server) adjustWindow(s *Stream, n uint32) {
 // updateWindow adjusts the inbound quota for the stream and the transport.
 // Window updates will deliver to the controller for sending when
 // the cumulative quota exceeds the corresponding threshold.
-func (t *http2Server) updateWindow(s *Stream, n uint32) {
+func (t *http2Server) updateWindow(s *ServerStream, n uint32) {
 	if w := s.fc.onRead(n); w > 0 {
 		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
 			increment: w,
@@ -963,7 +959,7 @@ func (t *http2Server) checkForHeaderListSize(it any) bool {
 	return true
 }
 
-func (t *http2Server) streamContextErr(s *Stream) error {
+func (t *http2Server) streamContextErr(s *ServerStream) error {
 	select {
 	case <-t.done:
 		return ErrConnClosing
@@ -973,7 +969,7 @@ func (t *http2Server) streamContextErr(s *Stream) error {
 }
 
 // WriteHeader sends the header metadata md back to the client.
-func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error {
 	s.hdrMu.Lock()
 	defer s.hdrMu.Unlock()
 	if s.getState() == streamDone {
@@ -1006,7 +1002,7 @@ func (t *http2Server) setResetPingStrikes() {
 	atomic.StoreUint32(&t.resetPingStrikes, 1)
 }
 
-func (t *http2Server) writeHeaderLocked(s *Stream) error {
+func (t *http2Server) writeHeaderLocked(s *ServerStream) error {
 	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
 	// first and create a slice of that exact size.
 	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
@@ -1046,7 +1042,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
 // There is no further I/O operations being able to perform on this stream.
 // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
 // OK is adopted.
-func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
+func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
 	s.hdrMu.Lock()
 	defer s.hdrMu.Unlock()
 
@@ -1117,11 +1113,11 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
 
 // Write converts the data into HTTP2 data frame and sends it out. Non-nil error
 // is returns if it fails (e.g., framing error, transport error).
-func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
+func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
 	reader := data.Reader()
 
 	if !s.isHeaderSent() { // Headers haven't been written yet.
-		if err := t.WriteHeader(s, nil); err != nil {
+		if err := t.writeHeader(s, nil); err != nil {
 			_ = reader.Close()
 			return err
 		}
@@ -1147,6 +1143,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Opti
 		_ = reader.Close()
 		return err
 	}
+	t.incrMsgSent()
 	return nil
 }
 
@@ -1276,7 +1273,7 @@ func (t *http2Server) Close(err error) {
 }
 
 // deleteStream deletes the stream s from transport's active streams.
-func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
+func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
 
 	t.mu.Lock()
 	if _, ok := t.activeStreams[s.id]; ok {
@@ -1297,7 +1294,7 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
 }
 
 // finishStream closes the stream and puts the trailing headerFrame into controlbuf.
-func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
+func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
 	// In case stream sending and receiving are invoked in separate
 	// goroutines (e.g., bi-directional streaming), cancel needs to be
 	// called to interrupt the potential blocking on other goroutines.
@@ -1321,7 +1318,7 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h
 }
 
 // closeStream clears the footprint of a stream when the stream is not needed any more.
-func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
+func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
 	// In case stream sending and receiving are invoked in separate
 	// goroutines (e.g., bi-directional streaming), cancel needs to be
 	// called to interrupt the potential blocking on other goroutines.
@@ -1415,14 +1412,18 @@ func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics {
 	}
 }
 
-func (t *http2Server) IncrMsgSent() {
-	t.channelz.SocketMetrics.MessagesSent.Add(1)
-	t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1)
+func (t *http2Server) incrMsgSent() {
+	if channelz.IsOn() {
+		t.channelz.SocketMetrics.MessagesSent.Add(1)
+		t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1)
+	}
 }
 
-func (t *http2Server) IncrMsgRecv() {
-	t.channelz.SocketMetrics.MessagesReceived.Add(1)
-	t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1)
+func (t *http2Server) incrMsgRecv() {
+	if channelz.IsOn() {
+		t.channelz.SocketMetrics.MessagesReceived.Add(1)
+		t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1)
+	}
 }
 
 func (t *http2Server) getOutFlowWindow() int64 {
@@ -1455,7 +1456,7 @@ func getJitter(v time.Duration) time.Duration {
 	}
 	// Generate a jitter between +/- 10% of the value.
 	r := int64(v / 10)
-	j := rand.Int63n(2*r) - r
+	j := rand.Int64N(2*r) - r
 	return time.Duration(j)
 }
 
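The server transport switches its import to math/rand/v2 (aliased as rand), whose IntN and Int64N replace the old Intn and Int63n. A standalone sketch of a jitter helper written against that API; the zero-value guard is an assumption made to keep the example self-contained, not the vendored code's own check:

package main

import (
	"fmt"
	rand "math/rand/v2"
	"time"
)

// getJitter returns a random offset in roughly (-v/10, +v/10), matching the
// shape of the transport's keepalive jitter helper under math/rand/v2.
func getJitter(v time.Duration) time.Duration {
	if v == 0 {
		return 0 // Int64N panics on a non-positive bound, so guard tiny inputs.
	}
	r := int64(v / 10)
	j := rand.Int64N(2*r) - r
	return time.Duration(j)
}

func main() {
	fmt.Println(getJitter(2 * time.Hour))
}

math/rand/v2 seeds itself, so no rand.Seed call is needed, which is part of why the v2 API is the drop-in choice here.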
178 vendor/google.golang.org/grpc/internal/transport/server_stream.go generated vendored Normal file
@@ -0,0 +1,178 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"context"
+	"errors"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// ServerStream implements streaming functionality for a gRPC server.
+type ServerStream struct {
+	*Stream // Embed for common stream functionality.
+
+	st      internalServerTransport
+	ctxDone <-chan struct{}    // closed at the end of stream. Cache of ctx.Done() (for performance)
+	cancel  context.CancelFunc // invoked at the end of stream to cancel ctx.
+
+	// Holds compressor names passed in grpc-accept-encoding metadata from the
+	// client.
+	clientAdvertisedCompressors string
+	headerWireLength            int
+
+	// hdrMu protects outgoing header and trailer metadata.
+	hdrMu      sync.Mutex
+	header     metadata.MD // the outgoing header metadata. Updated by WriteHeader.
+	headerSent atomic.Bool // atomically set when the headers are sent out.
+}
+
+// Read reads an n byte message from the input stream.
+func (s *ServerStream) Read(n int) (mem.BufferSlice, error) {
+	b, err := s.Stream.read(n)
+	if err == nil {
+		s.st.incrMsgRecv()
+	}
+	return b, err
+}
+
+// SendHeader sends the header metadata for the given stream.
+func (s *ServerStream) SendHeader(md metadata.MD) error {
+	return s.st.writeHeader(s, md)
+}
+
+// Write writes the hdr and data bytes to the output stream.
+func (s *ServerStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
+	return s.st.write(s, hdr, data, opts)
+}
+
+// WriteStatus sends the status of a stream to the client. WriteStatus is
+// the final call made on a stream and always occurs.
+func (s *ServerStream) WriteStatus(st *status.Status) error {
+	return s.st.writeStatus(s, st)
+}
+
+// isHeaderSent indicates whether headers have been sent.
+func (s *ServerStream) isHeaderSent() bool {
+	return s.headerSent.Load()
+}
+
+// updateHeaderSent updates headerSent and returns true
+// if it was already set.
+func (s *ServerStream) updateHeaderSent() bool {
+	return s.headerSent.Swap(true)
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is empty string if there is no compression applied.
+func (s *ServerStream) RecvCompress() string {
+	return s.recvCompress
+}
+
+// SendCompress returns the send compressor name.
+func (s *ServerStream) SendCompress() string {
+	return s.sendCompress
+}
+
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase. See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *ServerStream) ContentSubtype() string {
+	return s.contentSubtype
+}
+
+// SetSendCompress sets the compression algorithm to the stream.
+func (s *ServerStream) SetSendCompress(name string) error {
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return errors.New("transport: set send compressor called after headers sent or stream done")
+	}
+
+	s.sendCompress = name
+	return nil
+}
+
+// SetContext sets the context of the stream. This will be deleted once the
+// stats handler callouts all move to gRPC layer.
+func (s *ServerStream) SetContext(ctx context.Context) {
+	s.ctx = ctx
+}
+
+// ClientAdvertisedCompressors returns the compressor names advertised by the
+// client via grpc-accept-encoding header.
+func (s *ServerStream) ClientAdvertisedCompressors() []string {
+	values := strings.Split(s.clientAdvertisedCompressors, ",")
+	for i, v := range values {
+		values[i] = strings.TrimSpace(v)
+	}
+	return values
+}
+
+// Header returns the header metadata of the stream. It returns the out header
+// after t.WriteHeader is called. It does not block and must not be called
+// until after WriteHeader.
+func (s *ServerStream) Header() (metadata.MD, error) {
+	// Return the header in stream. It will be the out
+	// header after t.WriteHeader is called.
+	return s.header.Copy(), nil
+}
+
+// HeaderWireLength returns the size of the headers of the stream as received
+// from the wire.
+func (s *ServerStream) HeaderWireLength() int {
+	return s.headerWireLength
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// This should not be called in parallel to other data writes.
+func (s *ServerStream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.header = metadata.Join(s.header, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times.
+// This should not be called parallel to other data writes.
+func (s *ServerStream) SetTrailer(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.trailer = metadata.Join(s.trailer, md)
+	s.hdrMu.Unlock()
+	return nil
+}
321 vendor/google.golang.org/grpc/internal/transport/transport.go generated vendored
@@ -27,7 +27,6 @@ import (
 	"fmt"
 	"io"
 	"net"
-	"strings"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -39,7 +38,6 @@ import (
 	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
-	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
 	"google.golang.org/grpc/tap"
@@ -133,7 +131,7 @@ type recvBufferReader struct {
 	err error
 }
 
-func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) {
+func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
 	if r.err != nil {
 		return 0, r.err
 	}
@@ -142,9 +140,9 @@ func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) {
 		return n, nil
 	}
 	if r.closeStream != nil {
-		n, r.err = r.readHeaderClient(header)
+		n, r.err = r.readMessageHeaderClient(header)
 	} else {
-		n, r.err = r.readHeader(header)
+		n, r.err = r.readMessageHeader(header)
 	}
 	return n, r.err
 }
@@ -174,12 +172,12 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
 	return buf, r.err
 }
 
-func (r *recvBufferReader) readHeader(header []byte) (n int, err error) {
+func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) {
 	select {
 	case <-r.ctxDone:
 		return 0, ContextErr(r.ctx.Err())
 	case m := <-r.recv.get():
-		return r.readHeaderAdditional(m, header)
+		return r.readMessageHeaderAdditional(m, header)
 	}
 }
 
@@ -192,7 +190,7 @@ func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) {
 	}
 }
 
-func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) {
+func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err error) {
 	// If the context is canceled, then closes the stream with nil metadata.
 	// closeStream writes its error parameter to r.recv as a recvMsg.
 	// r.readAdditional acts on that message and returns the necessary error.
@@ -213,9 +211,9 @@ func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) {
 		// faster.
 		r.closeStream(ContextErr(r.ctx.Err()))
 		m := <-r.recv.get()
-		return r.readHeaderAdditional(m, header)
+		return r.readMessageHeaderAdditional(m, header)
 	case m := <-r.recv.get():
-		return r.readHeaderAdditional(m, header)
+		return r.readMessageHeaderAdditional(m, header)
 	}
 }
 
@@ -246,7 +244,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
 	}
 }
 
-func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) {
+func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) {
 	r.recv.load()
 	if m.err != nil {
 		if m.buffer != nil {
@@ -288,14 +286,8 @@ const (
 // Stream represents an RPC in the transport layer.
 type Stream struct {
 	id uint32
-	st ServerTransport     // nil for client side Stream
-	ct ClientTransport     // nil for server side Stream
-	ctx context.Context    // the associated context of the stream
-	cancel context.CancelFunc // always nil for client side Stream
-	done chan struct{}     // closed at the end of stream to unblock writers. On the client side.
-	doneFunc func()        // invoked at the end of stream on client side.
-	ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance)
-	method string          // the associated RPC method of the stream
+	ctx    context.Context // the associated context of the stream
+	method string          // the associated RPC method of the stream
 	recvCompress string
 	sendCompress string
 	buf          *recvBuffer
@@ -303,58 +295,17 @@ type Stream struct {
 	fc *inFlow
 	wq *writeQuota
 
-	// Holds compressor names passed in grpc-accept-encoding metadata from the
-	// client. This is empty for the client side stream.
-	clientAdvertisedCompressors string
 	// Callback to state application's intentions to read data. This
 	// is used to adjust flow control, if needed.
 	requestRead func(int)
 
-	headerChan       chan struct{} // closed to indicate the end of header metadata.
-	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
-	// headerValid indicates whether a valid header was received. Only
-	// meaningful after headerChan is closed (always call waitOnHeader() before
-	// reading its value). Not valid on server side.
-	headerValid      bool
-	headerWireLength int // Only set on server side.
-
-	// hdrMu protects header and trailer metadata on the server-side.
-	hdrMu sync.Mutex
-	// On client side, header keeps the received header metadata.
-	//
-	// On server side, header keeps the header set by SetHeader(). The complete
-	// header will merged into this after t.WriteHeader() is called.
-	header  metadata.MD
-	trailer metadata.MD // the key-value map of trailer metadata.
-
-	noHeaders bool // set if the client never received headers (set only after the stream is done).
-
-	// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
-	headerSent uint32
-
 	state streamState
 
-	// On client-side it is the status error received from the server.
-	// On server-side it is unused.
-	status *status.Status
-
-	bytesReceived uint32 // indicates whether any bytes have been received on this stream
-	unprocessed   uint32 // set if the server sends a refused stream or GOAWAY including this stream
-
 	// contentSubtype is the content-subtype for requests.
 	// this must be lowercase or the behavior is undefined.
 	contentSubtype string
-}
 
-// isHeaderSent is only valid on the server-side.
-func (s *Stream) isHeaderSent() bool {
-	return atomic.LoadUint32(&s.headerSent) == 1
-}
-
-// updateHeaderSent updates headerSent and returns true
-// if it was already set. It is valid only on server-side.
-func (s *Stream) updateHeaderSent() bool {
-	return atomic.SwapUint32(&s.headerSent, 1) == 1
+	trailer metadata.MD // the key-value map of trailer metadata.
 }
 
 func (s *Stream) swapState(st streamState) streamState {
@@ -369,110 +320,12 @@ func (s *Stream) getState() streamState {
 	return streamState(atomic.LoadUint32((*uint32)(&s.state)))
 }
 
-func (s *Stream) waitOnHeader() {
-	if s.headerChan == nil {
-		// On the server headerChan is always nil since a stream originates
-		// only after having received headers.
-		return
-	}
-	select {
-	case <-s.ctx.Done():
-		// Close the stream to prevent headers/trailers from changing after
-		// this function returns.
-		s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
-		// headerChan could possibly not be closed yet if closeStream raced
-		// with operateHeaders; wait until it is closed explicitly here.
-		<-s.headerChan
-	case <-s.headerChan:
-	}
-}
-
-// RecvCompress returns the compression algorithm applied to the inbound
|
|
||||||
// message. It is empty string if there is no compression applied.
|
|
||||||
func (s *Stream) RecvCompress() string {
|
|
||||||
s.waitOnHeader()
|
|
||||||
return s.recvCompress
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetSendCompress sets the compression algorithm to the stream.
|
|
||||||
func (s *Stream) SetSendCompress(name string) error {
|
|
||||||
if s.isHeaderSent() || s.getState() == streamDone {
|
|
||||||
return errors.New("transport: set send compressor called after headers sent or stream done")
|
|
||||||
}
|
|
||||||
|
|
||||||
s.sendCompress = name
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SendCompress returns the send compressor name.
|
|
||||||
func (s *Stream) SendCompress() string {
|
|
||||||
return s.sendCompress
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClientAdvertisedCompressors returns the compressor names advertised by the
|
|
||||||
// client via grpc-accept-encoding header.
|
|
||||||
func (s *Stream) ClientAdvertisedCompressors() []string {
|
|
||||||
values := strings.Split(s.clientAdvertisedCompressors, ",")
|
|
||||||
for i, v := range values {
|
|
||||||
values[i] = strings.TrimSpace(v)
|
|
||||||
}
|
|
||||||
return values
|
|
||||||
}
|
|
||||||
|
|
||||||
// Done returns a channel which is closed when it receives the final status
|
|
||||||
// from the server.
|
|
||||||
func (s *Stream) Done() <-chan struct{} {
|
|
||||||
return s.done
|
|
||||||
}
|
|
||||||
|
|
||||||
// Header returns the header metadata of the stream.
|
|
||||||
//
|
|
||||||
// On client side, it acquires the key-value pairs of header metadata once it is
|
|
||||||
// available. It blocks until i) the metadata is ready or ii) there is no header
|
|
||||||
// metadata or iii) the stream is canceled/expired.
|
|
||||||
//
|
|
||||||
// On server side, it returns the out header after t.WriteHeader is called. It
|
|
||||||
// does not block and must not be called until after WriteHeader.
|
|
||||||
func (s *Stream) Header() (metadata.MD, error) {
|
|
||||||
if s.headerChan == nil {
|
|
||||||
// On server side, return the header in stream. It will be the out
|
|
||||||
// header after t.WriteHeader is called.
|
|
||||||
return s.header.Copy(), nil
|
|
||||||
}
|
|
||||||
s.waitOnHeader()
|
|
||||||
|
|
||||||
if !s.headerValid || s.noHeaders {
|
|
||||||
return nil, s.status.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
return s.header.Copy(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TrailersOnly blocks until a header or trailers-only frame is received and
|
|
||||||
// then returns true if the stream was trailers-only. If the stream ends
|
|
||||||
// before headers are received, returns true, nil. Client-side only.
|
|
||||||
func (s *Stream) TrailersOnly() bool {
|
|
||||||
s.waitOnHeader()
|
|
||||||
return s.noHeaders
|
|
||||||
}
|
|
||||||
|
|
||||||
// Trailer returns the cached trailer metadata. Note that if it is not called
|
// Trailer returns the cached trailer metadata. Note that if it is not called
|
||||||
// after the entire stream is done, it could return an empty MD. Client
|
// after the entire stream is done, it could return an empty MD.
|
||||||
// side only.
|
|
||||||
// It can be safely read only after stream has ended that is either read
|
// It can be safely read only after stream has ended that is either read
|
||||||
// or write have returned io.EOF.
|
// or write have returned io.EOF.
|
||||||
func (s *Stream) Trailer() metadata.MD {
|
func (s *Stream) Trailer() metadata.MD {
|
||||||
c := s.trailer.Copy()
|
return s.trailer.Copy()
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContentSubtype returns the content-subtype for a request. For example, a
|
|
||||||
// content-subtype of "proto" will result in a content-type of
|
|
||||||
// "application/grpc+proto". This will always be lowercase. See
|
|
||||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
|
||||||
// more details.
|
|
||||||
func (s *Stream) ContentSubtype() string {
|
|
||||||
return s.contentSubtype
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Context returns the context of the stream.
|
// Context returns the context of the stream.
|
||||||
@ -480,90 +333,31 @@ func (s *Stream) Context() context.Context {
|
|||||||
return s.ctx
|
return s.ctx
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetContext sets the context of the stream. This will be deleted once the
|
|
||||||
// stats handler callouts all move to gRPC layer.
|
|
||||||
func (s *Stream) SetContext(ctx context.Context) {
|
|
||||||
s.ctx = ctx
|
|
||||||
}
|
|
||||||
|
|
||||||
// Method returns the method for the stream.
|
// Method returns the method for the stream.
|
||||||
func (s *Stream) Method() string {
|
func (s *Stream) Method() string {
|
||||||
return s.method
|
return s.method
|
||||||
}
|
}
|
||||||
|
|
||||||
// Status returns the status received from the server.
|
|
||||||
// Status can be read safely only after the stream has ended,
|
|
||||||
// that is, after Done() is closed.
|
|
||||||
func (s *Stream) Status() *status.Status {
|
|
||||||
return s.status
|
|
||||||
}
|
|
||||||
|
|
||||||
// HeaderWireLength returns the size of the headers of the stream as received
|
|
||||||
// from the wire. Valid only on the server.
|
|
||||||
func (s *Stream) HeaderWireLength() int {
|
|
||||||
return s.headerWireLength
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetHeader sets the header metadata. This can be called multiple times.
|
|
||||||
// Server side only.
|
|
||||||
// This should not be called in parallel to other data writes.
|
|
||||||
func (s *Stream) SetHeader(md metadata.MD) error {
|
|
||||||
if md.Len() == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if s.isHeaderSent() || s.getState() == streamDone {
|
|
||||||
return ErrIllegalHeaderWrite
|
|
||||||
}
|
|
||||||
s.hdrMu.Lock()
|
|
||||||
s.header = metadata.Join(s.header, md)
|
|
||||||
s.hdrMu.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SendHeader sends the given header metadata. The given metadata is
|
|
||||||
// combined with any metadata set by previous calls to SetHeader and
|
|
||||||
// then written to the transport stream.
|
|
||||||
func (s *Stream) SendHeader(md metadata.MD) error {
|
|
||||||
return s.st.WriteHeader(s, md)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetTrailer sets the trailer metadata which will be sent with the RPC status
|
|
||||||
// by the server. This can be called multiple times. Server side only.
|
|
||||||
// This should not be called parallel to other data writes.
|
|
||||||
func (s *Stream) SetTrailer(md metadata.MD) error {
|
|
||||||
if md.Len() == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if s.getState() == streamDone {
|
|
||||||
return ErrIllegalHeaderWrite
|
|
||||||
}
|
|
||||||
s.hdrMu.Lock()
|
|
||||||
s.trailer = metadata.Join(s.trailer, md)
|
|
||||||
s.hdrMu.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Stream) write(m recvMsg) {
|
func (s *Stream) write(m recvMsg) {
|
||||||
s.buf.put(m)
|
s.buf.put(m)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadHeader reads data into the provided header slice from the stream. It
|
// ReadMessageHeader reads data into the provided header slice from the stream.
|
||||||
// first checks if there was an error during a previous read operation and
|
// It first checks if there was an error during a previous read operation and
|
||||||
// returns it if present. It then requests a read operation for the length of
|
// returns it if present. It then requests a read operation for the length of
|
||||||
// the header. It continues to read from the stream until the entire header
|
// the header. It continues to read from the stream until the entire header
|
||||||
// slice is filled or an error occurs. If an `io.EOF` error is encountered
|
// slice is filled or an error occurs. If an `io.EOF` error is encountered with
|
||||||
// with partially read data, it is converted to `io.ErrUnexpectedEOF` to
|
// partially read data, it is converted to `io.ErrUnexpectedEOF` to indicate an
|
||||||
// indicate an unexpected end of the stream. The method returns any error
|
// unexpected end of the stream. The method returns any error encountered during
|
||||||
// encountered during the read process or nil if the header was successfully
|
// the read process or nil if the header was successfully read.
|
||||||
// read.
|
func (s *Stream) ReadMessageHeader(header []byte) (err error) {
|
||||||
func (s *Stream) ReadHeader(header []byte) (err error) {
|
|
||||||
// Don't request a read if there was an error earlier
|
// Don't request a read if there was an error earlier
|
||||||
if er := s.trReader.er; er != nil {
|
if er := s.trReader.er; er != nil {
|
||||||
return er
|
return er
|
||||||
}
|
}
|
||||||
s.requestRead(len(header))
|
s.requestRead(len(header))
|
||||||
for len(header) != 0 {
|
for len(header) != 0 {
|
||||||
n, err := s.trReader.ReadHeader(header)
|
n, err := s.trReader.ReadMessageHeader(header)
|
||||||
header = header[n:]
|
header = header[n:]
|
||||||
if len(header) == 0 {
|
if len(header) == 0 {
|
||||||
err = nil
|
err = nil
|
||||||
@ -579,7 +373,7 @@ func (s *Stream) ReadHeader(header []byte) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Read reads n bytes from the wire for this stream.
|
// Read reads n bytes from the wire for this stream.
|
||||||
func (s *Stream) Read(n int) (data mem.BufferSlice, err error) {
|
func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
|
||||||
// Don't request a read if there was an error earlier
|
// Don't request a read if there was an error earlier
|
||||||
if er := s.trReader.er; er != nil {
|
if er := s.trReader.er; er != nil {
|
||||||
return nil, er
|
return nil, er
|
||||||
@ -619,8 +413,8 @@ type transportReader struct {
|
|||||||
er error
|
er error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *transportReader) ReadHeader(header []byte) (int, error) {
|
func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
|
||||||
n, err := t.reader.ReadHeader(header)
|
n, err := t.reader.ReadMessageHeader(header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.er = err
|
t.er = err
|
||||||
return 0, err
|
return 0, err
|
||||||
@ -639,17 +433,6 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) {
|
|||||||
return buf, nil
|
return buf, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// BytesReceived indicates whether any bytes have been received on this stream.
|
|
||||||
func (s *Stream) BytesReceived() bool {
|
|
||||||
return atomic.LoadUint32(&s.bytesReceived) == 1
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unprocessed indicates whether the server did not process this stream --
|
|
||||||
// i.e. it sent a refused stream or GOAWAY including this stream ID.
|
|
||||||
func (s *Stream) Unprocessed() bool {
|
|
||||||
return atomic.LoadUint32(&s.unprocessed) == 1
|
|
||||||
}
|
|
||||||
|
|
||||||
// GoString is implemented by Stream so context.String() won't
|
// GoString is implemented by Stream so context.String() won't
|
||||||
// race when printing %#v.
|
// race when printing %#v.
|
||||||
func (s *Stream) GoString() string {
|
func (s *Stream) GoString() string {
|
||||||
@ -725,15 +508,9 @@ type ConnectOptions struct {
|
|||||||
BufferPool mem.BufferPool
|
BufferPool mem.BufferPool
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientTransport establishes the transport with the required ConnectOptions
|
// WriteOptions provides additional hints and information for message
|
||||||
// and returns it to the caller.
|
|
||||||
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) {
|
|
||||||
return newHTTP2Client(connectCtx, ctx, addr, opts, onClose)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Options provides additional hints and information for message
|
|
||||||
// transmission.
|
// transmission.
|
||||||
type Options struct {
|
type WriteOptions struct {
|
||||||
// Last indicates whether this write is the last piece for
|
// Last indicates whether this write is the last piece for
|
||||||
// this stream.
|
// this stream.
|
||||||
Last bool
|
Last bool
|
||||||
@ -782,18 +559,8 @@ type ClientTransport interface {
|
|||||||
// It does not block.
|
// It does not block.
|
||||||
GracefulClose()
|
GracefulClose()
|
||||||
|
|
||||||
// Write sends the data for the given stream. A nil stream indicates
|
|
||||||
// the write is to be performed on the transport as a whole.
|
|
||||||
Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
|
|
||||||
|
|
||||||
// NewStream creates a Stream for an RPC.
|
// NewStream creates a Stream for an RPC.
|
||||||
NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
|
NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error)
|
||||||
|
|
||||||
// CloseStream clears the footprint of a stream when the stream is
|
|
||||||
// not needed any more. The err indicates the error incurred when
|
|
||||||
// CloseStream is called. Must be called when a stream is finished
|
|
||||||
// unless the associated transport is closing.
|
|
||||||
CloseStream(stream *Stream, err error)
|
|
||||||
|
|
||||||
// Error returns a channel that is closed when some I/O error
|
// Error returns a channel that is closed when some I/O error
|
||||||
// happens. Typically the caller should have a goroutine to monitor
|
// happens. Typically the caller should have a goroutine to monitor
|
||||||
@ -813,12 +580,6 @@ type ClientTransport interface {
|
|||||||
|
|
||||||
// RemoteAddr returns the remote network address.
|
// RemoteAddr returns the remote network address.
|
||||||
RemoteAddr() net.Addr
|
RemoteAddr() net.Addr
|
||||||
|
|
||||||
// IncrMsgSent increments the number of message sent through this transport.
|
|
||||||
IncrMsgSent()
|
|
||||||
|
|
||||||
// IncrMsgRecv increments the number of message received through this transport.
|
|
||||||
IncrMsgRecv()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ServerTransport is the common interface for all gRPC server-side transport
|
// ServerTransport is the common interface for all gRPC server-side transport
|
||||||
@ -828,19 +589,7 @@ type ClientTransport interface {
|
|||||||
// Write methods for a given Stream will be called serially.
|
// Write methods for a given Stream will be called serially.
|
||||||
type ServerTransport interface {
|
type ServerTransport interface {
|
||||||
// HandleStreams receives incoming streams using the given handler.
|
// HandleStreams receives incoming streams using the given handler.
|
||||||
HandleStreams(context.Context, func(*Stream))
|
HandleStreams(context.Context, func(*ServerStream))
|
||||||
|
|
||||||
// WriteHeader sends the header metadata for the given stream.
|
|
||||||
// WriteHeader may not be called on all streams.
|
|
||||||
WriteHeader(s *Stream, md metadata.MD) error
|
|
||||||
|
|
||||||
// Write sends the data for the given stream.
|
|
||||||
// Write may not be called on all streams.
|
|
||||||
Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
|
|
||||||
|
|
||||||
// WriteStatus sends the status of a stream to the client. WriteStatus is
|
|
||||||
// the final call made on a stream and always occurs.
|
|
||||||
WriteStatus(s *Stream, st *status.Status) error
|
|
||||||
|
|
||||||
// Close tears down the transport. Once it is called, the transport
|
// Close tears down the transport. Once it is called, the transport
|
||||||
// should not be accessed any more. All the pending streams and their
|
// should not be accessed any more. All the pending streams and their
|
||||||
@ -852,12 +601,14 @@ type ServerTransport interface {
|
|||||||
|
|
||||||
// Drain notifies the client this ServerTransport stops accepting new RPCs.
|
// Drain notifies the client this ServerTransport stops accepting new RPCs.
|
||||||
Drain(debugData string)
|
Drain(debugData string)
|
||||||
|
}
|
||||||
|
|
||||||
// IncrMsgSent increments the number of message sent through this transport.
|
type internalServerTransport interface {
|
||||||
IncrMsgSent()
|
ServerTransport
|
||||||
|
writeHeader(s *ServerStream, md metadata.MD) error
|
||||||
// IncrMsgRecv increments the number of message received through this transport.
|
write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error
|
||||||
IncrMsgRecv()
|
writeStatus(s *ServerStream, st *status.Status) error
|
||||||
|
incrMsgRecv()
|
||||||
}
|
}
|
||||||
|
|
||||||
// connectionErrorf creates an ConnectionError with the specified error description.
|
// connectionErrorf creates an ConnectionError with the specified error description.
|
||||||
|
59 vendor/google.golang.org/grpc/mem/buffer_slice.go generated vendored
@@ -22,6 +22,11 @@ import (
 "io"
 )
 
+const (
+// 32 KiB is what io.Copy uses.
+readAllBufSize = 32 * 1024
+)
+
 // BufferSlice offers a means to represent data that spans one or more Buffer
 // instances. A BufferSlice is meant to be immutable after creation, and methods
 // like Ref create and return copies of the slice. This is why all methods have
@@ -219,8 +224,58 @@ func (w *writer) Write(p []byte) (n int, err error) {
 
 // NewWriter wraps the given BufferSlice and BufferPool to implement the
 // io.Writer interface. Every call to Write copies the contents of the given
-// buffer into a new Buffer pulled from the given pool and the Buffer is added to
-// the given BufferSlice.
+// buffer into a new Buffer pulled from the given pool and the Buffer is
+// added to the given BufferSlice.
 func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
 return &writer{buffers: buffers, pool: pool}
 }
 
+// ReadAll reads from r until an error or EOF and returns the data it read.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read from src until EOF, it does not treat an EOF from Read
+// as an error to be reported.
+//
+// Important: A failed call returns a non-nil error and may also return
+// partially read buffers. It is the responsibility of the caller to free the
+// BufferSlice returned, or its memory will not be reused.
+func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) {
+var result BufferSlice
+if wt, ok := r.(io.WriterTo); ok {
+// This is more optimal since wt knows the size of chunks it wants to
+// write and, hence, we can allocate buffers of an optimal size to fit
+// them. E.g. might be a single big chunk, and we wouldn't chop it
+// into pieces.
+w := NewWriter(&result, pool)
+_, err := wt.WriteTo(w)
+return result, err
+}
+nextBuffer:
+for {
+buf := pool.Get(readAllBufSize)
+// We asked for 32KiB but may have been given a bigger buffer.
+// Use all of it if that's the case.
+*buf = (*buf)[:cap(*buf)]
+usedCap := 0
+for {
+n, err := r.Read((*buf)[usedCap:])
+usedCap += n
+if err != nil {
+if usedCap == 0 {
+// Nothing in this buf, put it back
+pool.Put(buf)
+} else {
+*buf = (*buf)[:usedCap]
+result = append(result, NewBuffer(buf, pool))
+}
+if err == io.EOF {
+err = nil
+}
+return result, err
+}
+if len(*buf) == usedCap {
+result = append(result, NewBuffer(buf, pool))
+continue nextBuffer
+}
+}
+}
+}
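Editor's note (not part of the upstream diff): mem.ReadAll is the new helper that decompress() now uses in place of io.Copy into a mem writer. Below is a minimal sketch of calling it directly, assuming the exported mem.DefaultBufferPool(); the input reader and printed length are illustrative only.

package main

import (
	"bytes"
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	// Any io.Reader works; bytes.Reader is just a stand-in input here.
	src := bytes.NewReader([]byte("payload read into pooled buffers"))

	// ReadAll fills buffers taken from the pool instead of growing one large
	// []byte, so the memory can be reused after Free.
	data, err := mem.ReadAll(src, mem.DefaultBufferPool())
	if err != nil {
		panic(err)
	}
	// The caller owns the returned BufferSlice and must free it.
	defer data.Free()

	fmt.Println(data.Len()) // total number of bytes read
}

Because bytes.Reader implements io.WriterTo, this particular input takes the WriteTo fast path; a plain io.Reader would go through the pooled read loop instead.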
4 vendor/google.golang.org/grpc/preloader.go generated vendored
@@ -62,7 +62,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error {
 
 materializedData := data.Materialize()
 data.Free()
-p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)}
+p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)}
 
 // TODO: it should be possible to grab the bufferPool from the underlying
 // stream implementation with a type cast to its actual type (such as
@@ -76,7 +76,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error {
 if p.pf.isCompressed() {
 materializedCompData := compData.Materialize()
 compData.Free()
-compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)}
+compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)}
 }
 
 p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf)
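Editor's note (not part of the upstream diff): the preloader change above swaps mem.NewBuffer(&b, nil) for mem.SliceBuffer(b) when wrapping an already-materialized []byte that no pool manages. A small sketch under that assumption:

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	raw := []byte("already-materialized message bytes")

	// SliceBuffer wraps a plain []byte that is not pool-managed; before this
	// release the same spot used mem.NewBuffer(&raw, nil).
	buf := mem.SliceBuffer(raw)
	bs := mem.BufferSlice{buf}

	fmt.Println(bs.Len(), string(buf.ReadOnlyData()))
}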
22 vendor/google.golang.org/grpc/resolver/resolver.go generated vendored
@@ -22,6 +22,7 @@ package resolver
 
 import (
 "context"
+"errors"
 "fmt"
 "net"
 "net/url"
@@ -237,8 +238,8 @@ type ClientConn interface {
 // UpdateState can be omitted.
 UpdateState(State) error
 // ReportError notifies the ClientConn that the Resolver encountered an
-// error. The ClientConn will notify the load balancer and begin calling
-// ResolveNow on the Resolver with exponential backoff.
+// error. The ClientConn then forwards this error to the load balancing
+// policy.
 ReportError(error)
 // NewAddress is called by resolver to notify ClientConn a new list
 // of resolved addresses.
@@ -330,3 +331,20 @@ type AuthorityOverrider interface {
 // typically in line, and must keep it unchanged.
 OverrideAuthority(Target) string
 }
+
+// ValidateEndpoints validates endpoints from a petiole policy's perspective.
+// Petiole policies should call this before calling into their children. See
+// [gRPC A61](https://github.com/grpc/proposal/blob/master/A61-IPv4-IPv6-dualstack-backends.md)
+// for details.
+func ValidateEndpoints(endpoints []Endpoint) error {
+if len(endpoints) == 0 {
+return errors.New("endpoints list is empty")
+}
+
+for _, endpoint := range endpoints {
+for range endpoint.Addresses {
+return nil
+}
+}
+return errors.New("endpoints list contains no addresses")
+}
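Editor's note (not part of the upstream diff): resolver.ValidateEndpoints is new in this release. A minimal sketch of how a petiole load-balancing policy might guard a resolver update with it; the addresses are made up.

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	// Hypothetical endpoint list, shaped the way a petiole LB policy would
	// receive it from the resolver.
	endpoints := []resolver.Endpoint{
		{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}}},
		{Addresses: []resolver.Address{{Addr: "10.0.0.2:443"}}},
	}

	// Returns an error when the list is empty or no endpoint has addresses.
	if err := resolver.ValidateEndpoints(endpoints); err != nil {
		fmt.Println("rejecting update:", err)
		return
	}
	fmt.Println("endpoints ok")
}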
42 vendor/google.golang.org/grpc/rpc_util.go generated vendored
@@ -622,7 +622,7 @@ func (pf payloadFormat) isCompressed() bool {
 }
 
 type streamReader interface {
-ReadHeader(header []byte) error
+ReadMessageHeader(header []byte) error
 Read(n int) (mem.BufferSlice, error)
 }
 
@@ -656,7 +656,7 @@ type parser struct {
 // that the underlying streamReader must not return an incompatible
 // error.
 func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) {
-err := p.r.ReadHeader(p.header[:])
+err := p.r.ReadMessageHeader(p.header[:])
 if err != nil {
 return 0, nil, err
 }
@@ -664,9 +664,6 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSl
 pf := payloadFormat(p.header[0])
 length := binary.BigEndian.Uint32(p.header[1:])
 
-if length == 0 {
-return pf, nil, nil
-}
 if int64(length) > int64(maxInt) {
 return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
 }
@@ -817,7 +814,7 @@ func (p *payloadInfo) free() {
 // the buffer is no longer needed.
 // TODO: Refactor this function to reduce the number of arguments.
 // See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists
-func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool,
+func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool,
 ) (out mem.BufferSlice, err error) {
 pf, compressed, err := p.recvMsg(maxReceiveMessageSize)
 if err != nil {
@@ -841,7 +838,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
 var uncompressedBuf []byte
 uncompressedBuf, err = dc.Do(compressed.Reader())
 if err == nil {
-out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)}
+out = mem.BufferSlice{mem.SliceBuffer(uncompressedBuf)}
 }
 size = len(uncompressedBuf)
 } else {
@@ -877,30 +874,7 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes
 return nil, 0, err
 }
 
-// TODO: Can/should this still be preserved with the new BufferSlice API? Are
-// there any actual benefits to allocating a single large buffer instead of
-// multiple smaller ones?
-//if sizer, ok := compressor.(interface {
-// DecompressedSize(compressedBytes []byte) int
-//}); ok {
-// if size := sizer.DecompressedSize(d); size >= 0 {
-// if size > maxReceiveMessageSize {
-// return nil, size, nil
-// }
-// // size is used as an estimate to size the buffer, but we
-// // will read more data if available.
-// // +MinRead so ReadFrom will not reallocate if size is correct.
-// //
-// // TODO: If we ensure that the buffer size is the same as the DecompressedSize,
-// // we can also utilize the recv buffer pool here.
-// buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
-// bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
-// return buf.Bytes(), int(bytesRead), err
-// }
-//}
-
-var out mem.BufferSlice
-_, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1), pool)
 if err != nil {
 out.Free()
 return nil, 0, err
@@ -908,10 +882,14 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes
 return out, out.Len(), nil
 }
 
+type recvCompressor interface {
+RecvCompress() string
+}
+
 // For the two compressor parameters, both should not be set, but if they are,
 // dc takes precedence over compressor.
 // TODO(dfawley): wrap the old compressor/decompressor using the new API?
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error {
+func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error {
 data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer)
 if err != nil {
 return err
87 vendor/google.golang.org/grpc/server.go generated vendored
@@ -87,12 +87,13 @@ func init() {
 var statusOK = status.New(codes.OK, "")
 var logger = grpclog.Component("core")
 
-type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error)
+// MethodHandler is a function type that processes a unary RPC method call.
+type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error)
 
 // MethodDesc represents an RPC service's method specification.
 type MethodDesc struct {
 MethodName string
-Handler methodHandler
+Handler MethodHandler
 }
 
 // ServiceDesc represents an RPC service's specification.
@@ -621,8 +622,8 @@ func bufferPool(bufferPool mem.BufferPool) ServerOption {
 // workload (assuming a QPS of a few thousand requests/sec).
 const serverWorkerResetThreshold = 1 << 16
 
-// serverWorker blocks on a *transport.Stream channel forever and waits for
-// data to be fed by serveStreams. This allows multiple requests to be
+// serverWorker blocks on a *transport.ServerStream channel forever and waits
+// for data to be fed by serveStreams. This allows multiple requests to be
 // processed by the same goroutine, removing the need for expensive stack
 // re-allocations (see the runtime.morestack problem [1]).
 //
@@ -1020,7 +1021,7 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport,
 }()
 
 streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
-st.HandleStreams(ctx, func(stream *transport.Stream) {
+st.HandleStreams(ctx, func(stream *transport.ServerStream) {
 s.handlersWG.Add(1)
 streamQuota.acquire()
 f := func() {
@@ -1136,7 +1137,7 @@ func (s *Server) incrCallsFailed() {
 s.channelz.ServerMetrics.CallsFailed.Add(1)
 }
 
-func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
+func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error {
 data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
 if err != nil {
 channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
@@ -1165,7 +1166,7 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport,
 if payloadLen > s.opts.maxSendMessageSize {
 return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
 }
-err = t.Write(stream, hdr, payload, opts)
+err = stream.Write(hdr, payload, opts)
 if err == nil {
 if len(s.opts.statsHandlers) != 0 {
 for _, sh := range s.opts.statsHandlers {
@@ -1212,7 +1213,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info
 }
 }
 
-func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 shs := s.opts.statsHandlers
 if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
 if channelz.IsOn() {
@@ -1320,7 +1321,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
 decomp = encoding.GetCompressor(rc)
 if decomp == nil {
 st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
-t.WriteStatus(stream, st)
+stream.WriteStatus(st)
 return st.Err()
 }
 }
@@ -1354,15 +1355,12 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
 
 d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true)
 if err != nil {
-if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
+if e := stream.WriteStatus(status.Convert(err)); e != nil {
 channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 }
 return err
 }
 defer d.Free()
-if channelz.IsOn() {
-t.IncrMsgRecv()
-}
 df := func(v any) error {
 if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
 return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
@@ -1404,7 +1402,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
 trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
 trInfo.tr.SetError()
 }
-if e := t.WriteStatus(stream, appStatus); e != nil {
+if e := stream.WriteStatus(appStatus); e != nil {
 channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 }
 if len(binlogs) != 0 {
@@ -1431,20 +1429,20 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
 if trInfo != nil {
 trInfo.tr.LazyLog(stringer("OK"), false)
 }
-opts := &transport.Options{Last: true}
+opts := &transport.WriteOptions{Last: true}
 
 // Server handler could have set new compressor by calling SetSendCompressor.
 // In case it is set, we need to use it for compressing outbound message.
 if stream.SendCompress() != sendCompressorName {
 comp = encoding.GetCompressor(stream.SendCompress())
 }
-if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil {
+if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil {
 if err == io.EOF {
 // The entire stream is done (for unary RPC only).
 return err
 }
 if sts, ok := status.FromError(err); ok {
-if e := t.WriteStatus(stream, sts); e != nil {
+if e := stream.WriteStatus(sts); e != nil {
 channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 }
 } else {
@@ -1484,9 +1482,6 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
 binlog.Log(ctx, sm)
 }
 }
-if channelz.IsOn() {
-t.IncrMsgSent()
-}
 if trInfo != nil {
 trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
 }
@@ -1502,7 +1497,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
 binlog.Log(ctx, st)
 }
 }
-return t.WriteStatus(stream, statusOK)
+return stream.WriteStatus(statusOK)
 }
 
 // chainStreamServerInterceptors chains all stream server interceptors into one.
@@ -1541,7 +1536,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf
 }
 }
 
-func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
 if channelz.IsOn() {
 s.incrCallsStarted()
 }
@@ -1561,7 +1556,6 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
 ctx = NewContextWithServerTransportStream(ctx, stream)
 ss := &serverStream{
 ctx: ctx,
-t: t,
 s: stream,
 p: &parser{r: stream, bufferPool: s.opts.bufferPool},
 codec: s.getCodec(stream.ContentSubtype()),
@@ -1648,7 +1642,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
 ss.decomp = encoding.GetCompressor(rc)
 if ss.decomp == nil {
 st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
-t.WriteStatus(ss.s, st)
+ss.s.WriteStatus(st)
 return st.Err()
 }
 }
@@ -1717,7 +1711,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
 binlog.Log(ctx, st)
 }
 }
-t.WriteStatus(ss.s, appStatus)
+ss.s.WriteStatus(appStatus)
 // TODO: Should we log an error from WriteStatus here and below?
 return appErr
 }
@@ -1735,10 +1729,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
 binlog.Log(ctx, st)
 }
 }
-return t.WriteStatus(ss.s, statusOK)
+return ss.s.WriteStatus(statusOK)
 }
 
-func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) {
 ctx := stream.Context()
 ctx = contextWithServer(ctx, s)
 var ti *traceInfo
@@ -1768,7 +1762,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 ti.tr.SetError()
 }
 errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
-if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
+if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil {
 if ti != nil {
 ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 ti.tr.SetError()
@@ -1783,17 +1777,20 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 service := sm[:pos]
 method := sm[pos+1:]
 
-md, _ := metadata.FromIncomingContext(ctx)
-for _, sh := range s.opts.statsHandlers {
-ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
-sh.HandleRPC(ctx, &stats.InHeader{
-FullMethod: stream.Method(),
-RemoteAddr: t.Peer().Addr,
-LocalAddr: t.Peer().LocalAddr,
-Compression: stream.RecvCompress(),
-WireLength: stream.HeaderWireLength(),
-Header: md,
-})
+// FromIncomingContext is expensive: skip if there are no statsHandlers
+if len(s.opts.statsHandlers) > 0 {
+md, _ := metadata.FromIncomingContext(ctx)
+for _, sh := range s.opts.statsHandlers {
+ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
+sh.HandleRPC(ctx, &stats.InHeader{
+FullMethod: stream.Method(),
+RemoteAddr: t.Peer().Addr,
+LocalAddr: t.Peer().LocalAddr,
+Compression: stream.RecvCompress(),
+WireLength: stream.HeaderWireLength(),
+Header: md,
+})
+}
+}
 }
 // To have calls in stream callouts work. Will delete once all stats handler
 // calls come from the gRPC layer.
@@ -1802,17 +1799,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 srv, knownService := s.services[service]
 if knownService {
 if md, ok := srv.methods[method]; ok {
-s.processUnaryRPC(ctx, t, stream, srv, md, ti)
+s.processUnaryRPC(ctx, stream, srv, md, ti)
 return
 }
 if sd, ok := srv.streams[method]; ok {
-s.processStreamingRPC(ctx, t, stream, srv, sd, ti)
+s.processStreamingRPC(ctx, stream, srv, sd, ti)
 return
 }
 }
 // Unknown service, or known server unknown method.
 if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
-s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti)
+s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti)
 return
 }
 var errDesc string
@@ -1825,7 +1822,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 ti.tr.LazyPrintf("%s", errDesc)
 ti.tr.SetError()
 }
-if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
+if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil {
 if ti != nil {
 ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 ti.tr.SetError()
@@ -2100,7 +2097,7 @@ func SendHeader(ctx context.Context, md metadata.MD) error {
 // Notice: This function is EXPERIMENTAL and may be changed or removed in a
 // later release.
 func SetSendCompressor(ctx context.Context, name string) error {
-stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
+stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream)
 if !ok || stream == nil {
 return fmt.Errorf("failed to fetch the stream from the given context")
 }
@@ -2122,7 +2119,7 @@ func SetSendCompressor(ctx context.Context, name string) error {
 // Notice: This function is EXPERIMENTAL and may be changed or removed in a
 // later release.
 func ClientSupportedCompressors(ctx context.Context) ([]string, error) {
-stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
+stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream)
 if !ok || stream == nil {
 return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx)
 }
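Editor's note (not part of the upstream diff): server.go now exports MethodHandler, so the Handler field of grpc.MethodDesc can be populated by hand-written code. Below is a hedged sketch of a handler with the exported signature; the method name, request type, and wiring are hypothetical, and a real service would normally still come from protoc-gen-go-grpc.

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
)

// pingHandler matches the exported grpc.MethodHandler signature, so a
// MethodDesc entry can be written by hand instead of being generated.
func pingHandler(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) {
	req := new(string) // hypothetical request type; a real service would decode a proto message
	if err := dec(req); err != nil {
		return nil, err
	}
	return "pong", nil
}

// A hand-written method table; protoc-gen-go-grpc normally emits this.
var pingMethods = []grpc.MethodDesc{
	{MethodName: "Ping", Handler: pingHandler},
}

func main() {
	// Demonstrates only that the exported types line up; wiring the table
	// into a grpc.ServiceDesc and Server.RegisterService works as before.
	var h grpc.MethodHandler = pingHandler
	fmt.Println(len(pingMethods) == 1, h != nil)
}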
5 vendor/google.golang.org/grpc/service_config.go generated vendored
@@ -168,6 +168,7 @@ func init() {
 return parseServiceConfig(js, defaultMaxCallAttempts)
 }
 }
+
 func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult {
 if len(js) == 0 {
 return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
@@ -297,7 +298,7 @@ func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalservi
 return rp, nil
 }
 
-func min(a, b *int) *int {
+func minPointers(a, b *int) *int {
 if *a < *b {
 return a
 }
@@ -309,7 +310,7 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
 return &defaultVal
 }
 if mcMax != nil && doptMax != nil {
-return min(mcMax, doptMax)
+return minPointers(mcMax, doptMax)
 }
 if mcMax != nil {
 return mcMax
81
vendor/google.golang.org/grpc/stats/metrics.go
generated
vendored
Normal file
81
vendor/google.golang.org/grpc/stats/metrics.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import "maps"
+
+// MetricSet is a set of metrics to record. Once created, MetricSet is immutable,
+// however Add and Remove can make copies with specific metrics added or
+// removed, respectively.
+//
+// Do not construct directly; use NewMetricSet instead.
+type MetricSet struct {
+	// metrics are the set of metrics to initialize.
+	metrics map[string]bool
+}
+
+// NewMetricSet returns a MetricSet containing metricNames.
+func NewMetricSet(metricNames ...string) *MetricSet {
+	newMetrics := make(map[string]bool)
+	for _, metric := range metricNames {
+		newMetrics[metric] = true
+	}
+	return &MetricSet{metrics: newMetrics}
+}
+
+// Metrics returns the metrics set. The returned map is read-only and must not
+// be modified.
+func (m *MetricSet) Metrics() map[string]bool {
+	return m.metrics
+}
+
+// Add adds the metricNames to the metrics set and returns a new copy with the
+// additional metrics.
+func (m *MetricSet) Add(metricNames ...string) *MetricSet {
+	newMetrics := make(map[string]bool)
+	for metric := range m.metrics {
+		newMetrics[metric] = true
+	}
+
+	for _, metric := range metricNames {
+		newMetrics[metric] = true
+	}
+	return &MetricSet{metrics: newMetrics}
+}
+
+// Join joins the metrics passed in with the metrics set, and returns a new copy
+// with the merged metrics.
+func (m *MetricSet) Join(metrics *MetricSet) *MetricSet {
+	newMetrics := make(map[string]bool)
+	maps.Copy(newMetrics, m.metrics)
+	maps.Copy(newMetrics, metrics.metrics)
+	return &MetricSet{metrics: newMetrics}
+}
+
+// Remove removes the metricNames from the metrics set and returns a new copy
+// with the metrics removed.
+func (m *MetricSet) Remove(metricNames ...string) *MetricSet {
+	newMetrics := make(map[string]bool)
+	for metric := range m.metrics {
+		newMetrics[metric] = true
+	}
+
+	for _, metric := range metricNames {
+		delete(newMetrics, metric)
+	}
+	return &MetricSet{metrics: newMetrics}
+}
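For orientation, a minimal sketch of how the new MetricSet API composes. The method names come from the file above; the metric names are made up purely for illustration:

package main

import (
	"fmt"

	"google.golang.org/grpc/stats"
)

func main() {
	// Start from a base set and derive copies; MetricSet itself is immutable.
	base := stats.NewMetricSet("grpc.client.attempt.duration") // hypothetical metric name
	extended := base.Add("grpc.client.call.duration")          // hypothetical metric name
	trimmed := extended.Remove("grpc.client.attempt.duration")
	merged := base.Join(trimmed)

	// Both names survive the join: one from base, one from trimmed.
	fmt.Println(len(merged.Metrics())) // 2
}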
74 vendor/google.golang.org/grpc/stats/stats.go (generated, vendored)
@@ -260,84 +260,42 @@ func (s *ConnEnd) IsClient() bool { return s.Client }
 
 func (s *ConnEnd) isConnStats() {}
 
-type incomingTagsKey struct{}
-type outgoingTagsKey struct{}
-
 // SetTags attaches stats tagging data to the context, which will be sent in
 // the outgoing RPC with the header grpc-tags-bin. Subsequent calls to
 // SetTags will overwrite the values from earlier calls.
 //
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release. New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
+// Deprecated: set the `grpc-tags-bin` header in the metadata instead.
 func SetTags(ctx context.Context, b []byte) context.Context {
-	return context.WithValue(ctx, outgoingTagsKey{}, b)
+	return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b))
 }
 
 // Tags returns the tags from the context for the inbound RPC.
 //
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release. New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
+// Deprecated: obtain the `grpc-tags-bin` header from metadata instead.
 func Tags(ctx context.Context) []byte {
-	b, _ := ctx.Value(incomingTagsKey{}).([]byte)
-	return b
+	traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin")
+	if len(traceValues) == 0 {
+		return nil
+	}
+	return []byte(traceValues[len(traceValues)-1])
 }
 
-// SetIncomingTags attaches stats tagging data to the context, to be read by
-// the application (not sent in outgoing RPCs).
-//
-// This is intended for gRPC-internal use ONLY.
-func SetIncomingTags(ctx context.Context, b []byte) context.Context {
-	return context.WithValue(ctx, incomingTagsKey{}, b)
-}
-
-// OutgoingTags returns the tags from the context for the outbound RPC.
-//
-// This is intended for gRPC-internal use ONLY.
-func OutgoingTags(ctx context.Context) []byte {
-	b, _ := ctx.Value(outgoingTagsKey{}).([]byte)
-	return b
-}
-
-type incomingTraceKey struct{}
-type outgoingTraceKey struct{}
-
 // SetTrace attaches stats tagging data to the context, which will be sent in
 // the outgoing RPC with the header grpc-trace-bin. Subsequent calls to
 // SetTrace will overwrite the values from earlier calls.
 //
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release. New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
+// Deprecated: set the `grpc-trace-bin` header in the metadata instead.
 func SetTrace(ctx context.Context, b []byte) context.Context {
-	return context.WithValue(ctx, outgoingTraceKey{}, b)
+	return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b))
 }
 
 // Trace returns the trace from the context for the inbound RPC.
 //
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release. New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
+// Deprecated: obtain the `grpc-trace-bin` header from metadata instead.
 func Trace(ctx context.Context) []byte {
-	b, _ := ctx.Value(incomingTraceKey{}).([]byte)
-	return b
-}
-
-// SetIncomingTrace attaches stats tagging data to the context, to be read by
-// the application (not sent in outgoing RPCs). It is intended for
-// gRPC-internal use.
-func SetIncomingTrace(ctx context.Context, b []byte) context.Context {
-	return context.WithValue(ctx, incomingTraceKey{}, b)
-}
-
-// OutgoingTrace returns the trace from the context for the outbound RPC. It is
-// intended for gRPC-internal use.
-func OutgoingTrace(ctx context.Context) []byte {
-	b, _ := ctx.Value(outgoingTraceKey{}).([]byte)
-	return b
+	traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin")
+	if len(traceValues) == 0 {
+		return nil
+	}
+	return []byte(traceValues[len(traceValues)-1])
 }
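Since SetTags/Tags and SetTrace/Trace are now deprecated thin wrappers over metadata, callers can work with the headers directly. A minimal sketch using only the metadata calls the new implementations rely on; the payload bytes and the single-process "server side" context are purely illustrative:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	tags := []byte{0x01, 0x02} // illustrative payload

	// Client side: attach the tags to the outgoing metadata, which is what the
	// new stats.SetTags implementation does internally.
	_ = metadata.AppendToOutgoingContext(context.Background(), "grpc-tags-bin", string(tags))

	// Server side: in a real handler the metadata arrives on the incoming
	// context; one is constructed here only to keep the sketch self-contained.
	srvCtx := metadata.NewIncomingContext(context.Background(),
		metadata.Pairs("grpc-tags-bin", string(tags)))
	vals := metadata.ValueFromIncomingContext(srvCtx, "grpc-tags-bin")
	if len(vals) > 0 {
		// Like the new stats.Tags, take the last value for the header.
		fmt.Printf("received tags: %v\n", []byte(vals[len(vals)-1]))
	}
}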
62 vendor/google.golang.org/grpc/stream.go (generated, vendored)
@@ -23,7 +23,7 @@ import (
 	"errors"
 	"io"
 	"math"
-	"math/rand"
+	rand "math/rand/v2"
 	"strconv"
 	"sync"
 	"time"
@@ -113,7 +113,9 @@ type ClientStream interface {
 	// SendMsg is generally called by generated code. On error, SendMsg aborts
 	// the stream. If the error was generated by the client, the status is
 	// returned directly; otherwise, io.EOF is returned and the status of
-	// the stream may be discovered using RecvMsg.
+	// the stream may be discovered using RecvMsg. For unary or server-streaming
+	// RPCs (StreamDesc.ClientStreams is false), a nil error is returned
+	// unconditionally.
 	//
 	// SendMsg blocks until:
 	//   - There is sufficient flow control to schedule m with the transport, or
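As the updated comment notes, when the client side does not stream, SendMsg may report nil even though the RPC has already failed, so the status has to be pulled out via RecvMsg. A sketch under that assumption; the method name "/example.Watcher/Watch", the any-typed messages, and the already-dialed conn are hypothetical stand-ins:

package example

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/status"
)

// watch is a sketch only: the method and message types are made up, and conn
// is assumed to be an already-dialed *grpc.ClientConn.
func watch(ctx context.Context, conn *grpc.ClientConn, req, resp any) error {
	desc := &grpc.StreamDesc{ServerStreams: true} // the client side does not stream
	cs, err := conn.NewStream(ctx, desc, "/example.Watcher/Watch")
	if err != nil {
		return err
	}
	// Per the updated doc comment, SendMsg can return nil here even if the RPC
	// has already failed, because ClientStreams is false.
	if err := cs.SendMsg(req); err != nil {
		return err
	}
	if err := cs.CloseSend(); err != nil {
		return err
	}
	// The authoritative status surfaces from RecvMsg.
	if err := cs.RecvMsg(resp); err != nil {
		st, _ := status.FromError(err)
		return st.Err()
	}
	return nil
}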
@@ -216,7 +218,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 
 	var mc serviceconfig.MethodConfig
 	var onCommit func()
-	var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
+	newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
 		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...)
 	}
 
@@ -584,7 +586,7 @@ type csAttempt struct {
 	ctx        context.Context
 	cs         *clientStream
 	t          transport.ClientTransport
-	s          *transport.Stream
+	s          *transport.ClientStream
 	p          *parser
 	pickResult balancer.PickResult
 
@@ -706,11 +708,10 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) {
 			cs.numRetriesSincePushback = 0
 		} else {
 			fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback))
-			cur := float64(rp.InitialBackoff) * fact
-			if max := float64(rp.MaxBackoff); cur > max {
-				cur = max
-			}
-			dur = time.Duration(rand.Int63n(int64(cur)))
+			cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff))
+			// Apply jitter by multiplying with a random factor between 0.8 and 1.2
+			cur *= 0.8 + 0.4*rand.Float64()
+			dur = time.Duration(int64(cur))
 			cs.numRetriesSincePushback++
 		}
 
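The retry backoff now clamps to the maximum first and then jitters by roughly +/-20 percent around the clamped value, where the old code drew the delay uniformly from [0, cur). A standalone restatement of the new computation, with made-up retry-policy numbers:

package main

import (
	"fmt"
	"math"
	rand "math/rand/v2"
	"time"
)

func main() {
	// Illustrative retry-policy values, not taken from any real service config.
	initialBackoff := 100 * time.Millisecond
	maxBackoff := 2 * time.Second
	multiplier := 2.0
	retries := 3 // retries since the last server pushback

	fact := math.Pow(multiplier, float64(retries))
	cur := min(float64(initialBackoff)*fact, float64(maxBackoff)) // clamp to the max
	cur *= 0.8 + 0.4*rand.Float64()                               // jitter: x0.8 .. x1.2
	dur := time.Duration(int64(cur))

	fmt.Println("sleeping for", dur) // roughly 640ms..960ms for these numbers
}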
@@ -991,7 +992,7 @@ func (cs *clientStream) CloseSend() error {
 	}
 	cs.sentLast = true
 	op := func(a *csAttempt) error {
-		a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
+		a.s.Write(nil, nil, &transport.WriteOptions{Last: true})
 		// Always return nil; io.EOF is the only error that might make sense
 		// instead, but there is no need to signal the client to call RecvMsg
 		// as the only use left for the stream after CloseSend is to call
@@ -1083,7 +1084,7 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength
 		}
 		a.mu.Unlock()
 	}
-	if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
+	if err := a.s.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil {
 		if !cs.desc.ClientStreams {
 			// For non-client-streaming RPCs, we return nil instead of EOF on error
 			// because the generated code requires it. finish is not called; RecvMsg()
@@ -1097,9 +1098,6 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength
 		sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
 	}
 	}
-	if channelz.IsOn() {
-		a.t.IncrMsgSent()
-	}
 	return nil
 }
 
@@ -1153,9 +1151,6 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 			Length: payInfo.uncompressedBytes.Len(),
 		})
 	}
-	if channelz.IsOn() {
-		a.t.IncrMsgRecv()
-	}
 	if cs.desc.ServerStreams {
 		// Subsequent messages should be received by subsequent RecvMsg calls.
 		return nil
@@ -1183,7 +1178,7 @@ func (a *csAttempt) finish(err error) {
 	}
 	var tr metadata.MD
 	if a.s != nil {
-		a.t.CloseStream(a.s, err)
+		a.s.Close(err)
 		tr = a.s.Trailer()
 	}
 
@@ -1340,7 +1335,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
 }
 
 type addrConnStream struct {
-	s       *transport.Stream
+	s       *transport.ClientStream
 	ac      *addrConn
 	callHdr *transport.CallHdr
 	cancel  context.CancelFunc
@@ -1380,7 +1375,7 @@ func (as *addrConnStream) CloseSend() error {
 	}
 	as.sentLast = true
 
-	as.t.Write(as.s, nil, nil, &transport.Options{Last: true})
+	as.s.Write(nil, nil, &transport.WriteOptions{Last: true})
 	// Always return nil; io.EOF is the only error that might make sense
 	// instead, but there is no need to signal the client to call RecvMsg
 	// as the only use left for the stream after CloseSend is to call
@@ -1430,7 +1425,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) {
 		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize)
 	}
 
-	if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
+	if err := as.s.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil {
 		if !as.desc.ClientStreams {
 			// For non-client-streaming RPCs, we return nil instead of EOF on error
 			// because the generated code requires it. finish is not called; RecvMsg()
@@ -1440,9 +1435,6 @@ func (as *addrConnStream) SendMsg(m any) (err error) {
 		return io.EOF
 	}
 
-	if channelz.IsOn() {
-		as.t.IncrMsgSent()
-	}
 	return nil
 }
 
@@ -1480,9 +1472,6 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
 		return toRPCErr(err)
 	}
 
-	if channelz.IsOn() {
-		as.t.IncrMsgRecv()
-	}
 	if as.desc.ServerStreams {
 		// Subsequent messages should be received by subsequent RecvMsg calls.
 		return nil
@@ -1510,7 +1499,7 @@ func (as *addrConnStream) finish(err error) {
 		err = nil
 	}
 	if as.s != nil {
-		as.t.CloseStream(as.s, err)
+		as.s.Close(err)
 	}
 
 	if err != nil {
@@ -1577,8 +1566,7 @@ type ServerStream interface {
 // serverStream implements a server side Stream.
 type serverStream struct {
 	ctx   context.Context
-	t     transport.ServerTransport
-	s     *transport.Stream
+	s     *transport.ServerStream
 	p     *parser
 	codec baseCodec
 
@@ -1628,7 +1616,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error {
 		return status.Error(codes.Internal, err.Error())
 	}
 
-	err = ss.t.WriteHeader(ss.s, md)
+	err = ss.s.SendHeader(md)
 	if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged {
 		h, _ := ss.s.Header()
 		sh := &binarylog.ServerHeader{
@@ -1668,7 +1656,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
 		}
 		if err != nil && err != io.EOF {
 			st, _ := status.FromError(toRPCErr(err))
-			ss.t.WriteStatus(ss.s, st)
+			ss.s.WriteStatus(st)
 			// Non-user specified status was sent out. This should be an error
 			// case (as a server side Cancel maybe).
 			//
@@ -1676,9 +1664,6 @@ func (ss *serverStream) SendMsg(m any) (err error) {
 			// status from the service handler, we will log that error instead.
 			// This behavior is similar to an interceptor.
 		}
-		if channelz.IsOn() && err == nil {
-			ss.t.IncrMsgSent()
-		}
 	}()
 
 	// Server handler could have set new compressor by calling SetSendCompressor.
@@ -1710,7 +1695,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
 	if payloadLen > ss.maxSendMessageSize {
 		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize)
 	}
-	if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
+	if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil {
 		return toRPCErr(err)
 	}
 
@@ -1756,7 +1741,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 		}
 		if err != nil && err != io.EOF {
 			st, _ := status.FromError(toRPCErr(err))
-			ss.t.WriteStatus(ss.s, st)
+			ss.s.WriteStatus(st)
 			// Non-user specified status was sent out. This should be an error
 			// case (as a server side Cancel maybe).
 			//
@@ -1764,9 +1749,6 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 			// status from the service handler, we will log that error instead.
 			// This behavior is similar to an interceptor.
 		}
-		if channelz.IsOn() && err == nil {
-			ss.t.IncrMsgRecv()
-		}
 	}()
 	var payInfo *payloadInfo
 	if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
2 vendor/google.golang.org/grpc/version.go (generated, vendored)
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.68.1"
+const Version = "1.69.0-dev"
22 vendor/modules.txt (vendored)
@@ -661,8 +661,8 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/inte
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-# go.opentelemetry.io/otel v1.28.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel v1.31.0
+## explicit; go 1.22
 go.opentelemetry.io/otel
 go.opentelemetry.io/otel/attribute
 go.opentelemetry.io/otel/baggage
@@ -689,21 +689,21 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
-# go.opentelemetry.io/otel/metric v1.28.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/metric v1.31.0
+## explicit; go 1.22
 go.opentelemetry.io/otel/metric
 go.opentelemetry.io/otel/metric/embedded
 go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/sdk v1.28.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/sdk v1.31.0
+## explicit; go 1.22
 go.opentelemetry.io/otel/sdk
 go.opentelemetry.io/otel/sdk/instrumentation
 go.opentelemetry.io/otel/sdk/internal/env
 go.opentelemetry.io/otel/sdk/internal/x
 go.opentelemetry.io/otel/sdk/resource
 go.opentelemetry.io/otel/sdk/trace
-# go.opentelemetry.io/otel/trace v1.28.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/trace v1.31.0
+## explicit; go 1.22
 go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
 go.opentelemetry.io/otel/trace/noop
@@ -827,17 +827,17 @@ golang.org/x/tools/go/ast/inspector
 # gomodules.xyz/jsonpatch/v2 v2.4.0 => github.com/gomodules/jsonpatch/v2 v2.2.0
 ## explicit; go 1.12
 gomodules.xyz/jsonpatch/v2
-# google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1
+# google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53
 ## explicit; go 1.21
 google.golang.org/genproto/googleapis/api
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/api/expr/v1alpha1
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53
 ## explicit; go 1.21
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.68.1
+# google.golang.org/grpc v1.69.0
 ## explicit; go 1.22
 google.golang.org/grpc
 google.golang.org/grpc/attributes