diff --git a/go.mod b/go.mod index 69acc8ee2..14906df08 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a + github.com/kube-storage/spec v0.1.0 github.com/kubernetes-csi/csi-lib-utils v0.7.0 github.com/kubernetes-csi/external-snapshotter/v2 v2.1.1 github.com/libopenstorage/secrets v0.0.0-20201006135900-af310b01fe47 @@ -20,7 +21,7 @@ require ( github.com/stretchr/testify v1.7.0 golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 golang.org/x/sys v0.0.0-20201112073958-5cba982894dd - google.golang.org/grpc v1.29.1 + google.golang.org/grpc v1.35.0 k8s.io/api v0.20.0 k8s.io/apimachinery v0.20.0 k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible @@ -37,7 +38,7 @@ replace ( github.com/hashicorp/vault/sdk => github.com/hashicorp/vault/sdk v0.1.14-0.20201116234512-b4d4137dfe8b github.com/kubernetes-csi/external-snapshotter/v2 => github.com/kubernetes-csi/external-snapshotter/v2 v2.1.1-0.20200504125226-859696c419ff github.com/kubernetes-incubator/external-storage => github.com/kubernetes-incubator/external-storage v5.5.0+incompatible - google.golang.org/grpc => google.golang.org/grpc v1.26.0 + google.golang.org/grpc => google.golang.org/grpc v1.35.0 k8s.io/api => k8s.io/api v0.20.0 k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.0 k8s.io/apimachinery => k8s.io/apimachinery v0.20.0 diff --git a/go.sum b/go.sum index 09e0a35f4..7e8ff225d 100644 --- a/go.sum +++ b/go.sum @@ -228,6 +228,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381 h1:rdRS5BT13Iae9ssvcslol66gfOOXjaLYwqerEn/cl9s= github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381/go.mod h1:e5+USP2j8Le2M0Jo3qKPFnNhuo1wueU4nWHCXBOfQ14= github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= @@ -334,8 +335,7 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= @@ -500,7 +500,6 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= @@ -860,6 +859,8 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kube-storage/spec v0.1.0 h1:7vsAQ2q6GpS3RkZFJqlm4VExvIXwXz/v+0DGoKxpIJY= +github.com/kube-storage/spec v0.1.0/go.mod h1:aEM5cuaELIjQPTe4LdyAoR2XvkC5vtd1qz8LgGLf4BQ= github.com/kubernetes-csi/csi-lib-utils v0.7.0 h1:t1cS7HTD7z5D7h9iAdjWuHtMxJPb9s1fIv34rxytzqs= github.com/kubernetes-csi/csi-lib-utils v0.7.0/go.mod h1:bze+2G9+cmoHxN6+WyG1qT4MDxgZJMLGwc7V4acPNm0= github.com/kubernetes-csi/csi-test v2.0.0+incompatible/go.mod h1:YxJ4UiuPWIhMBkxUKY5c267DyA0uDZ/MtAimhx/2TA0= @@ -1617,8 +1618,8 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaR google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/vendor/github.com/kube-storage/spec/LICENSE b/vendor/github.com/kube-storage/spec/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/kube-storage/spec/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/kube-storage/spec/lib/go/replication/replication.pb.go b/vendor/github.com/kube-storage/spec/lib/go/replication/replication.pb.go new file mode 100644 index 000000000..2e8bb85b0 --- /dev/null +++ b/vendor/github.com/kube-storage/spec/lib/go/replication/replication.pb.go @@ -0,0 +1,1044 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: replication.proto + +package replication + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// EnableVolumeReplicationRequest holds the required information to enable +// replication on a volume. +type EnableVolumeReplicationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The identifier for this volume, generated by the plugin during + // CreateVolume CSI RPC call. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // Plugin specific parameters passed in as opaque key-value pairs. + Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Secrets required by the plugin to complete the request. + Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *EnableVolumeReplicationRequest) Reset() { + *x = EnableVolumeReplicationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_replication_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnableVolumeReplicationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnableVolumeReplicationRequest) ProtoMessage() {} + +func (x *EnableVolumeReplicationRequest) ProtoReflect() protoreflect.Message { + mi := &file_replication_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnableVolumeReplicationRequest.ProtoReflect.Descriptor instead. 
+func (*EnableVolumeReplicationRequest) Descriptor() ([]byte, []int) { + return file_replication_proto_rawDescGZIP(), []int{0} +} + +func (x *EnableVolumeReplicationRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId + } + return "" +} + +func (x *EnableVolumeReplicationRequest) GetParameters() map[string]string { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *EnableVolumeReplicationRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets + } + return nil +} + +// EnableVolumeReplicationResponse holds the information to send when +// replication is successfully enabled on a volume. +type EnableVolumeReplicationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *EnableVolumeReplicationResponse) Reset() { + *x = EnableVolumeReplicationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_replication_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnableVolumeReplicationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnableVolumeReplicationResponse) ProtoMessage() {} + +func (x *EnableVolumeReplicationResponse) ProtoReflect() protoreflect.Message { + mi := &file_replication_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnableVolumeReplicationResponse.ProtoReflect.Descriptor instead. +func (*EnableVolumeReplicationResponse) Descriptor() ([]byte, []int) { + return file_replication_proto_rawDescGZIP(), []int{1} +} + +// DisableVolumeReplicationRequest holds the required information to disable +// replication on a volume. +type DisableVolumeReplicationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The identifier for this volume, generated by the plugin during + // CreateVolume CSI RPC call. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // Plugin specific parameters passed in as opaque key-value pairs. + Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Secrets required by the plugin to complete the request. 
+ Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *DisableVolumeReplicationRequest) Reset() { + *x = DisableVolumeReplicationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_replication_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DisableVolumeReplicationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DisableVolumeReplicationRequest) ProtoMessage() {} + +func (x *DisableVolumeReplicationRequest) ProtoReflect() protoreflect.Message { + mi := &file_replication_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DisableVolumeReplicationRequest.ProtoReflect.Descriptor instead. +func (*DisableVolumeReplicationRequest) Descriptor() ([]byte, []int) { + return file_replication_proto_rawDescGZIP(), []int{2} +} + +func (x *DisableVolumeReplicationRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId + } + return "" +} + +func (x *DisableVolumeReplicationRequest) GetParameters() map[string]string { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *DisableVolumeReplicationRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets + } + return nil +} + +// DisableVolumeReplicationResponse holds the information to send when +// replication is successfully disabled on a volume. +type DisableVolumeReplicationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DisableVolumeReplicationResponse) Reset() { + *x = DisableVolumeReplicationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_replication_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DisableVolumeReplicationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DisableVolumeReplicationResponse) ProtoMessage() {} + +func (x *DisableVolumeReplicationResponse) ProtoReflect() protoreflect.Message { + mi := &file_replication_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DisableVolumeReplicationResponse.ProtoReflect.Descriptor instead. +func (*DisableVolumeReplicationResponse) Descriptor() ([]byte, []int) { + return file_replication_proto_rawDescGZIP(), []int{3} +} + +// PromoteVolumeRequest holds the required information to promote volume as a +// primary on local cluster. +type PromoteVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The identifier for this volume, generated by the plugin during + // CreateVolume CSI RPC call. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. 
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // This field is optional. + // Default value is false, force option to Promote the volume. + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` + // Plugin specific parameters passed in as opaque key-value pairs. + Parameters map[string]string `protobuf:"bytes,3,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Secrets required by the plugin to complete the request. + Secrets map[string]string `protobuf:"bytes,4,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *PromoteVolumeRequest) Reset() { + *x = PromoteVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_replication_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PromoteVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PromoteVolumeRequest) ProtoMessage() {} + +func (x *PromoteVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_replication_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PromoteVolumeRequest.ProtoReflect.Descriptor instead. +func (*PromoteVolumeRequest) Descriptor() ([]byte, []int) { + return file_replication_proto_rawDescGZIP(), []int{4} +} + +func (x *PromoteVolumeRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId + } + return "" +} + +func (x *PromoteVolumeRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +func (x *PromoteVolumeRequest) GetParameters() map[string]string { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *PromoteVolumeRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets + } + return nil +} + +// PromoteVolumeResponse holds the information to send when +// volume is successfully promoted. +type PromoteVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PromoteVolumeResponse) Reset() { + *x = PromoteVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_replication_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PromoteVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PromoteVolumeResponse) ProtoMessage() {} + +func (x *PromoteVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_replication_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PromoteVolumeResponse.ProtoReflect.Descriptor instead. +func (*PromoteVolumeResponse) Descriptor() ([]byte, []int) { + return file_replication_proto_rawDescGZIP(), []int{5} +} + +// DemoteVolumeRequest holds the required information to demote volume on local +// cluster. 
+type DemoteVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The identifier for this volume, generated by the plugin during + // CreateVolume CSI RPC call. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // This field is optional. + // Default value is false, force option to Demote the volume. + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` + // Plugin specific parameters passed in as opaque key-value pairs. + Parameters map[string]string `protobuf:"bytes,3,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Secrets required by the plugin to complete the request. + Secrets map[string]string `protobuf:"bytes,4,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *DemoteVolumeRequest) Reset() { + *x = DemoteVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_replication_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DemoteVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DemoteVolumeRequest) ProtoMessage() {} + +func (x *DemoteVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_replication_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DemoteVolumeRequest.ProtoReflect.Descriptor instead. +func (*DemoteVolumeRequest) Descriptor() ([]byte, []int) { + return file_replication_proto_rawDescGZIP(), []int{6} +} + +func (x *DemoteVolumeRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId + } + return "" +} + +func (x *DemoteVolumeRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +func (x *DemoteVolumeRequest) GetParameters() map[string]string { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *DemoteVolumeRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets + } + return nil +} + +// DemoteVolumeResponse holds the information to send when +// volume is successfully demoted. 
+type DemoteVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DemoteVolumeResponse) Reset() { + *x = DemoteVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_replication_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DemoteVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DemoteVolumeResponse) ProtoMessage() {} + +func (x *DemoteVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_replication_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DemoteVolumeResponse.ProtoReflect.Descriptor instead. +func (*DemoteVolumeResponse) Descriptor() ([]byte, []int) { + return file_replication_proto_rawDescGZIP(), []int{7} +} + +// ResyncVolumeRequest holds the required information to resync volume. +type ResyncVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The identifier for this volume, generated by the plugin during + // CreateVolume CSI RPC call. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // This field is optional. + // Default value is false, force option to Resync the volume. + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` + // Plugin specific parameters passed in as opaque key-value pairs. + Parameters map[string]string `protobuf:"bytes,3,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Secrets required by the plugin to complete the request. + Secrets map[string]string `protobuf:"bytes,4,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ResyncVolumeRequest) Reset() { + *x = ResyncVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_replication_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResyncVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResyncVolumeRequest) ProtoMessage() {} + +func (x *ResyncVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_replication_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResyncVolumeRequest.ProtoReflect.Descriptor instead. 
+func (*ResyncVolumeRequest) Descriptor() ([]byte, []int) { + return file_replication_proto_rawDescGZIP(), []int{8} +} + +func (x *ResyncVolumeRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId + } + return "" +} + +func (x *ResyncVolumeRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +func (x *ResyncVolumeRequest) GetParameters() map[string]string { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *ResyncVolumeRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets + } + return nil +} + +// ResyncVolumeResponse holds the information to send when +// volume is successfully resynced. +type ResyncVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Indicates that the volume is ready to use. + // The default value is false. + // This field is REQUIRED. + Ready bool `protobuf:"varint,1,opt,name=ready,proto3" json:"ready,omitempty"` +} + +func (x *ResyncVolumeResponse) Reset() { + *x = ResyncVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_replication_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResyncVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResyncVolumeResponse) ProtoMessage() {} + +func (x *ResyncVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_replication_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResyncVolumeResponse.ProtoReflect.Descriptor instead. +func (*ResyncVolumeResponse) Descriptor() ([]byte, []int) { + return file_replication_proto_rawDescGZIP(), []int{9} +} + +func (x *ResyncVolumeResponse) GetReady() bool { + if x != nil { + return x.Ready + } + return false +} + +var file_replication_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1099, + Name: "replication.replication_secret", + Tag: "varint,1099,opt,name=replication_secret", + Filename: "replication.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // Indicates that a field MAY contain information that is sensitive + // and MUST be treated as such (e.g. not logged). 
+ // + // optional bool replication_secret = 1099; + E_ReplicationSecret = &file_replication_proto_extTypes[0] +) + +var File_replication_proto protoreflect.FileDescriptor + +var file_replication_proto_rawDesc = []byte{ + 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xee, 0x02, 0x0a, 0x1e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x12, 0x5b, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, + 0x57, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x38, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xd8, 0x44, 0x01, 0x52, + 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x21, 0x0a, 0x1f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf1, 0x02, 0x0a, 0x1f, 0x44, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x5c, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, + 
0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x58, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x42, 0x03, 0xd8, 0x44, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x1a, + 0x3d, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, + 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x22, 0x0a, 0x20, 0x44, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xe6, + 0x02, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x51, 0x0a, 0x0a, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, + 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4d, 0x0a, + 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, + 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, + 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, + 0xd8, 0x44, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, + 0x50, 0x61, 0x72, 0x61, 
0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x17, 0x0a, 0x15, 0x50, 0x72, 0x6f, 0x6d, 0x6f, + 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x50, 0x0a, 0x0a, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4c, 0x0a, + 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xd8, + 0x44, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xe3, + 0x02, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 
0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x50, 0x0a, 0x0a, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x79, + 0x6e, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4c, 0x0a, 0x07, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x79, 0x6e, + 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xd8, 0x44, 0x01, + 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2c, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x61, + 0x64, 0x79, 0x32, 0x87, 0x04, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x12, 0x76, 0x0a, 0x17, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x79, 0x0a, 0x18, 0x44, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 
0x75, 0x6d, 0x65, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0d, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x21, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, + 0x0a, 0x0c, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x20, + 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x21, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x3a, 0x4d, 0x0a, 0x12, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0xcb, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x42, 0x0f, 0x5a, 0x0d, 0x2e, + 0x3b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_replication_proto_rawDescOnce sync.Once + file_replication_proto_rawDescData = file_replication_proto_rawDesc +) + +func file_replication_proto_rawDescGZIP() []byte { + file_replication_proto_rawDescOnce.Do(func() { + file_replication_proto_rawDescData = protoimpl.X.CompressGZIP(file_replication_proto_rawDescData) + }) + return file_replication_proto_rawDescData +} + +var file_replication_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_replication_proto_goTypes = []interface{}{ + (*EnableVolumeReplicationRequest)(nil), // 0: replication.EnableVolumeReplicationRequest + (*EnableVolumeReplicationResponse)(nil), // 1: replication.EnableVolumeReplicationResponse + (*DisableVolumeReplicationRequest)(nil), // 2: replication.DisableVolumeReplicationRequest + (*DisableVolumeReplicationResponse)(nil), // 3: replication.DisableVolumeReplicationResponse + (*PromoteVolumeRequest)(nil), // 4: replication.PromoteVolumeRequest + (*PromoteVolumeResponse)(nil), // 5: replication.PromoteVolumeResponse + (*DemoteVolumeRequest)(nil), // 6: replication.DemoteVolumeRequest + (*DemoteVolumeResponse)(nil), // 7: 
replication.DemoteVolumeResponse + (*ResyncVolumeRequest)(nil), // 8: replication.ResyncVolumeRequest + (*ResyncVolumeResponse)(nil), // 9: replication.ResyncVolumeResponse + nil, // 10: replication.EnableVolumeReplicationRequest.ParametersEntry + nil, // 11: replication.EnableVolumeReplicationRequest.SecretsEntry + nil, // 12: replication.DisableVolumeReplicationRequest.ParametersEntry + nil, // 13: replication.DisableVolumeReplicationRequest.SecretsEntry + nil, // 14: replication.PromoteVolumeRequest.ParametersEntry + nil, // 15: replication.PromoteVolumeRequest.SecretsEntry + nil, // 16: replication.DemoteVolumeRequest.ParametersEntry + nil, // 17: replication.DemoteVolumeRequest.SecretsEntry + nil, // 18: replication.ResyncVolumeRequest.ParametersEntry + nil, // 19: replication.ResyncVolumeRequest.SecretsEntry + (*descriptorpb.FieldOptions)(nil), // 20: google.protobuf.FieldOptions +} +var file_replication_proto_depIdxs = []int32{ + 10, // 0: replication.EnableVolumeReplicationRequest.parameters:type_name -> replication.EnableVolumeReplicationRequest.ParametersEntry + 11, // 1: replication.EnableVolumeReplicationRequest.secrets:type_name -> replication.EnableVolumeReplicationRequest.SecretsEntry + 12, // 2: replication.DisableVolumeReplicationRequest.parameters:type_name -> replication.DisableVolumeReplicationRequest.ParametersEntry + 13, // 3: replication.DisableVolumeReplicationRequest.secrets:type_name -> replication.DisableVolumeReplicationRequest.SecretsEntry + 14, // 4: replication.PromoteVolumeRequest.parameters:type_name -> replication.PromoteVolumeRequest.ParametersEntry + 15, // 5: replication.PromoteVolumeRequest.secrets:type_name -> replication.PromoteVolumeRequest.SecretsEntry + 16, // 6: replication.DemoteVolumeRequest.parameters:type_name -> replication.DemoteVolumeRequest.ParametersEntry + 17, // 7: replication.DemoteVolumeRequest.secrets:type_name -> replication.DemoteVolumeRequest.SecretsEntry + 18, // 8: replication.ResyncVolumeRequest.parameters:type_name -> replication.ResyncVolumeRequest.ParametersEntry + 19, // 9: replication.ResyncVolumeRequest.secrets:type_name -> replication.ResyncVolumeRequest.SecretsEntry + 20, // 10: replication.replication_secret:extendee -> google.protobuf.FieldOptions + 0, // 11: replication.Controller.EnableVolumeReplication:input_type -> replication.EnableVolumeReplicationRequest + 2, // 12: replication.Controller.DisableVolumeReplication:input_type -> replication.DisableVolumeReplicationRequest + 4, // 13: replication.Controller.PromoteVolume:input_type -> replication.PromoteVolumeRequest + 6, // 14: replication.Controller.DemoteVolume:input_type -> replication.DemoteVolumeRequest + 8, // 15: replication.Controller.ResyncVolume:input_type -> replication.ResyncVolumeRequest + 1, // 16: replication.Controller.EnableVolumeReplication:output_type -> replication.EnableVolumeReplicationResponse + 3, // 17: replication.Controller.DisableVolumeReplication:output_type -> replication.DisableVolumeReplicationResponse + 5, // 18: replication.Controller.PromoteVolume:output_type -> replication.PromoteVolumeResponse + 7, // 19: replication.Controller.DemoteVolume:output_type -> replication.DemoteVolumeResponse + 9, // 20: replication.Controller.ResyncVolume:output_type -> replication.ResyncVolumeResponse + 16, // [16:21] is the sub-list for method output_type + 11, // [11:16] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 10, // [10:11] is the sub-list for extension extendee + 0, // [0:10] is the 
sub-list for field type_name +} + +func init() { file_replication_proto_init() } +func file_replication_proto_init() { + if File_replication_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_replication_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnableVolumeReplicationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_replication_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnableVolumeReplicationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_replication_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DisableVolumeReplicationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_replication_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DisableVolumeReplicationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_replication_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PromoteVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_replication_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PromoteVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_replication_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DemoteVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_replication_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DemoteVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_replication_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResyncVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_replication_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResyncVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_replication_proto_rawDesc, + NumEnums: 0, + NumMessages: 20, + NumExtensions: 1, + NumServices: 1, + }, + GoTypes: file_replication_proto_goTypes, + DependencyIndexes: file_replication_proto_depIdxs, + MessageInfos: file_replication_proto_msgTypes, + ExtensionInfos: file_replication_proto_extTypes, + }.Build() + File_replication_proto = out.File + file_replication_proto_rawDesc = nil + file_replication_proto_goTypes = nil + file_replication_proto_depIdxs = nil +} diff --git 
a/vendor/github.com/kube-storage/spec/lib/go/replication/replication_grpc.pb.go b/vendor/github.com/kube-storage/spec/lib/go/replication/replication_grpc.pb.go new file mode 100644 index 000000000..c52b2f643 --- /dev/null +++ b/vendor/github.com/kube-storage/spec/lib/go/replication/replication_grpc.pb.go @@ -0,0 +1,255 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package replication + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ControllerClient is the client API for Controller service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ControllerClient interface { + // EnableVolumeReplication RPC call to enable the volume replication. + EnableVolumeReplication(ctx context.Context, in *EnableVolumeReplicationRequest, opts ...grpc.CallOption) (*EnableVolumeReplicationResponse, error) + // DisableVolumeReplication RPC call to disable the volume replication. + DisableVolumeReplication(ctx context.Context, in *DisableVolumeReplicationRequest, opts ...grpc.CallOption) (*DisableVolumeReplicationResponse, error) + // PromoteVolume RPC call to promote the volume. + PromoteVolume(ctx context.Context, in *PromoteVolumeRequest, opts ...grpc.CallOption) (*PromoteVolumeResponse, error) + // DemoteVolume RPC call to demote the volume. + DemoteVolume(ctx context.Context, in *DemoteVolumeRequest, opts ...grpc.CallOption) (*DemoteVolumeResponse, error) + // ResyncVolume RPC call to resync the volume. + ResyncVolume(ctx context.Context, in *ResyncVolumeRequest, opts ...grpc.CallOption) (*ResyncVolumeResponse, error) +} + +type controllerClient struct { + cc grpc.ClientConnInterface +} + +func NewControllerClient(cc grpc.ClientConnInterface) ControllerClient { + return &controllerClient{cc} +} + +func (c *controllerClient) EnableVolumeReplication(ctx context.Context, in *EnableVolumeReplicationRequest, opts ...grpc.CallOption) (*EnableVolumeReplicationResponse, error) { + out := new(EnableVolumeReplicationResponse) + err := c.cc.Invoke(ctx, "/replication.Controller/EnableVolumeReplication", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DisableVolumeReplication(ctx context.Context, in *DisableVolumeReplicationRequest, opts ...grpc.CallOption) (*DisableVolumeReplicationResponse, error) { + out := new(DisableVolumeReplicationResponse) + err := c.cc.Invoke(ctx, "/replication.Controller/DisableVolumeReplication", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) PromoteVolume(ctx context.Context, in *PromoteVolumeRequest, opts ...grpc.CallOption) (*PromoteVolumeResponse, error) { + out := new(PromoteVolumeResponse) + err := c.cc.Invoke(ctx, "/replication.Controller/PromoteVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DemoteVolume(ctx context.Context, in *DemoteVolumeRequest, opts ...grpc.CallOption) (*DemoteVolumeResponse, error) { + out := new(DemoteVolumeResponse) + err := c.cc.Invoke(ctx, "/replication.Controller/DemoteVolume", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ResyncVolume(ctx context.Context, in *ResyncVolumeRequest, opts ...grpc.CallOption) (*ResyncVolumeResponse, error) { + out := new(ResyncVolumeResponse) + err := c.cc.Invoke(ctx, "/replication.Controller/ResyncVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ControllerServer is the server API for Controller service. +// All implementations must embed UnimplementedControllerServer +// for forward compatibility +type ControllerServer interface { + // EnableVolumeReplication RPC call to enable the volume replication. + EnableVolumeReplication(context.Context, *EnableVolumeReplicationRequest) (*EnableVolumeReplicationResponse, error) + // DisableVolumeReplication RPC call to disable the volume replication. + DisableVolumeReplication(context.Context, *DisableVolumeReplicationRequest) (*DisableVolumeReplicationResponse, error) + // PromoteVolume RPC call to promote the volume. + PromoteVolume(context.Context, *PromoteVolumeRequest) (*PromoteVolumeResponse, error) + // DemoteVolume RPC call to demote the volume. + DemoteVolume(context.Context, *DemoteVolumeRequest) (*DemoteVolumeResponse, error) + // ResyncVolume RPC call to resync the volume. + ResyncVolume(context.Context, *ResyncVolumeRequest) (*ResyncVolumeResponse, error) + mustEmbedUnimplementedControllerServer() +} + +// UnimplementedControllerServer must be embedded to have forward compatible implementations. +type UnimplementedControllerServer struct { +} + +func (UnimplementedControllerServer) EnableVolumeReplication(context.Context, *EnableVolumeReplicationRequest) (*EnableVolumeReplicationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EnableVolumeReplication not implemented") +} +func (UnimplementedControllerServer) DisableVolumeReplication(context.Context, *DisableVolumeReplicationRequest) (*DisableVolumeReplicationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DisableVolumeReplication not implemented") +} +func (UnimplementedControllerServer) PromoteVolume(context.Context, *PromoteVolumeRequest) (*PromoteVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PromoteVolume not implemented") +} +func (UnimplementedControllerServer) DemoteVolume(context.Context, *DemoteVolumeRequest) (*DemoteVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DemoteVolume not implemented") +} +func (UnimplementedControllerServer) ResyncVolume(context.Context, *ResyncVolumeRequest) (*ResyncVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResyncVolume not implemented") +} +func (UnimplementedControllerServer) mustEmbedUnimplementedControllerServer() {} + +// UnsafeControllerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ControllerServer will +// result in compilation errors. 
+type UnsafeControllerServer interface { + mustEmbedUnimplementedControllerServer() +} + +func RegisterControllerServer(s grpc.ServiceRegistrar, srv ControllerServer) { + s.RegisterService(&Controller_ServiceDesc, srv) +} + +func _Controller_EnableVolumeReplication_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EnableVolumeReplicationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).EnableVolumeReplication(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/replication.Controller/EnableVolumeReplication", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).EnableVolumeReplication(ctx, req.(*EnableVolumeReplicationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_DisableVolumeReplication_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DisableVolumeReplicationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DisableVolumeReplication(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/replication.Controller/DisableVolumeReplication", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).DisableVolumeReplication(ctx, req.(*DisableVolumeReplicationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_PromoteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PromoteVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).PromoteVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/replication.Controller/PromoteVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).PromoteVolume(ctx, req.(*PromoteVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_DemoteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DemoteVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DemoteVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/replication.Controller/DemoteVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).DemoteVolume(ctx, req.(*DemoteVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ResyncVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResyncVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ResyncVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/replication.Controller/ResyncVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ControllerServer).ResyncVolume(ctx, req.(*ResyncVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Controller_ServiceDesc is the grpc.ServiceDesc for Controller service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Controller_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "replication.Controller", + HandlerType: (*ControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "EnableVolumeReplication", + Handler: _Controller_EnableVolumeReplication_Handler, + }, + { + MethodName: "DisableVolumeReplication", + Handler: _Controller_DisableVolumeReplication_Handler, + }, + { + MethodName: "PromoteVolume", + Handler: _Controller_PromoteVolume_Handler, + }, + { + MethodName: "DemoteVolume", + Handler: _Controller_DemoteVolume_Handler, + }, + { + MethodName: "ResyncVolume", + Handler: _Controller_ResyncVolume_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "replication.proto", +} diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml index 819c1e2a9..5847d94e5 100644 --- a/vendor/google.golang.org/grpc/.travis.yml +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -2,22 +2,22 @@ language: go matrix: include: - - go: 1.13.x + - go: 1.14.x env: VET=1 GO111MODULE=on - - go: 1.13.x + - go: 1.14.x env: RACE=1 GO111MODULE=on - - go: 1.13.x + - go: 1.14.x env: RUN386=1 - - go: 1.13.x + - go: 1.14.x env: GRPC_GO_RETRY=on - - go: 1.13.x + - go: 1.14.x env: TESTEXTRAS=1 + - go: 1.13.x + env: GO111MODULE=on - go: 1.12.x env: GO111MODULE=on - - go: 1.11.x + - go: 1.11.x # Keep until interop tests no longer require Go1.11 env: GO111MODULE=on - - go: 1.9.x - env: GAE=1 go_import_path: google.golang.org/grpc @@ -35,7 +35,7 @@ install: script: - set -e - - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; exit 0; fi + - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; security/advancedtls/examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi - if [[ -n "${VET}" ]]; then ./vet.sh; fi - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 4f1567e2f..cd03f8c76 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -57,6 +57,5 @@ How to get your contributions merged smoothly and quickly. - `make vet` to catch vet errors - `make test` to run the tests - `make testrace` to run tests in race mode - - optional `make testappengine` to run tests with appengine - Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index db982aabd..1f0722f16 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -1,13 +1,13 @@ all: vet test testrace -build: deps +build: go build google.golang.org/grpc/... clean: go clean -i google.golang.org/grpc/... deps: - go get -d -v google.golang.org/grpc/... + GO111MODULE=on go get -d -v google.golang.org/grpc/... proto: @ if ! which protoc > /dev/null; then \ @@ -16,26 +16,18 @@ proto: fi go generate google.golang.org/grpc/... -test: testdeps +test: go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... 
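To put the generated bindings above in context, here is a minimal, hypothetical wiring of the `replication.Controller` service into a gRPC server. The struct name, listen address, and the trivial `PromoteVolume` body are illustrative assumptions and are not part of this change; embedding `UnimplementedControllerServer` is what the generated code requires for forward compatibility, so any RPC not overridden returns `codes.Unimplemented`.

```go
// Hypothetical driver-side sketch; only PromoteVolume is overridden.
package main

import (
	"context"
	"log"
	"net"

	"github.com/kube-storage/spec/lib/go/replication"
	"google.golang.org/grpc"
)

// demoServer embeds UnimplementedControllerServer so unimplemented RPCs
// return codes.Unimplemented, as the generated ControllerServer expects.
type demoServer struct {
	replication.UnimplementedControllerServer
}

// PromoteVolume is a stand-in; a real driver would promote the backing
// volume here before returning a response.
func (s *demoServer) PromoteVolume(ctx context.Context, req *replication.PromoteVolumeRequest) (*replication.PromoteVolumeResponse, error) {
	return &replication.PromoteVolumeResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":50051") // assumed address
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer()
	replication.RegisterControllerServer(srv, &demoServer{})
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}
```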
-testappengine: testappenginedeps - goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... +testsubmodule: + cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... + cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... -testappenginedeps: - goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/... - -testdeps: - go get -d -v -t google.golang.org/grpc/... - -testrace: testdeps +testrace: go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... -updatedeps: - go get -d -v -u -f google.golang.org/grpc/... - -updatetestdeps: - go get -d -v -t -u -f google.golang.org/grpc/... +testdeps: + GO111MODULE=on go get -d -v -t google.golang.org/grpc/... vet: vetdeps ./vet.sh @@ -47,14 +39,10 @@ vetdeps: all \ build \ clean \ - deps \ proto \ test \ testappengine \ testappenginedeps \ - testdeps \ testrace \ - updatedeps \ - updatetestdeps \ vet \ vetdeps diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index afbc43db5..3949a683f 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,64 +1,53 @@ # gRPC-Go [![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) -[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) +[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) -The Go implementation of [gRPC](https://grpc.io/): A high performance, open -source, general RPC framework that puts mobile and HTTP/2 first. For more -information see the [gRPC Quick Start: -Go](https://grpc.io/docs/quickstart/go.html) guide. +The [Go][] implementation of [gRPC][]: A high performance, open source, general +RPC framework that puts mobile and HTTP/2 first. For more information see the +[Go gRPC docs][], or jump directly into the [quick start][]. -Installation ------------- +## Prerequisites -To install this package, you need to install Go and setup your Go workspace on -your computer. The simplest way to install the library is to run: +- **[Go][]**: any one of the **three latest major** [releases][go-releases]. +## Installation + +With [Go module][] support (Go 1.11+), simply add the following import + +```go +import "google.golang.org/grpc" ``` + +to your code, and then `go [build|run|test]` will automatically fetch the +necessary dependencies. + +Otherwise, to install the `grpc-go` package, run the following command: + +```console $ go get -u google.golang.org/grpc ``` -With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in -your source code and `go [build|run|test]` will automatically download the -necessary dependencies ([Go modules -ref](https://github.com/golang/go/wiki/Modules)). +> **Note:** If you are trying to access `grpc-go` from **China**, see the +> [FAQ](#FAQ) below. -If you are trying to access grpc-go from within China, please see the -[FAQ](#FAQ) below. +## Learn more -Prerequisites -------------- -gRPC-Go requires Go 1.9 or later. 
+- [Go gRPC docs][], which include a [quick start][] and [API + reference][API] among other resources +- [Low-level technical docs](Documentation) from this repository +- [Performance benchmark][] +- [Examples](examples) -Documentation -------------- -- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API - descriptions. -- Documentation on specific topics can be found in the [Documentation - directory](Documentation/). -- Examples can be found in the [examples directory](examples/). +## FAQ -Performance ------------ -Performance benchmark data for grpc-go and other languages is maintained in -[this -dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). +### I/O Timeout Errors -Status ------- -General Availability [Google Cloud Platform Launch -Stages](https://cloud.google.com/terms/launch-stages). - -FAQ ---- - -#### I/O Timeout Errors - -The `golang.org` domain may be blocked from some countries. `go get` usually +The `golang.org` domain may be blocked from some countries. `go get` usually produces an error like the following when this happens: -``` +```console $ go get -u google.golang.org/grpc package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) ``` @@ -69,7 +58,7 @@ To build Go code, there are several options: - Without Go module support: `git clone` the repo manually: - ``` + ```sh git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc ``` @@ -79,7 +68,7 @@ To build Go code, there are several options: - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: - ``` + ```sh go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest go mod tidy go mod vendor @@ -87,35 +76,66 @@ To build Go code, there are several options: ``` Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. Please refer to [this - issue](https://github.com/golang/go/issues/28652) in the golang repo regarding - this concern. + golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). -#### Compiling error, undefined: grpc.SupportPackageIsVersion +### Compiling error, undefined: grpc.SupportPackageIsVersion -Please update proto package, gRPC package and rebuild the proto files: - - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` - - `go get -u google.golang.org/grpc` - - `protoc --go_out=plugins=grpc:. *.proto` +#### If you are using Go modules: -#### How to turn on logging +Ensure your gRPC-Go version is `require`d at the appropriate version in +the same module containing the generated `.pb.go` files. For example, +`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: -The default logger is controlled by the environment variables. Turn everything -on by setting: +```go +module -``` -GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info +require ( + google.golang.org/grpc v1.27.0 +) ``` -#### The RPC failed with error `"code = Unavailable desc = transport is closing"` +#### If you are *not* using Go modules: + +Update the `proto` package, gRPC package, and rebuild the `.proto` files: + +```sh +go get -u github.com/golang/protobuf/{proto,protoc-gen-go} +go get -u google.golang.org/grpc +protoc --go_out=plugins=grpc:. 
*.proto +``` + +### How to turn on logging + +The default logger is controlled by environment variables. Turn everything on +like this: + +```console +$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99 +$ export GRPC_GO_LOG_SEVERITY_LEVEL=info +``` + +### The RPC failed with error `"code = Unavailable desc = transport is closing"` This error means the connection the RPC is using was closed, and there are many possible reasons, including: 1. mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown + 1. Keepalive parameters caused connection shutdown, for example if you have configured + your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + to allow longer RPC calls to finish. It can be tricky to debug this because the error happens on the client side but the root cause of the connection being closed is on the server side. Turn on logging on __both client and server__, and see if there are any transport errors. + +[API]: https://pkg.go.dev/google.golang.org/grpc +[Go]: https://golang.org +[Go module]: https://github.com/golang/go/wiki/Modules +[gRPC]: https://grpc.io +[Go gRPC docs]: https://grpc.io/docs/languages/go +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696 +[quick start]: https://grpc.io/docs/languages/go/quickstart +[go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md new file mode 100644 index 000000000..be6e10870 --- /dev/null +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -0,0 +1,3 @@ +# Security Policy + +For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 68ffc6201..3220d87be 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -19,7 +19,10 @@ // Package attributes defines a generic key/value store used in various gRPC // components. // -// All APIs in this package are EXPERIMENTAL. +// Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. package attributes import "fmt" @@ -50,6 +53,9 @@ func New(kvs ...interface{}) *Attributes { // times, the last value overwrites all previous values for that key. To // remove an existing key, use a nil value. func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { + if a == nil { + return New(kvs...) + } if len(kvs)%2 != 0 { panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) } @@ -66,5 +72,8 @@ func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. 
func (a *Attributes) Value(key interface{}) interface{} { + if a == nil { + return nil + } return a.m[key] } diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index ff7c3ee6f..542594f5c 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -48,7 +48,10 @@ type BackoffConfig struct { // here for more details: // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type ConnectParams struct { // Backoff specifies the configuration options for connection backoff. Backoff backoff.Config diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go deleted file mode 100644 index a8eb0f476..000000000 --- a/vendor/google.golang.org/grpc/balancer.go +++ /dev/null @@ -1,391 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "net" - "sync" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/naming" - "google.golang.org/grpc/status" -) - -// Address represents a server the client connects to. -// -// Deprecated: please use package balancer. -type Address struct { - // Addr is the server address on which a connection will be established. - Addr string - // Metadata is the information associated with Addr, which may be used - // to make load balancing decision. - Metadata interface{} -} - -// BalancerConfig specifies the configurations for Balancer. -// -// Deprecated: please use package balancer. May be removed in a future 1.x release. -type BalancerConfig struct { - // DialCreds is the transport credential the Balancer implementation can - // use to dial to a remote load balancer server. The Balancer implementations - // can ignore this if it does not need to talk to another party securely. - DialCreds credentials.TransportCredentials - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. - Dialer func(context.Context, string) (net.Conn, error) -} - -// BalancerGetOptions configures a Get call. -// -// Deprecated: please use package balancer. May be removed in a future 1.x release. -type BalancerGetOptions struct { - // BlockingWait specifies whether Get should block when there is no - // connected address. - BlockingWait bool -} - -// Balancer chooses network addresses for RPCs. -// -// Deprecated: please use package balancer. May be removed in a future 1.x release. -type Balancer interface { - // Start does the initialization work to bootstrap a Balancer. For example, - // this function may start the name resolution and watch the updates. 
It will - // be called when dialing. - Start(target string, config BalancerConfig) error - // Up informs the Balancer that gRPC has a connection to the server at - // addr. It returns down which is called once the connection to addr gets - // lost or closed. - // TODO: It is not clear how to construct and take advantage of the meaningful error - // parameter for down. Need realistic demands to guide. - Up(addr Address) (down func(error)) - // Get gets the address of a server for the RPC corresponding to ctx. - // i) If it returns a connected address, gRPC internals issues the RPC on the - // connection to this address; - // ii) If it returns an address on which the connection is under construction - // (initiated by Notify(...)) but not connected, gRPC internals - // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or - // Shutdown state; - // or - // * issues RPC on the connection otherwise. - // iii) If it returns an address on which the connection does not exist, gRPC - // internals treats it as an error and will fail the corresponding RPC. - // - // Therefore, the following is the recommended rule when writing a custom Balancer. - // If opts.BlockingWait is true, it should return a connected address or - // block if there is no connected address. It should respect the timeout or - // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast - // RPCs), it should return an address it has notified via Notify(...) immediately - // instead of blocking. - // - // The function returns put which is called once the rpc has completed or failed. - // put can collect and report RPC stats to a remote load balancer. - // - // This function should only return the errors Balancer cannot recover by itself. - // gRPC internals will fail the RPC if an error is returned. - Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) - // Notify returns a channel that is used by gRPC internals to watch the addresses - // gRPC needs to connect. The addresses might be from a name resolver or remote - // load balancer. gRPC internals will compare it with the existing connected - // addresses. If the address Balancer notified is not in the existing connected - // addresses, gRPC starts to connect the address. If an address in the existing - // connected addresses is not in the notification list, the corresponding connection - // is shutdown gracefully. Otherwise, there are no operations to take. Note that - // the Address slice must be the full list of the Addresses which should be connected. - // It is NOT delta. - Notify() <-chan []Address - // Close shuts down the balancer. - Close() error -} - -// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch -// the name resolution updates and updates the addresses available correspondingly. -// -// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release. -func RoundRobin(r naming.Resolver) Balancer { - return &roundRobin{r: r} -} - -type addrInfo struct { - addr Address - connected bool -} - -type roundRobin struct { - r naming.Resolver - w naming.Watcher - addrs []*addrInfo // all the addresses the client should potentially connect - mu sync.Mutex - addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. 
- next int // index of the next address to return for Get() - waitCh chan struct{} // the channel to block when there is no connected address available - done bool // The Balancer is closed. -} - -func (rr *roundRobin) watchAddrUpdates() error { - updates, err := rr.w.Next() - if err != nil { - grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) - return err - } - rr.mu.Lock() - defer rr.mu.Unlock() - for _, update := range updates { - addr := Address{ - Addr: update.Addr, - Metadata: update.Metadata, - } - switch update.Op { - case naming.Add: - var exist bool - for _, v := range rr.addrs { - if addr == v.addr { - exist = true - grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) - break - } - } - if exist { - continue - } - rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) - case naming.Delete: - for i, v := range rr.addrs { - if addr == v.addr { - copy(rr.addrs[i:], rr.addrs[i+1:]) - rr.addrs = rr.addrs[:len(rr.addrs)-1] - break - } - } - default: - grpclog.Errorln("Unknown update.Op ", update.Op) - } - } - // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. - open := make([]Address, len(rr.addrs)) - for i, v := range rr.addrs { - open[i] = v.addr - } - if rr.done { - return ErrClientConnClosing - } - select { - case <-rr.addrCh: - default: - } - rr.addrCh <- open - return nil -} - -func (rr *roundRobin) Start(target string, config BalancerConfig) error { - rr.mu.Lock() - defer rr.mu.Unlock() - if rr.done { - return ErrClientConnClosing - } - if rr.r == nil { - // If there is no name resolver installed, it is not needed to - // do name resolution. In this case, target is added into rr.addrs - // as the only address available and rr.addrCh stays nil. - rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) - return nil - } - w, err := rr.r.Resolve(target) - if err != nil { - return err - } - rr.w = w - rr.addrCh = make(chan []Address, 1) - go func() { - for { - if err := rr.watchAddrUpdates(); err != nil { - return - } - } - }() - return nil -} - -// Up sets the connected state of addr and sends notification if there are pending -// Get() calls. -func (rr *roundRobin) Up(addr Address) func(error) { - rr.mu.Lock() - defer rr.mu.Unlock() - var cnt int - for _, a := range rr.addrs { - if a.addr == addr { - if a.connected { - return nil - } - a.connected = true - } - if a.connected { - cnt++ - } - } - // addr is only one which is connected. Notify the Get() callers who are blocking. - if cnt == 1 && rr.waitCh != nil { - close(rr.waitCh) - rr.waitCh = nil - } - return func(err error) { - rr.down(addr, err) - } -} - -// down unsets the connected state of addr. -func (rr *roundRobin) down(addr Address, err error) { - rr.mu.Lock() - defer rr.mu.Unlock() - for _, a := range rr.addrs { - if addr == a.addr { - a.connected = false - break - } - } -} - -// Get returns the next addr in the rotation. -func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { - var ch chan struct{} - rr.mu.Lock() - if rr.done { - rr.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(rr.addrs) > 0 { - if rr.next >= len(rr.addrs) { - rr.next = 0 - } - next := rr.next - for { - a := rr.addrs[next] - next = (next + 1) % len(rr.addrs) - if a.connected { - addr = a.addr - rr.next = next - rr.mu.Unlock() - return - } - if next == rr.next { - // Has iterated all the possible address but none is connected. 
- break - } - } - } - if !opts.BlockingWait { - if len(rr.addrs) == 0 { - rr.mu.Unlock() - err = status.Errorf(codes.Unavailable, "there is no address available") - return - } - // Returns the next addr on rr.addrs for failfast RPCs. - addr = rr.addrs[rr.next].addr - rr.next++ - rr.mu.Unlock() - return - } - // Wait on rr.waitCh for non-failfast RPCs. - if rr.waitCh == nil { - ch = make(chan struct{}) - rr.waitCh = ch - } else { - ch = rr.waitCh - } - rr.mu.Unlock() - for { - select { - case <-ctx.Done(): - err = ctx.Err() - return - case <-ch: - rr.mu.Lock() - if rr.done { - rr.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(rr.addrs) > 0 { - if rr.next >= len(rr.addrs) { - rr.next = 0 - } - next := rr.next - for { - a := rr.addrs[next] - next = (next + 1) % len(rr.addrs) - if a.connected { - addr = a.addr - rr.next = next - rr.mu.Unlock() - return - } - if next == rr.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - // The newly added addr got removed by Down() again. - if rr.waitCh == nil { - ch = make(chan struct{}) - rr.waitCh = ch - } else { - ch = rr.waitCh - } - rr.mu.Unlock() - } - } -} - -func (rr *roundRobin) Notify() <-chan []Address { - return rr.addrCh -} - -func (rr *roundRobin) Close() error { - rr.mu.Lock() - defer rr.mu.Unlock() - if rr.done { - return errBalancerClosed - } - rr.done = true - if rr.w != nil { - rr.w.Close() - } - if rr.waitCh != nil { - close(rr.waitCh) - rr.waitCh = nil - } - if rr.addrCh != nil { - close(rr.addrCh) - } - return nil -} - -// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. -// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() -// returns the only address Up by resetTransport(). -type pickFirst struct { - *roundRobin -} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 531a174a8..788759bde 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -111,6 +111,9 @@ type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created // SubConn. If it's nil, the original creds from grpc DialOptions will be // used. + // + // Deprecated: Use the Attributes field in resolver.Address to pass + // arbitrary data to the credential handshaker. CredsBundle credentials.Bundle // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn @@ -123,7 +126,7 @@ type State struct { // determine the state of the ClientConn. ConnectivityState connectivity.State // Picker is used to choose connections (SubConns) for RPCs. - Picker V2Picker + Picker Picker } // ClientConn represents a gRPC ClientConn. @@ -141,20 +144,11 @@ type ClientConn interface { // The SubConn will be shutdown. RemoveSubConn(SubConn) - // UpdateBalancerState is called by balancer to notify gRPC that some internal - // state in balancer has changed. - // - // gRPC will update the connectivity state of the ClientConn, and will call pick - // on the new picker to pick new SubConn. - // - // Deprecated: use UpdateState instead - UpdateBalancerState(s connectivity.State, p Picker) - // UpdateState notifies gRPC that the balancer's internal state has // changed. // - // gRPC will update the connectivity state of the ClientConn, and will call pick - // on the new picker to pick new SubConns. 
+ // gRPC will update the connectivity state of the ClientConn, and will call + // Pick on the new Picker to pick new SubConns. UpdateState(State) // ResolveNow is called by balancer to notify gRPC to do a name resolving. @@ -180,6 +174,10 @@ type BuildOptions struct { Dialer func(context.Context, string) (net.Conn, error) // ChannelzParentID is the entity parent's channelz unique identification number. ChannelzParentID int64 + // CustomUserAgent is the custom user agent set on the parent ClientConn. + // The balancer should set the same custom user agent if it creates a + // ClientConn. + CustomUserAgent string // Target contains the parsed address info of the dial target. It is the same resolver.Target as // passed to the resolver. // See the documentation for the resolver.Target type for details about what it contains. @@ -203,11 +201,6 @@ type ConfigParser interface { ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) } -// PickOptions is a type alias of PickInfo for legacy reasons. -// -// Deprecated: use PickInfo instead. -type PickOptions = PickInfo - // PickInfo contains additional information for the Pick operation. type PickInfo struct { // FullMethodName is the method name that NewClientStream() is called @@ -237,56 +230,17 @@ type DoneInfo struct { var ( // ErrNoSubConnAvailable indicates no SubConn is available for pick(). - // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). + // gRPC will block the RPC until a new picker is available via UpdateState(). ErrNoSubConnAvailable = errors.New("no SubConn is available") // ErrTransientFailure indicates all SubConns are in TransientFailure. // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. - ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure")) + // + // Deprecated: return an appropriate error based on the last resolution or + // connection attempt instead. The behavior is the same for any non-gRPC + // status error. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") ) -// Picker is used by gRPC to pick a SubConn to send an RPC. -// Balancer is expected to generate a new picker from its snapshot every time its -// internal state has changed. -// -// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). -// -// Deprecated: use V2Picker instead -type Picker interface { - // Pick returns the SubConn to be used to send the RPC. - // The returned SubConn must be one returned by NewSubConn(). - // - // This functions is expected to return: - // - a SubConn that is known to be READY; - // - ErrNoSubConnAvailable if no SubConn is available, but progress is being - // made (for example, some SubConn is in CONNECTING mode); - // - other errors if no active connecting is happening (for example, all SubConn - // are in TRANSIENT_FAILURE mode). - // - // If a SubConn is returned: - // - If it is READY, gRPC will send the RPC on it; - // - If it is not ready, or becomes not ready after it's returned, gRPC will - // block until UpdateBalancerState() is called and will call pick on the - // new picker. The done function returned from Pick(), if not nil, will be - // called with nil error, no bytes sent and no bytes received. 
- // - // If the returned error is not nil: - // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() - // - If the error is ErrTransientFailure or implements IsTransientFailure() - // bool, returning true: - // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() - // is called to pick again; - // - Otherwise, RPC will fail with unavailable error. - // - Else (error is other non-nil error): - // - The RPC will fail with the error's status code, or Unknown if it is - // not a status error. - // - // The returned done() function will be called once the rpc has finished, - // with the final status of that RPC. If the SubConn returned is not a - // valid SubConn type, done may not be called. done may be nil if balancer - // doesn't care about the RPC status. - Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error) -} - // PickResult contains information related to a connection chosen for an RPC. type PickResult struct { // SubConn is the connection to use for this pick, if its state is Ready. @@ -302,24 +256,19 @@ type PickResult struct { Done func(DoneInfo) } -type transientFailureError struct { - error -} +// TransientFailureError returns e. It exists for backward compatibility and +// will be deleted soon. +// +// Deprecated: no longer necessary, picker errors are treated this way by +// default. +func TransientFailureError(e error) error { return e } -func (e *transientFailureError) IsTransientFailure() bool { return true } - -// TransientFailureError wraps err in an error implementing -// IsTransientFailure() bool, returning true. -func TransientFailureError(err error) error { - return &transientFailureError{error: err} -} - -// V2Picker is used by gRPC to pick a SubConn to send an RPC. +// Picker is used by gRPC to pick a SubConn to send an RPC. // Balancer is expected to generate a new picker from its snapshot every time its // internal state has changed. // -// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). -type V2Picker interface { +// The pickers used by gRPC can be updated by ClientConn.UpdateState(). +type Picker interface { // Pick returns the connection to use for this RPC and related information. // // Pick should not block. If the balancer needs to do I/O or any blocking @@ -332,14 +281,13 @@ type V2Picker interface { // - If the error is ErrNoSubConnAvailable, gRPC will block until a new // Picker is provided by the balancer (using ClientConn.UpdateState). // - // - If the error implements IsTransientFailure() bool, returning true, - // wait for ready RPCs will wait, but non-wait for ready RPCs will be - // terminated with this error's Error() string and status code - // Unavailable. + // - If the error is a status error (implemented by the grpc/status + // package), gRPC will terminate the RPC with the code and message + // provided. // - // - Any other errors terminate all RPCs with the code and message - // provided. If the error is not a status error, it will be converted by - // gRPC to a status error with code Unknown. + // - For all other errors, wait for ready RPCs will wait, but non-wait for + // ready RPCs will be terminated with this error's Error() string and + // status code Unavailable. Pick(info PickInfo) (PickResult, error) } @@ -348,29 +296,21 @@ type V2Picker interface { // // It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. 
// -// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed -// to be called synchronously from the same goroutine. -// There's no guarantee on picker.Pick, it may be called anytime. +// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are +// guaranteed to be called synchronously from the same goroutine. There's no +// guarantee on picker.Pick, it may be called anytime. type Balancer interface { - // HandleSubConnStateChange is called by gRPC when the connectivity state - // of sc has changed. - // Balancer is expected to aggregate all the state of SubConn and report - // that back to gRPC. - // Balancer should also generate and update Pickers when its internal state has - // been changed by the new state. - // - // Deprecated: if V2Balancer is implemented by the Balancer, - // UpdateSubConnState will be called instead. - HandleSubConnStateChange(sc SubConn, state connectivity.State) - // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to - // balancers. - // Balancer can create new SubConn or remove SubConn with the addresses. - // An empty address slice and a non-nil error will be passed if the resolver returns - // non-nil error to gRPC. - // - // Deprecated: if V2Balancer is implemented by the Balancer, - // UpdateClientConnState will be called instead. - HandleResolvedAddrs([]resolver.Address, error) + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. + UpdateSubConnState(SubConn, SubConnState) // Close closes the balancer. The balancer is not required to call // ClientConn.RemoveSubConn for its existing SubConns. Close() @@ -398,27 +338,6 @@ type ClientConnState struct { // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") -// V2Balancer is defined for documentation purposes. If a Balancer also -// implements V2Balancer, its UpdateClientConnState method will be called -// instead of HandleResolvedAddrs and its UpdateSubConnState will be called -// instead of HandleSubConnStateChange. -type V2Balancer interface { - // UpdateClientConnState is called by gRPC when the state of the ClientConn - // changes. If the error returned is ErrBadResolverState, the ClientConn - // will begin calling ResolveNow on the active name resolver with - // exponential backoff until a subsequent call to UpdateClientConnState - // returns a nil error. Any other errors are currently ignored. - UpdateClientConnState(ClientConnState) error - // ResolverError is called by gRPC when the name resolver reports an error. - ResolverError(error) - // UpdateSubConnState is called by gRPC when the state of a SubConn - // changes. - UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. - Close() -} - // ConnectivityStateEvaluator takes the connectivity states of multiple SubConns // and returns one aggregated connectivity state. 
// diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index d952f09f3..e0d34288c 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -19,8 +19,8 @@ package base import ( - "context" "errors" + "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" @@ -28,18 +28,18 @@ import ( "google.golang.org/grpc/resolver" ) +var logger = grpclog.Component("balancer") + type baseBuilder struct { - name string - pickerBuilder PickerBuilder - v2PickerBuilder V2PickerBuilder - config Config + name string + pickerBuilder PickerBuilder + config Config } func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { bal := &baseBalancer{ - cc: cc, - pickerBuilder: bb.pickerBuilder, - v2PickerBuilder: bb.v2PickerBuilder, + cc: cc, + pickerBuilder: bb.pickerBuilder, subConns: make(map[resolver.Address]balancer.SubConn), scStates: make(map[balancer.SubConn]connectivity.State), @@ -49,11 +49,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) // Initialize picker to a picker that always returns // ErrNoSubConnAvailable, because when state of a SubConn changes, we // may call UpdateState with this picker. - if bb.pickerBuilder != nil { - bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) - } else { - bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable) - } + bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) return bal } @@ -61,58 +57,85 @@ func (bb *baseBuilder) Name() string { return bb.name } -var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer - type baseBalancer struct { - cc balancer.ClientConn - pickerBuilder PickerBuilder - v2PickerBuilder V2PickerBuilder + cc balancer.ClientConn + pickerBuilder PickerBuilder csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns map[resolver.Address]balancer.SubConn + subConns map[resolver.Address]balancer.SubConn // `attributes` is stripped from the keys of this map (the addresses) scStates map[balancer.SubConn]connectivity.State picker balancer.Picker - v2Picker balancer.V2Picker config Config -} -func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - panic("not implemented") + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure } func (b *baseBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - if b.picker != nil { - b.picker = NewErrPicker(err) - } else { - b.v2Picker = NewErrPickerV2(err) - } + b.resolverErr = err + if len(b.subConns) == 0 { + b.state = connectivity.TransientFailure } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.picker, + }) } func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - // TODO: handle s.ResolverState.Err (log if not nil) once implemented. // TODO: handle s.ResolverState.ServiceConfig? 
- if grpclog.V(2) { - grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) + if logger.V(2) { + logger.Info("base.baseBalancer: got new ClientConn state: ", s) } + // Successful resolution; clear resolver error and ensure we return nil. + b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. addrsSet := make(map[resolver.Address]struct{}) for _, a := range s.ResolverState.Addresses { - addrsSet[a] = struct{}{} - if _, ok := b.subConns[a]; !ok { + // Strip attributes from addresses before using them as map keys. So + // that when two addresses only differ in attributes pointers (but with + // the same attribute content), they are considered the same address. + // + // Note that this doesn't handle the case where the attribute content is + // different. So if users want to set different attributes to create + // duplicate connections to the same backend, it doesn't work. This is + // fine for now, because duplicate is done by setting Metadata today. + // + // TODO: read attributes to handle duplicate connections. + aNoAttrs := a + aNoAttrs.Attributes = nil + addrsSet[aNoAttrs] = struct{}{} + if sc, ok := b.subConns[aNoAttrs]; !ok { // a is a new address (not existing in b.subConns). + // + // When creating SubConn, the original address with attributes is + // passed through. So that connection configurations in attributes + // (like creds) will be used. sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) if err != nil { - grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } - b.subConns[a] = sc + b.subConns[aNoAttrs] = sc b.scStates[sc] = connectivity.Idle sc.Connect() + } else { + // Always update the subconn's address in case the attributes + // changed. + // + // The SubConn does a reflect.DeepEqual of the new and old + // addresses. So this is a noop if the current address is the same + // as the old one (including attributes). + sc.UpdateAddresses([]resolver.Address{a}) } } for a, sc := range b.subConns { @@ -121,72 +144,72 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { b.cc.RemoveSubConn(sc) delete(b.subConns, a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in HandleSubConnStateChange. + // The entry will be deleted in UpdateSubConnState. } } + // If resolver state contains no addresses, return an error so ClientConn + // will trigger re-resolve. Also records this as an resolver error, so when + // the overall state turns transient failure, the error message will have + // the zero address information. + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } return nil } -// regeneratePicker takes a snapshot of the balancer, and generates a picker -// from it. The picker is -// - errPicker with ErrTransientFailure if the balancer is in TransientFailure, -// - built by the pickerBuilder with all READY SubConns otherwise. 
-func (b *baseBalancer) regeneratePicker(err error) { - if b.state == connectivity.TransientFailure { - if b.pickerBuilder != nil { - b.picker = NewErrPicker(balancer.ErrTransientFailure) - } else { - if err != nil { - b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(err)) - } else { - // This means the last subchannel transition was not to - // TransientFailure (otherwise err must be set), but the - // aggregate state of the balancer is TransientFailure, meaning - // there are no other addresses. - b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(errors.New("resolver returned no addresses"))) - } - } - return +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.state is TransientFailure. +func (b *baseBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) } - if b.pickerBuilder != nil { - readySCs := make(map[resolver.Address]balancer.SubConn) - - // Filter out all ready SCs from full subConn map. - for addr, sc := range b.subConns { - if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { - readySCs[addr] = sc - } - } - b.picker = b.pickerBuilder.Build(readySCs) - } else { - readySCs := make(map[balancer.SubConn]SubConnInfo) - - // Filter out all ready SCs from full subConn map. - for addr, sc := range b.subConns { - if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { - readySCs[sc] = SubConnInfo{Address: addr} - } - } - b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) } -func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - panic("not implemented") +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker is +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. +func (b *baseBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = NewErrPicker(b.mergeErrors()) + return + } + readySCs := make(map[balancer.SubConn]SubConnInfo) + + // Filter out all ready SCs from full subConn map. 
+ for addr, sc := range b.subConns { + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} + } + } + b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState - if grpclog.V(2) { - grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) } oldS, ok := b.scStates[sc] if !ok { - if grpclog.V(2) { - grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) } return } + if oldS == connectivity.TransientFailure && s == connectivity.Connecting { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + return + } b.scStates[sc] = s switch s { case connectivity.Idle: @@ -195,26 +218,23 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su // When an address was removed by resolver, b called RemoveSubConn but // kept the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. + b.connErr = state.ConnectionError } - oldAggrState := b.state b.state = b.csEvltr.RecordTransition(oldS, s) // Regenerate picker when one of the following happens: - // - this sc became ready from not-ready - // - this sc became not-ready from ready - // - the aggregated state of balancer became TransientFailure from non-TransientFailure - // - the aggregated state of balancer became non-TransientFailure from TransientFailure + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) if (s == connectivity.Ready) != (oldS == connectivity.Ready) || - (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { - b.regeneratePicker(state.ConnectionError) + b.state == connectivity.TransientFailure { + b.regeneratePicker() } - if b.picker != nil { - b.cc.UpdateBalancerState(b.state, b.picker) - } else { - b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker}) - } + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } // Close is a nop because base balancer doesn't have internal state to clean up, @@ -222,28 +242,20 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su func (b *baseBalancer) Close() { } -// NewErrPicker returns a picker that always returns err on Pick(). +// NewErrPicker returns a Picker that always returns err on Pick(). func NewErrPicker(err error) balancer.Picker { return &errPicker{err: err} } +// NewErrPickerV2 is temporarily defined for backward compatibility reasons. +// +// Deprecated: use NewErrPicker instead. +var NewErrPickerV2 = NewErrPicker + type errPicker struct { err error // Pick() always returns this err. } -func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { - return nil, nil, p.err -} - -// NewErrPickerV2 returns a V2Picker that always returns err on Pick(). 
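For any consumer that registers a custom balancer on top of the base package, the V2 names collapse into the plain ones with this bump: PickerBuilder.Build now takes PickerBuildInfo and returns balancer.Picker, NewErrPickerV2 becomes an alias for NewErrPicker, and NewBalancerBuilder takes the Config that NewBalancerBuilderV2 used to take. A hedged migration sketch; the "first_ready" balancer and its types are invented for illustration:

package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

type firstReadyPickerBuilder struct{}

// Build satisfies base.PickerBuilder; before this bump the same method had to
// satisfy base.V2PickerBuilder and return a balancer.V2Picker.
func (*firstReadyPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	if len(info.ReadySCs) == 0 {
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable) // was NewErrPickerV2
	}
	for sc := range info.ReadySCs {
		return &firstReadyPicker{sc: sc} // pick an arbitrary ready SubConn
	}
	return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
}

type firstReadyPicker struct{ sc balancer.SubConn }

func (p *firstReadyPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}

func init() {
	// Was: base.NewBalancerBuilderV2("first_ready", &firstReadyPickerBuilder{}, base.Config{HealthCheck: true})
	balancer.Register(base.NewBalancerBuilder("first_ready", &firstReadyPickerBuilder{}, base.Config{HealthCheck: true}))
}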
-func NewErrPickerV2(err error) balancer.V2Picker { - return &errPickerV2{err: err} -} - -type errPickerV2 struct { - err error // Pick() always returns this err. -} - -func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go index 4192918b9..e31d76e33 100644 --- a/vendor/google.golang.org/grpc/balancer/base/base.go +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -37,15 +37,8 @@ import ( // PickerBuilder creates balancer.Picker. type PickerBuilder interface { - // Build takes a slice of ready SubConns, and returns a picker that will be - // used by gRPC to pick a SubConn. - Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker -} - -// V2PickerBuilder creates balancer.V2Picker. -type V2PickerBuilder interface { // Build returns a picker that will be used by gRPC to pick a SubConn. - Build(info PickerBuildInfo) balancer.V2Picker + Build(info PickerBuildInfo) balancer.Picker } // PickerBuildInfo contains information needed by the picker builder to @@ -62,32 +55,17 @@ type SubConnInfo struct { Address resolver.Address // the address used to create this SubConn } -// NewBalancerBuilder returns a balancer builder. The balancers -// built by this builder will use the picker builder to build pickers. -func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { - return NewBalancerBuilderWithConfig(name, pb, Config{}) -} - // Config contains the config info about the base balancer builder. type Config struct { // HealthCheck indicates whether health checking should be enabled for this specific balancer. HealthCheck bool } -// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config. -func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder { +// NewBalancerBuilder returns a base balancer builder configured by the provided config. +func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder { return &baseBuilder{ name: name, pickerBuilder: pb, config: config, } } - -// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config. -func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder { - return &baseBuilder{ - name: name, - v2PickerBuilder: pb, - config: config, - } -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go new file mode 100644 index 000000000..a24264a34 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package state declares grpclb types to be set by resolvers wishing to pass +// information to grpclb via resolver.State Attributes. +package state + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.grpclb.state") + +// State contains gRPCLB-relevant data passed from the name resolver. +type State struct { + // BalancerAddresses contains the remote load balancer address(es). If + // set, overrides any resolver-provided addresses with Type of GRPCLB. + BalancerAddresses []resolver.Address +} + +// Set returns a copy of the provided state with attributes containing s. s's +// data should not be mutated after calling Set. +func Set(state resolver.State, s *State) resolver.State { + state.Attributes = state.Attributes.WithValues(key, s) + return state +} + +// Get returns the grpclb State in the resolver.State, or nil if not present. +// The returned data should not be mutated. +func Get(state resolver.State) *State { + s, _ := state.Attributes.Value(key).(*State) + return s +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index d4d645501..43c2a1537 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -33,9 +33,11 @@ import ( // Name is the name of round_robin balancer. const Name = "round_robin" +var logger = grpclog.Component("roundrobin") + // newBuilder creates a new roundrobin balancer builder. func newBuilder() balancer.Builder { - return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) + return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) } func init() { @@ -44,10 +46,10 @@ func init() { type rrPickerBuilder struct{} -func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker { - grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info) +func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { + logger.Infof("roundrobinPicker: newPicker called with info: %v", info) if len(info.ReadySCs) == 0 { - return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable) + return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } var scs []balancer.SubConn for sc := range info.ReadySCs { diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 824f28e74..11e592aab 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -24,8 +24,8 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) @@ -74,11 +74,7 @@ func (ccb *ccBalancerWrapper) watcher() { } ccb.balancerMu.Lock() su := t.(*scStateUpdate) - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) - } else { - ccb.balancer.HandleSubConnStateChange(su.sc, su.state) - } + ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) ccb.balancerMu.Unlock() case <-ccb.done.Done(): } @@ 
-123,19 +119,13 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { ccb.balancerMu.Lock() defer ccb.balancerMu.Unlock() - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - return ub.UpdateClientConnState(*ccs) - } - ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil) - return nil + return ccb.balancer.UpdateClientConnState(*ccs) } func (ccb *ccBalancerWrapper) resolverError(err error) { - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ccb.balancerMu.Lock() - ub.ResolverError(err) - ccb.balancerMu.Unlock() - } + ccb.balancerMu.Lock() + ccb.balancer.ResolverError(err) + ccb.balancerMu.Unlock() } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { @@ -173,21 +163,6 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } -func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } - // Update picker before updating state. Even though the ordering here does - // not matter, it can lead to multiple calls of Pick in the common start-up - // case where we wait for ready and then perform an RPC. If the picker is - // updated later, we could call the "connecting" picker when the state is - // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePicker(p) - ccb.cc.csMgr.updateState(s) -} - func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { ccb.mu.Lock() defer ccb.mu.Unlock() @@ -199,7 +174,7 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { // case where we wait for ready and then perform an RPC. If the picker is // updated later, we could call the "connecting" picker when the state is // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePickerV2(s.Picker) + ccb.cc.blockingpicker.updatePicker(s.Picker) ccb.cc.csMgr.updateState(s.ConnectivityState) } @@ -245,7 +220,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { ac, err := cc.newAddrConn(addrs, opts) if err != nil { - grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) return } acbw.ac = ac diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go deleted file mode 100644 index db04b08b8..000000000 --- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go +++ /dev/null @@ -1,334 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpc - -import ( - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -type balancerWrapperBuilder struct { - b Balancer // The v1 balancer. -} - -func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - bwb.b.Start(opts.Target.Endpoint, BalancerConfig{ - DialCreds: opts.DialCreds, - Dialer: opts.Dialer, - }) - _, pickfirst := bwb.b.(*pickFirst) - bw := &balancerWrapper{ - balancer: bwb.b, - pickfirst: pickfirst, - cc: cc, - targetAddr: opts.Target.Endpoint, - startCh: make(chan struct{}), - conns: make(map[resolver.Address]balancer.SubConn), - connSt: make(map[balancer.SubConn]*scState), - csEvltr: &balancer.ConnectivityStateEvaluator{}, - state: connectivity.Idle, - } - cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw}) - go bw.lbWatcher() - return bw -} - -func (bwb *balancerWrapperBuilder) Name() string { - return "wrapper" -} - -type scState struct { - addr Address // The v1 address type. - s connectivity.State - down func(error) -} - -type balancerWrapper struct { - balancer Balancer // The v1 balancer. - pickfirst bool - - cc balancer.ClientConn - targetAddr string // Target without the scheme. - - mu sync.Mutex - conns map[resolver.Address]balancer.SubConn - connSt map[balancer.SubConn]*scState - // This channel is closed when handling the first resolver result. - // lbWatcher blocks until this is closed, to avoid race between - // - NewSubConn is created, cc wants to notify balancer of state changes; - // - Build hasn't return, cc doesn't have access to balancer. - startCh chan struct{} - - // To aggregate the connectivity state. - csEvltr *balancer.ConnectivityStateEvaluator - state connectivity.State -} - -// lbWatcher watches the Notify channel of the balancer and manages -// connections accordingly. -func (bw *balancerWrapper) lbWatcher() { - <-bw.startCh - notifyCh := bw.balancer.Notify() - if notifyCh == nil { - // There's no resolver in the balancer. Connect directly. - a := resolver.Address{ - Addr: bw.targetAddr, - Type: resolver.Backend, - } - sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) - } else { - bw.mu.Lock() - bw.conns[a] = sc - bw.connSt[sc] = &scState{ - addr: Address{Addr: bw.targetAddr}, - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - return - } - - for addrs := range notifyCh { - grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs) - if bw.pickfirst { - var ( - oldA resolver.Address - oldSC balancer.SubConn - ) - bw.mu.Lock() - for oldA, oldSC = range bw.conns { - break - } - bw.mu.Unlock() - if len(addrs) <= 0 { - if oldSC != nil { - // Teardown old sc. - bw.mu.Lock() - delete(bw.conns, oldA) - delete(bw.connSt, oldSC) - bw.mu.Unlock() - bw.cc.RemoveSubConn(oldSC) - } - continue - } - - var newAddrs []resolver.Address - for _, a := range addrs { - newAddr := resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, // All addresses from balancer are all backends. - ServerName: "", - Metadata: a.Metadata, - } - newAddrs = append(newAddrs, newAddr) - } - if oldSC == nil { - // Create new sc. - sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. 
Err: %v", newAddrs, err) - } else { - bw.mu.Lock() - // For pickfirst, there should be only one SubConn, so the - // address doesn't matter. All states updating (up and down) - // and picking should all happen on that only SubConn. - bw.conns[resolver.Address{}] = sc - bw.connSt[sc] = &scState{ - addr: addrs[0], // Use the first address. - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - } else { - bw.mu.Lock() - bw.connSt[oldSC].addr = addrs[0] - bw.mu.Unlock() - oldSC.UpdateAddresses(newAddrs) - } - } else { - var ( - add []resolver.Address // Addresses need to setup connections. - del []balancer.SubConn // Connections need to tear down. - ) - resAddrs := make(map[resolver.Address]Address) - for _, a := range addrs { - resAddrs[resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, // All addresses from balancer are all backends. - ServerName: "", - Metadata: a.Metadata, - }] = a - } - bw.mu.Lock() - for a := range resAddrs { - if _, ok := bw.conns[a]; !ok { - add = append(add, a) - } - } - for a, c := range bw.conns { - if _, ok := resAddrs[a]; !ok { - del = append(del, c) - delete(bw.conns, a) - // Keep the state of this sc in bw.connSt until its state becomes Shutdown. - } - } - bw.mu.Unlock() - for _, a := range add { - sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) - } else { - bw.mu.Lock() - bw.conns[a] = sc - bw.connSt[sc] = &scState{ - addr: resAddrs[a], - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - } - for _, c := range del { - bw.cc.RemoveSubConn(c) - } - } - } -} - -func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - bw.mu.Lock() - defer bw.mu.Unlock() - scSt, ok := bw.connSt[sc] - if !ok { - return - } - if s == connectivity.Idle { - sc.Connect() - } - oldS := scSt.s - scSt.s = s - if oldS != connectivity.Ready && s == connectivity.Ready { - scSt.down = bw.balancer.Up(scSt.addr) - } else if oldS == connectivity.Ready && s != connectivity.Ready { - if scSt.down != nil { - scSt.down(errConnClosing) - } - } - sa := bw.csEvltr.RecordTransition(oldS, s) - if bw.state != sa { - bw.state = sa - } - bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw}) - if s == connectivity.Shutdown { - // Remove state for this sc. - delete(bw.connSt, sc) - } -} - -func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { - bw.mu.Lock() - defer bw.mu.Unlock() - select { - case <-bw.startCh: - default: - close(bw.startCh) - } - // There should be a resolver inside the balancer. - // All updates here, if any, are ignored. -} - -func (bw *balancerWrapper) Close() { - bw.mu.Lock() - defer bw.mu.Unlock() - select { - case <-bw.startCh: - default: - close(bw.startCh) - } - bw.balancer.Close() -} - -// The picker is the balancerWrapper itself. -// It either blocks or returns error, consistent with v1 balancer Get(). -func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) { - failfast := true // Default failfast is true. 
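With balancer_v1_wrapper.go deleted, the legacy v1 grpc.Balancer plumbing shown above is gone from the vendored tree, and any dial path that still relied on it has to select a registered balancer by name instead. A minimal sketch using only dial options that exist in grpc v1.35; the function name and target handling are placeholders:

package example

import (
	"context"

	"google.golang.org/grpc"
)

func dialRoundRobin(ctx context.Context, target string) (*grpc.ClientConn, error) {
	return grpc.DialContext(ctx, target,
		grpc.WithInsecure(), // placeholder; real callers should supply transport credentials
		// Selects the registered "round_robin" balancer via service config,
		// replacing the removed v1 wrapper path.
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin": {}}]}`),
	)
}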
- if ss, ok := rpcInfoFromContext(info.Ctx); ok { - failfast = ss.failfast - } - a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast}) - if err != nil { - return balancer.PickResult{}, toRPCErr(err) - } - if p != nil { - result.Done = func(balancer.DoneInfo) { p() } - defer func() { - if err != nil { - p() - } - }() - } - - bw.mu.Lock() - defer bw.mu.Unlock() - if bw.pickfirst { - // Get the first sc in conns. - for _, result.SubConn = range bw.conns { - return result, nil - } - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable - } - var ok1 bool - result.SubConn, ok1 = bw.conns[resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, - ServerName: "", - Metadata: a.Metadata, - }] - s, ok2 := bw.connSt[result.SubConn] - if !ok1 || !ok2 { - // This can only happen due to a race where Get() returned an address - // that was subsequently removed by Notify. In this case we should - // retry always. - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable - } - switch s.s { - case connectivity.Ready, connectivity.Idle: - return result, nil - case connectivity.Shutdown, connectivity.TransientFailure: - // If the returned sc has been shut down or is in transient failure, - // return error, and this RPC will fail or wait for another picker (if - // non-failfast). - return balancer.PickResult{}, balancer.ErrTransientFailure - default: - // For other states (connecting or unknown), the v1 balancer would - // traditionally wait until ready and then issue the RPC. Returning - // ErrNoSubConnAvailable will be a slight improvement in that it will - // allow the balancer to choose another address in case others are - // connected. - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable - } -} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index f393bb661..ed75290cd 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -1,24 +1,49 @@ +// Copyright 2018 The gRPC Authors +// All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto + // Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: grpc/binlog/v1/binarylog.proto -package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1" +package grpc_binarylog_v1 -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import duration "github.com/golang/protobuf/ptypes/duration" -import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // Enumerates the type of event // Note the terminology is different from the RPC semantics @@ -54,32 +79,55 @@ const ( GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 ) -var GrpcLogEntry_EventType_name = map[int32]string{ - 0: "EVENT_TYPE_UNKNOWN", - 1: "EVENT_TYPE_CLIENT_HEADER", - 2: "EVENT_TYPE_SERVER_HEADER", - 3: "EVENT_TYPE_CLIENT_MESSAGE", - 4: "EVENT_TYPE_SERVER_MESSAGE", - 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", - 6: "EVENT_TYPE_SERVER_TRAILER", - 7: "EVENT_TYPE_CANCEL", -} -var GrpcLogEntry_EventType_value = map[string]int32{ - "EVENT_TYPE_UNKNOWN": 0, - "EVENT_TYPE_CLIENT_HEADER": 1, - "EVENT_TYPE_SERVER_HEADER": 2, - "EVENT_TYPE_CLIENT_MESSAGE": 3, - "EVENT_TYPE_SERVER_MESSAGE": 4, - "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, - "EVENT_TYPE_SERVER_TRAILER": 6, - "EVENT_TYPE_CANCEL": 7, +// Enum value maps for GrpcLogEntry_EventType. 
+var ( + GrpcLogEntry_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNKNOWN", + 1: "EVENT_TYPE_CLIENT_HEADER", + 2: "EVENT_TYPE_SERVER_HEADER", + 3: "EVENT_TYPE_CLIENT_MESSAGE", + 4: "EVENT_TYPE_SERVER_MESSAGE", + 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", + 6: "EVENT_TYPE_SERVER_TRAILER", + 7: "EVENT_TYPE_CANCEL", + } + GrpcLogEntry_EventType_value = map[string]int32{ + "EVENT_TYPE_UNKNOWN": 0, + "EVENT_TYPE_CLIENT_HEADER": 1, + "EVENT_TYPE_SERVER_HEADER": 2, + "EVENT_TYPE_CLIENT_MESSAGE": 3, + "EVENT_TYPE_SERVER_MESSAGE": 4, + "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, + "EVENT_TYPE_SERVER_TRAILER": 6, + "EVENT_TYPE_CANCEL": 7, + } +) + +func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType { + p := new(GrpcLogEntry_EventType) + *p = x + return p } func (x GrpcLogEntry_EventType) String() string { - return proto.EnumName(GrpcLogEntry_EventType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } + +func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor() +} + +func (GrpcLogEntry_EventType) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0] +} + +func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead. func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0} + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0} } // Enumerates the entity that generates the log entry @@ -91,22 +139,45 @@ const ( GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 ) -var GrpcLogEntry_Logger_name = map[int32]string{ - 0: "LOGGER_UNKNOWN", - 1: "LOGGER_CLIENT", - 2: "LOGGER_SERVER", -} -var GrpcLogEntry_Logger_value = map[string]int32{ - "LOGGER_UNKNOWN": 0, - "LOGGER_CLIENT": 1, - "LOGGER_SERVER": 2, +// Enum value maps for GrpcLogEntry_Logger. +var ( + GrpcLogEntry_Logger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", + } + GrpcLogEntry_Logger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, + } +) + +func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger { + p := new(GrpcLogEntry_Logger) + *p = x + return p } func (x GrpcLogEntry_Logger) String() string { - return proto.EnumName(GrpcLogEntry_Logger_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } + +func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor() +} + +func (GrpcLogEntry_Logger) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1] +} + +func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead. 
func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1} + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1} } type Address_Type int32 @@ -122,30 +193,57 @@ const ( Address_TYPE_UNIX Address_Type = 3 ) -var Address_Type_name = map[int32]string{ - 0: "TYPE_UNKNOWN", - 1: "TYPE_IPV4", - 2: "TYPE_IPV6", - 3: "TYPE_UNIX", -} -var Address_Type_value = map[string]int32{ - "TYPE_UNKNOWN": 0, - "TYPE_IPV4": 1, - "TYPE_IPV6": 2, - "TYPE_UNIX": 3, +// Enum value maps for Address_Type. +var ( + Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", + } + Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, + } +) + +func (x Address_Type) Enum() *Address_Type { + p := new(Address_Type) + *p = x + return p } func (x Address_Type) String() string { - return proto.EnumName(Address_Type_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } + +func (Address_Type) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor() +} + +func (Address_Type) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2] +} + +func (x Address_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Address_Type.Descriptor instead. func (Address_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0} + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0} } // Log entry we store in binary logs type GrpcLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The timestamp of the binary log message - Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Uniquely identifies a call. The value must not be 0 in order to disambiguate // from an unset value. // Each call may have several log entries, they will all have the same call_id. @@ -158,11 +256,11 @@ type GrpcLogEntry struct { // durability or ordering is not guaranteed. SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` - Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` + Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum // The logger uses one of the following fields to record the payload, // according to the type of the log entry. // - // Types that are valid to be assigned to Payload: + // Types that are assignable to Payload: // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader // *GrpcLogEntry_Message @@ -175,71 +273,125 @@ type GrpcLogEntry struct { // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in // the case of trailers-only. On server side, peer is always // logged on EVENT_TYPE_CLIENT_HEADER. 
- Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` } -func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} } -func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) } -func (*GrpcLogEntry) ProtoMessage() {} +func (x *GrpcLogEntry) Reset() { + *x = GrpcLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcLogEntry) ProtoMessage() {} + +func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead. func (*GrpcLogEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0} -} -func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b) -} -func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic) -} -func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_GrpcLogEntry.Merge(dst, src) -} -func (m *GrpcLogEntry) XXX_Size() int { - return xxx_messageInfo_GrpcLogEntry.Size(m) -} -func (m *GrpcLogEntry) XXX_DiscardUnknown() { - xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m) + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0} } -var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo - -func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp +func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp } return nil } -func (m *GrpcLogEntry) GetCallId() uint64 { - if m != nil { - return m.CallId +func (x *GrpcLogEntry) GetCallId() uint64 { + if x != nil { + return x.CallId } return 0 } -func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { - if m != nil { - return m.SequenceIdWithinCall +func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { + if x != nil { + return x.SequenceIdWithinCall } return 0 } -func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType { - if m != nil { - return m.Type +func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType { + if x != nil { + return x.Type } return GrpcLogEntry_EVENT_TYPE_UNKNOWN } -func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { - if m != nil { - return m.Logger +func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { + if x != nil { + return x.Logger } return GrpcLogEntry_LOGGER_UNKNOWN } +func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *GrpcLogEntry) GetClientHeader() *ClientHeader { + if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok { + return x.ClientHeader + } + return nil +} + +func (x *GrpcLogEntry) GetServerHeader() *ServerHeader { + if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok { + return x.ServerHeader + } + return nil +} + +func (x 
*GrpcLogEntry) GetMessage() *Message { + if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok { + return x.Message + } + return nil +} + +func (x *GrpcLogEntry) GetTrailer() *Trailer { + if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok { + return x.Trailer + } + return nil +} + +func (x *GrpcLogEntry) GetPayloadTruncated() bool { + if x != nil { + return x.PayloadTruncated + } + return false +} + +func (x *GrpcLogEntry) GetPeer() *Address { + if x != nil { + return x.Peer + } + return nil +} + type isGrpcLogEntry_Payload interface { isGrpcLogEntry_Payload() } @@ -253,6 +405,7 @@ type GrpcLogEntry_ServerHeader struct { } type GrpcLogEntry_Message struct { + // Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` } @@ -268,168 +421,11 @@ func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} -func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { - if m != nil { - return m.Payload - } - return nil -} - -func (m *GrpcLogEntry) GetClientHeader() *ClientHeader { - if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok { - return x.ClientHeader - } - return nil -} - -func (m *GrpcLogEntry) GetServerHeader() *ServerHeader { - if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok { - return x.ServerHeader - } - return nil -} - -func (m *GrpcLogEntry) GetMessage() *Message { - if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok { - return x.Message - } - return nil -} - -func (m *GrpcLogEntry) GetTrailer() *Trailer { - if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok { - return x.Trailer - } - return nil -} - -func (m *GrpcLogEntry) GetPayloadTruncated() bool { - if m != nil { - return m.PayloadTruncated - } - return false -} - -func (m *GrpcLogEntry) GetPeer() *Address { - if m != nil { - return m.Peer - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{ - (*GrpcLogEntry_ClientHeader)(nil), - (*GrpcLogEntry_ServerHeader)(nil), - (*GrpcLogEntry_Message)(nil), - (*GrpcLogEntry_Trailer)(nil), - } -} - -func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*GrpcLogEntry) - // payload - switch x := m.Payload.(type) { - case *GrpcLogEntry_ClientHeader: - b.EncodeVarint(6<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ClientHeader); err != nil { - return err - } - case *GrpcLogEntry_ServerHeader: - b.EncodeVarint(7<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ServerHeader); err != nil { - return err - } - case *GrpcLogEntry_Message: - b.EncodeVarint(8<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Message); err != nil { - return err - } - case *GrpcLogEntry_Trailer: - b.EncodeVarint(9<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Trailer); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x) - } - return nil -} - -func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*GrpcLogEntry) - switch tag { - case 6: // payload.client_header - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ClientHeader) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_ClientHeader{msg} - return true, err - case 7: // payload.server_header - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ServerHeader) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_ServerHeader{msg} - return true, err - case 8: // payload.message - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Message) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_Message{msg} - return true, err - case 9: // payload.trailer - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Trailer) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_Trailer{msg} - return true, err - default: - return false, nil - } -} - -func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) { - m := msg.(*GrpcLogEntry) - // payload - switch x := m.Payload.(type) { - case *GrpcLogEntry_ClientHeader: - s := proto.Size(x.ClientHeader) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_ServerHeader: - s := proto.Size(x.ServerHeader) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_Message: - s := proto.Size(x.Message) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_Trailer: - s := proto.Size(x.Trailer) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - type ClientHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // This contains only the metadata from the application. 
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // The name of the RPC method, which looks something like: @@ -443,104 +439,122 @@ type ClientHeader struct { // or : . Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` // the RPC timeout - Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` } -func (m *ClientHeader) Reset() { *m = ClientHeader{} } -func (m *ClientHeader) String() string { return proto.CompactTextString(m) } -func (*ClientHeader) ProtoMessage() {} +func (x *ClientHeader) Reset() { + *x = ClientHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientHeader) ProtoMessage() {} + +func (x *ClientHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead. func (*ClientHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{1} -} -func (m *ClientHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClientHeader.Unmarshal(m, b) -} -func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic) -} -func (dst *ClientHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientHeader.Merge(dst, src) -} -func (m *ClientHeader) XXX_Size() int { - return xxx_messageInfo_ClientHeader.Size(m) -} -func (m *ClientHeader) XXX_DiscardUnknown() { - xxx_messageInfo_ClientHeader.DiscardUnknown(m) + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1} } -var xxx_messageInfo_ClientHeader proto.InternalMessageInfo - -func (m *ClientHeader) GetMetadata() *Metadata { - if m != nil { - return m.Metadata +func (x *ClientHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata } return nil } -func (m *ClientHeader) GetMethodName() string { - if m != nil { - return m.MethodName +func (x *ClientHeader) GetMethodName() string { + if x != nil { + return x.MethodName } return "" } -func (m *ClientHeader) GetAuthority() string { - if m != nil { - return m.Authority +func (x *ClientHeader) GetAuthority() string { + if x != nil { + return x.Authority } return "" } -func (m *ClientHeader) GetTimeout() *duration.Duration { - if m != nil { - return m.Timeout +func (x *ClientHeader) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout } return nil } type ServerHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // This contains only the metadata from the application. 
- Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` } -func (m *ServerHeader) Reset() { *m = ServerHeader{} } -func (m *ServerHeader) String() string { return proto.CompactTextString(m) } -func (*ServerHeader) ProtoMessage() {} +func (x *ServerHeader) Reset() { + *x = ServerHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerHeader) ProtoMessage() {} + +func (x *ServerHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead. func (*ServerHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{2} -} -func (m *ServerHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServerHeader.Unmarshal(m, b) -} -func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic) -} -func (dst *ServerHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServerHeader.Merge(dst, src) -} -func (m *ServerHeader) XXX_Size() int { - return xxx_messageInfo_ServerHeader.Size(m) -} -func (m *ServerHeader) XXX_DiscardUnknown() { - xxx_messageInfo_ServerHeader.DiscardUnknown(m) + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2} } -var xxx_messageInfo_ServerHeader proto.InternalMessageInfo - -func (m *ServerHeader) GetMetadata() *Metadata { - if m != nil { - return m.Metadata +func (x *ServerHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata } return nil } type Trailer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // This contains only the metadata from the application. Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // The gRPC status code. @@ -550,110 +564,124 @@ type Trailer struct { StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` // The value of the 'grpc-status-details-bin' metadata key. If // present, this is always an encoded 'google.rpc.Status' message. 
- StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` } -func (m *Trailer) Reset() { *m = Trailer{} } -func (m *Trailer) String() string { return proto.CompactTextString(m) } -func (*Trailer) ProtoMessage() {} +func (x *Trailer) Reset() { + *x = Trailer{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Trailer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Trailer) ProtoMessage() {} + +func (x *Trailer) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Trailer.ProtoReflect.Descriptor instead. func (*Trailer) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{3} -} -func (m *Trailer) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Trailer.Unmarshal(m, b) -} -func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Trailer.Marshal(b, m, deterministic) -} -func (dst *Trailer) XXX_Merge(src proto.Message) { - xxx_messageInfo_Trailer.Merge(dst, src) -} -func (m *Trailer) XXX_Size() int { - return xxx_messageInfo_Trailer.Size(m) -} -func (m *Trailer) XXX_DiscardUnknown() { - xxx_messageInfo_Trailer.DiscardUnknown(m) + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3} } -var xxx_messageInfo_Trailer proto.InternalMessageInfo - -func (m *Trailer) GetMetadata() *Metadata { - if m != nil { - return m.Metadata +func (x *Trailer) GetMetadata() *Metadata { + if x != nil { + return x.Metadata } return nil } -func (m *Trailer) GetStatusCode() uint32 { - if m != nil { - return m.StatusCode +func (x *Trailer) GetStatusCode() uint32 { + if x != nil { + return x.StatusCode } return 0 } -func (m *Trailer) GetStatusMessage() string { - if m != nil { - return m.StatusMessage +func (x *Trailer) GetStatusMessage() string { + if x != nil { + return x.StatusMessage } return "" } -func (m *Trailer) GetStatusDetails() []byte { - if m != nil { - return m.StatusDetails +func (x *Trailer) GetStatusDetails() []byte { + if x != nil { + return x.StatusDetails } return nil } // Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Length of the message. It may not be the same as the length of the // data field, as the logging payload can be truncated or omitted. Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` // May be truncated or omitted. 
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` } -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{4} -} -func (m *Message) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Message.Unmarshal(m, b) -} -func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Message.Marshal(b, m, deterministic) -} -func (dst *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(dst, src) -} -func (m *Message) XXX_Size() int { - return xxx_messageInfo_Message.Size(m) -} -func (m *Message) XXX_DiscardUnknown() { - xxx_messageInfo_Message.DiscardUnknown(m) + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4} } -var xxx_messageInfo_Message proto.InternalMessageInfo - -func (m *Message) GetLength() uint32 { - if m != nil { - return m.Length +func (x *Message) GetLength() uint32 { + if x != nil { + return x.Length } return 0 } -func (m *Message) GetData() []byte { - if m != nil { - return m.Data +func (x *Message) GetData() []byte { + if x != nil { + return x.Data } return nil } @@ -680,221 +708,480 @@ func (m *Message) GetData() []byte { // header is just a normal metadata key. // The pair will not count towards the size limit. 
type Metadata struct { - Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` } -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{5} -} -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metadata.Unmarshal(m, b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) -} -func (dst *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(dst, src) -} -func (m *Metadata) XXX_Size() int { - return xxx_messageInfo_Metadata.Size(m) -} -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5} } -var xxx_messageInfo_Metadata proto.InternalMessageInfo - -func (m *Metadata) GetEntry() []*MetadataEntry { - if m != nil { - return m.Entry +func (x *Metadata) GetEntry() []*MetadataEntry { + if x != nil { + return x.Entry } return nil } // A metadata key value pair type MetadataEntry struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *MetadataEntry) Reset() { *m = MetadataEntry{} } -func (m *MetadataEntry) String() string { return proto.CompactTextString(m) } -func (*MetadataEntry) ProtoMessage() {} +func (x *MetadataEntry) Reset() { + *x = MetadataEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataEntry) ProtoMessage() {} + +func (x *MetadataEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != 
nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead. func (*MetadataEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{6} -} -func (m *MetadataEntry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetadataEntry.Unmarshal(m, b) -} -func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic) -} -func (dst *MetadataEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetadataEntry.Merge(dst, src) -} -func (m *MetadataEntry) XXX_Size() int { - return xxx_messageInfo_MetadataEntry.Size(m) -} -func (m *MetadataEntry) XXX_DiscardUnknown() { - xxx_messageInfo_MetadataEntry.DiscardUnknown(m) + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6} } -var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo - -func (m *MetadataEntry) GetKey() string { - if m != nil { - return m.Key +func (x *MetadataEntry) GetKey() string { + if x != nil { + return x.Key } return "" } -func (m *MetadataEntry) GetValue() []byte { - if m != nil { - return m.Value +func (x *MetadataEntry) GetValue() []byte { + if x != nil { + return x.Value } return nil } // Address information type Address struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` // only for TYPE_IPV4 and TYPE_IPV6 - IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` } -func (m *Address) Reset() { *m = Address{} } -func (m *Address) String() string { return proto.CompactTextString(m) } -func (*Address) ProtoMessage() {} +func (x *Address) Reset() { + *x = Address{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Address) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Address) ProtoMessage() {} + +func (x *Address) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Address.ProtoReflect.Descriptor instead. 
func (*Address) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{7} -} -func (m *Address) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Address.Unmarshal(m, b) -} -func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Address.Marshal(b, m, deterministic) -} -func (dst *Address) XXX_Merge(src proto.Message) { - xxx_messageInfo_Address.Merge(dst, src) -} -func (m *Address) XXX_Size() int { - return xxx_messageInfo_Address.Size(m) -} -func (m *Address) XXX_DiscardUnknown() { - xxx_messageInfo_Address.DiscardUnknown(m) + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7} } -var xxx_messageInfo_Address proto.InternalMessageInfo - -func (m *Address) GetType() Address_Type { - if m != nil { - return m.Type +func (x *Address) GetType() Address_Type { + if x != nil { + return x.Type } return Address_TYPE_UNKNOWN } -func (m *Address) GetAddress() string { - if m != nil { - return m.Address +func (x *Address) GetAddress() string { + if x != nil { + return x.Address } return "" } -func (m *Address) GetIpPort() uint32 { - if m != nil { - return m.IpPort +func (x *Address) GetIpPort() uint32 { + if x != nil { + return x.IpPort } return 0 } -func init() { - proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry") - proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader") - proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader") - proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer") - proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message") - proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata") - proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry") - proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address") - proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) - proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) - proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) +var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor + +var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, + 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 
0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, + 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, + 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, + 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, + 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 
0x57, 0x4e, 0x10, + 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12, + 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, + 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, + 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, + 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, + 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, + 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, + 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, + 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, + 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 
0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, + 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } -func init() { - proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911) +var ( + file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once + file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc +) + +func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { + file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { + file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) + }) + return file_grpc_binlog_v1_binarylog_proto_rawDescData } -var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{ - // 900 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44, - 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04, - 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d, - 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c, - 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf, - 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2, - 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09, - 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e, - 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef, - 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36, - 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5, - 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46, - 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84, - 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72, - 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa, - 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb, - 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84, - 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1, - 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c, - 0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24, - 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba, - 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8, - 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5, - 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1, - 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94, - 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f, - 0x1a, 
0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec, - 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b, - 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1, - 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5, - 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b, - 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d, - 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42, - 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4, - 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd, - 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51, - 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01, - 0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58, - 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5, - 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff, - 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26, - 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23, - 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44, - 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46, - 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf, - 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab, - 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32, - 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49, - 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb, - 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c, - 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0, - 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed, - 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f, - 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, - 0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e, - 0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50, - 0xd4, 0x07, 0x00, 0x00, +var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ + (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType + (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger + (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type + (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry + (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader + (*ServerHeader)(nil), // 5: 
grpc.binarylog.v1.ServerHeader + (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer + (*Message)(nil), // 7: grpc.binarylog.v1.Message + (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata + (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry + (*Address)(nil), // 10: grpc.binarylog.v1.Address + (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 12: google.protobuf.Duration +} +var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{ + 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp + 0, // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType + 1, // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger + 4, // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader + 5, // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader + 7, // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message + 6, // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer + 10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address + 8, // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration + 8, // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 8, // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata + 9, // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry + 2, // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type + 14, // [14:14] is the sub-list for method output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_grpc_binlog_v1_binarylog_proto_init() } +func file_grpc_binlog_v1_binarylog_proto_init() { + if File_grpc_binlog_v1_binarylog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Trailer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*GrpcLogEntry_ClientHeader)(nil), + (*GrpcLogEntry_ServerHeader)(nil), + (*GrpcLogEntry_Message)(nil), + (*GrpcLogEntry_Trailer)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, + NumEnums: 3, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_binlog_v1_binarylog_proto_goTypes, + DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs, + EnumInfos: file_grpc_binlog_v1_binarylog_proto_enumTypes, + MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes, + }.Build() + File_grpc_binlog_v1_binarylog_proto = out.File + file_grpc_binlog_v1_binarylog_proto_rawDesc = nil + file_grpc_binlog_v1_binarylog_proto_goTypes = nil + file_grpc_binlog_v1_binarylog_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 14ce9c76a..77a08fd33 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -23,7 +23,6 @@ import ( "errors" "fmt" "math" - "net" "reflect" "strings" "sync" @@ -35,10 +34,11 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" + iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -48,6 +48,7 @@ import ( _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. + _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. ) const ( @@ -68,8 +69,6 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") - // errBalancerClosed indicates that the balancer is closed. - errBalancerClosed = errors.New("grpc: balancer is closed") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. 
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" @@ -106,6 +105,17 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { return DialContext(context.Background(), target, opts...) } +type defaultConfigSelector struct { + sc *ServiceConfig +} + +func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) { + return &iresolver.RPCConfig{ + Context: rpcInfo.Context, + MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method), + }, nil +} + // DialContext creates a client connection to the given target. By default, it's // a non-blocking dial (the function won't wait for connections to be // established, and connecting happens in the background). To make it a blocking @@ -151,20 +161,17 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if channelz.IsOn() { if cc.dopts.channelzParentID != 0 { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Channel Created", - Severity: channelz.CtINFO, + Severity: channelz.CtInfo, Parent: &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtINFO, + Severity: channelz.CtInfo, }, }) } else { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtINFO, - }) + channelz.Info(logger, cc.channelzID, "Channel Created") } cc.csMgr.channelzID = cc.channelzID } @@ -196,15 +203,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } cc.mkp = cc.dopts.copts.KeepaliveParams - if cc.dopts.copts.Dialer == nil { - cc.dopts.copts.Dialer = newProxyDialer( - func(ctx context.Context, addr string) (net.Conn, error) { - network, addr := parseDialTarget(addr) - return (&net.Dialer{}).DialContext(ctx, network, addr) - }, - ) - } - if cc.dopts.copts.UserAgent != "" { cc.dopts.copts.UserAgent += " " + grpcUA } else { @@ -219,7 +217,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * defer func() { select { case <-ctx.Done(): - conn, err = nil, ctx.Err() + switch { + case ctx.Err() == err: + conn = nil + case err == nil || !cc.dopts.returnLastError: + conn, err = nil, ctx.Err() + default: + conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err) + } default: } }() @@ -231,6 +236,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * case sc, ok := <-cc.dopts.scChan: if ok { cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) scSet = true } default: @@ -239,30 +245,35 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if cc.dopts.bs == nil { cc.dopts.bs = backoff.DefaultExponential } - if cc.dopts.resolverBuilder == nil { - // Only try to parse target when resolver builder is not already set. - cc.parsedTarget = parseTarget(cc.target) - grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) - cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) - if cc.dopts.resolverBuilder == nil { - // If resolver builder is still nil, the parsed target's scheme is - // not registered. Fallback to default resolver and set Endpoint to - // the original target. 
- grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) - cc.parsedTarget = resolver.Target{ - Scheme: resolver.GetDefaultScheme(), - Endpoint: target, - } - cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) + + // Determine the resolver to use. + cc.parsedTarget = grpcutil.ParseTarget(cc.target, cc.dopts.copts.Dialer != nil) + channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) + resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + // If resolver builder is still nil, the parsed target's scheme is + // not registered. Fallback to default resolver and set Endpoint to + // the original target. + channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + cc.parsedTarget = resolver.Target{ + Scheme: resolver.GetDefaultScheme(), + Endpoint: target, + } + resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) } - } else { - cc.parsedTarget = resolver.Target{Endpoint: target} } + creds := cc.dopts.copts.TransportCredentials if creds != nil && creds.Info().ServerName != "" { cc.authority = creds.Info().ServerName } else if cc.dopts.insecure && cc.dopts.authority != "" { cc.authority = cc.dopts.authority + } else if strings.HasPrefix(cc.target, "unix:") || strings.HasPrefix(cc.target, "unix-abstract:") { + cc.authority = "localhost" + } else if strings.HasPrefix(cc.parsedTarget.Endpoint, ":") { + cc.authority = "localhost" + cc.parsedTarget.Endpoint } else { // Use endpoint from "scheme://authority/endpoint" as the default // authority for ClientConn. @@ -275,6 +286,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * case sc, ok := <-cc.dopts.scChan: if ok { cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) } case <-ctx.Done(): return nil, ctx.Err() @@ -292,19 +304,20 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * DialCreds: credsClone, CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, + CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParentID: cc.channelzID, Target: cc.parsedTarget, } // Build the resolver. - rWrapper, err := newCCResolverWrapper(cc) + rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) if err != nil { return nil, fmt.Errorf("failed to build resolver: %v", err) } - cc.mu.Lock() cc.resolverWrapper = rWrapper cc.mu.Unlock() + // A blocking dial blocks until the clientConn is ready. if cc.dopts.block { for { @@ -312,7 +325,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if s == connectivity.Ready { break } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.blockingpicker.connectionError(); err != nil { + if err = cc.connectionError(); err != nil { terr, ok := err.(interface { Temporary() bool }) @@ -323,6 +336,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } if !cc.WaitForStateChange(ctx, s) { // ctx got timeout or canceled. 
+ if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } return nil, ctx.Err() } } @@ -415,12 +431,7 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state - if channelz.IsOn() { - channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel Connectivity change to %v", state), - Severity: channelz.CtINFO, - }) - } + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. close(csm.notifyChan) @@ -443,6 +454,20 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { return csm.notifyChan } +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs. It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. +type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. + Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} + +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) + // ClientConn represents a virtual connection to a conceptual endpoint, to // perform RPCs. // @@ -468,6 +493,8 @@ type ClientConn struct { balancerBuildOpts balancer.BuildOptions blockingpicker *pickerWrapper + safeConfigSelector iresolver.SafeConfigSelector + mu sync.RWMutex resolverWrapper *ccResolverWrapper sc *ServiceConfig @@ -482,11 +509,18 @@ type ClientConn struct { channelzID int64 // channelz unique identification number czData *channelzData + + lceMu sync.Mutex // protects lastConnectionError + lastConnectionError error } // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. -// This is an EXPERIMENTAL API. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { ch := cc.csMgr.getNotifyChan() if cc.csMgr.getState() != sourceState { @@ -501,7 +535,11 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec } // GetState returns the connectivity.State of ClientConn. -// This is an EXPERIMENTAL API. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } @@ -517,6 +555,7 @@ func (cc *ClientConn) scWatcher() { // TODO: load balance policy runtime change is ignored. // We may revisit this decision in the future. 
cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) cc.mu.Unlock() case <-cc.ctx.Done(): return @@ -555,13 +594,13 @@ func init() { func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { if cc.sc != nil { - cc.applyServiceConfigAndBalancer(cc.sc, addrs) + cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs) return } if cc.dopts.defaultServiceConfig != nil { - cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs) + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs) } else { - cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs) + cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs) } } @@ -598,7 +637,15 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // default, per the error handling design? } else { if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { - cc.applyServiceConfigAndBalancer(sc, s.Addresses) + configSelector := iresolver.GetConfigSelector(s) + if configSelector != nil { + if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { + channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector") + } + } else { + configSelector = &defaultConfigSelector{sc} + } + cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) } else { ret = balancer.ErrBadResolverState if cc.balancerWrapper == nil { @@ -608,6 +655,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } else { err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) } + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) cc.blockingpicker.updatePicker(base.NewErrPicker(err)) cc.csMgr.updateState(connectivity.TransientFailure) cc.mu.Unlock() @@ -656,9 +704,9 @@ func (cc *ClientConn) switchBalancer(name string) { return } - grpclog.Infof("ClientConn switching balancer to %q", name) + channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) if cc.dopts.balancerBuilder != nil { - grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead") + channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") return } if cc.balancerWrapper != nil { @@ -666,22 +714,12 @@ func (cc *ClientConn) switchBalancer(name string) { } builder := balancer.Get(name) - if channelz.IsOn() { - if builder == nil { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName), - Severity: channelz.CtWarning, - }) - } else { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel switches to new LB policy %q", name), - Severity: channelz.CtINFO, - }) - } - } if builder == nil { - grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name) + channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) + channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) } 
cc.curBalancerName = builder.Name() @@ -705,6 +743,7 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi // Caller needs to make sure len(addrs) > 0. func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { ac := &addrConn{ + state: connectivity.Idle, cc: cc, addrs: addrs, scopts: opts, @@ -721,12 +760,12 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub } if channelz.IsOn() { ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel Created", - Severity: channelz.CtINFO, + Severity: channelz.CtInfo, Parent: &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtINFO, + Severity: channelz.CtInfo, }, }) } @@ -760,7 +799,11 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { } // Target returns the target string of the ClientConn. -// This is an EXPERIMENTAL API. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func (cc *ClientConn) Target() string { return cc.target } @@ -819,7 +862,7 @@ func (ac *addrConn) connect() error { func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() defer ac.mu.Unlock() - grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { @@ -839,7 +882,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { break } } - grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) if curAddrFound { ac.addrs = addrs } @@ -847,26 +890,33 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return curAddrFound } +func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { + if sc == nil { + return MethodConfig{} + } + if m, ok := sc.Methods[method]; ok { + return m + } + i := strings.LastIndex(method, "/") + if m, ok := sc.Methods[method[:i+1]]; ok { + return m + } + return sc.Methods[""] +} + // GetMethodConfig gets the method config of the input method. // If there's an exact match for input method (i.e. /service/method), we return // the corresponding MethodConfig. -// If there isn't an exact match for the input method, we look for the default config -// under the service (i.e /service/). If there is a default MethodConfig for -// the service, we return it. +// If there isn't an exact match for the input method, we look for the service's default +// config under the service (i.e /service/) and then for the default for all services (empty string). +// +// If there is a default MethodConfig for the service, we return it. // Otherwise, we return an empty MethodConfig. func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { // TODO: Avoid the locking here. 
cc.mu.RLock() defer cc.mu.RUnlock() - if cc.sc == nil { - return MethodConfig{} - } - m, ok := cc.sc.Methods[method] - if !ok { - i := strings.LastIndex(method, "/") - m = cc.sc.Methods[method[:i+1]] - } - return m + return getMethodConfig(cc.sc, method) } func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { @@ -889,12 +939,15 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method st return t, done, nil } -func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) { +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { if sc == nil { // should never reach here. return } cc.sc = sc + if configSelector != nil { + cc.safeConfigSelector.UpdateConfigSelector(configSelector) + } if cc.sc.retryThrottling != nil { newThrottler := &retryThrottler{ @@ -958,7 +1011,10 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { // However, if a previously unavailable network becomes available, this may be // used to trigger an immediate reconnect. // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func (cc *ClientConn) ResetConnectBackoff() { cc.mu.Lock() conns := cc.conns @@ -1002,15 +1058,15 @@ func (cc *ClientConn) Close() error { if channelz.IsOn() { ted := &channelz.TraceEventDesc{ Desc: "Channel Deleted", - Severity: channelz.CtINFO, + Severity: channelz.CtInfo, } if cc.dopts.channelzParentID != 0 { ted.Parent = &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtINFO, + Severity: channelz.CtInfo, } } - channelz.AddTraceEvent(cc.channelzID, ted) + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to // the entity being deleted, and thus prevent it from being deleted right away. channelz.RemoveEntry(cc.channelzID) @@ -1053,15 +1109,8 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } - - updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s) ac.state = s - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: updateMsg, - Severity: channelz.CtINFO, - }) - } + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) } @@ -1198,12 +1247,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T } ac.mu.Unlock() - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr), - Severity: channelz.CtINFO, - }) - } + channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) if err == nil { @@ -1212,7 +1256,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T if firstConnErr == nil { firstConnErr = err } - ac.cc.blockingpicker.updateConnectionError(err) + ac.cc.updateConnectionError(err) } // Couldn't connect to any address. 
@@ -1227,16 +1271,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne onCloseCalled := make(chan struct{}) reconnect := grpcsync.NewEvent() - authority := ac.cc.authority // addr.ServerName takes precedent over ClientConn authority, if present. - if addr.ServerName != "" { - authority = addr.ServerName - } - - target := transport.TargetInfo{ - Addr: addr.Addr, - Metadata: addr.Metadata, - Authority: authority, + if addr.ServerName == "" { + addr.ServerName = ac.cc.authority } once := sync.Once{} @@ -1282,10 +1319,10 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne copts.ChannelzParentID = ac.channelzID } - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) return nil, nil, err } @@ -1293,7 +1330,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne case <-time.After(time.Until(connectDeadline)): // We didn't get the preface in time. newTr.Close() - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) return nil, nil, errors.New("timed out waiting for server handshake") case <-prefaceReceived: // We got the preface - huzzah! things are good. @@ -1310,7 +1347,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // // LB channel health checking is enabled when all requirements below are met: // 1. it is not disabled by the user with the WithDisableHealthCheck DialOption -// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package +// 2. internal.HealthCheckFunc is set by importing the grpc/health package // 3. a service config with non-empty healthCheckConfig field is provided // 4. the load balancer requests it // @@ -1340,7 +1377,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // The health package is not imported to set health check function. // // TODO: add a link to the health check doc in the error message. 
- grpclog.Error("Health check is requested but health check function is not set.") + channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.") return } @@ -1370,15 +1407,9 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled", - Severity: channelz.CtError, - }) - } - grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled") + channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err) + channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) } } }() @@ -1443,12 +1474,12 @@ func (ac *addrConn) tearDown(err error) { ac.mu.Lock() } if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel Deleted", - Severity: channelz.CtINFO, + Severity: channelz.CtInfo, Parent: &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtINFO, + Severity: channelz.CtInfo, }, }) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to @@ -1542,3 +1573,24 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { // Deprecated: This error is never returned by grpc and should not be // referenced by users. var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + +func (cc *ClientConn) getResolver(scheme string) resolver.Builder { + for _, rb := range cc.dopts.resolvers { + if scheme == rb.Scheme() { + return rb + } + } + return resolver.Get(scheme) +} + +func (cc *ClientConn) updateConnectionError(err error) { + cc.lceMu.Lock() + cc.lastConnectionError = err + cc.lceMu.Unlock() +} + +func (cc *ClientConn) connectionError() error { + cc.lceMu.Lock() + defer cc.lceMu.Unlock() + return cc.lastConnectionError +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 02738839d..11b106182 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -33,6 +33,9 @@ const ( OK Code = 0 // Canceled indicates the operation was canceled (typically by the caller). + // + // The gRPC framework will generate this error code when cancellation + // is requested. Canceled Code = 1 // Unknown error. An example of where this error may be returned is @@ -40,12 +43,17 @@ const ( // an error-space that is not known in this address space. Also // errors raised by APIs that do not return enough error information // may be converted to this error. + // + // The gRPC framework will generate this error code in the above two + // mentioned cases. Unknown Code = 2 // InvalidArgument indicates client specified an invalid argument. // Note that this differs from FailedPrecondition. It indicates arguments // that are problematic regardless of the state of the system // (e.g., a malformed file name). 
+ // + // This error code will not be generated by the gRPC framework. InvalidArgument Code = 3 // DeadlineExceeded means operation expired before completion. @@ -53,14 +61,21 @@ const ( // returned even if the operation has completed successfully. For // example, a successful response from a server could have been delayed // long enough for the deadline to expire. + // + // The gRPC framework will generate this error code when the deadline is + // exceeded. DeadlineExceeded Code = 4 // NotFound means some requested entity (e.g., file or directory) was // not found. + // + // This error code will not be generated by the gRPC framework. NotFound Code = 5 // AlreadyExists means an attempt to create an entity failed because one // already exists. + // + // This error code will not be generated by the gRPC framework. AlreadyExists Code = 6 // PermissionDenied indicates the caller does not have permission to @@ -69,10 +84,17 @@ const ( // instead for those errors). It must not be // used if the caller cannot be identified (use Unauthenticated // instead for those errors). + // + // This error code will not be generated by the gRPC core framework, + // but expect authentication middleware to use it. PermissionDenied Code = 7 // ResourceExhausted indicates some resource has been exhausted, perhaps // a per-user quota, or perhaps the entire file system is out of space. + // + // This error code will be generated by the gRPC framework in + // out-of-memory and server overload situations, or when a message is + // larger than the configured maximum size. ResourceExhausted Code = 8 // FailedPrecondition indicates operation was rejected because the @@ -94,6 +116,8 @@ const ( // REST Get/Update/Delete on a resource and the resource on the // server does not match the condition. E.g., conflicting // read-modify-write on the same resource. + // + // This error code will not be generated by the gRPC framework. FailedPrecondition Code = 9 // Aborted indicates the operation was aborted, typically due to a @@ -102,6 +126,8 @@ const ( // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. + // + // This error code will not be generated by the gRPC framework. Aborted Code = 10 // OutOfRange means operation was attempted past the valid range. @@ -119,15 +145,26 @@ const ( // error) when it applies so that callers who are iterating through // a space can easily look for an OutOfRange error to detect when // they are done. + // + // This error code will not be generated by the gRPC framework. OutOfRange Code = 11 // Unimplemented indicates operation is not implemented or not // supported/enabled in this service. + // + // This error code will be generated by the gRPC framework. Most + // commonly, you will see this error code when a method implementation + // is missing on the server. It can also be generated for unknown + // compression algorithms or a disagreement as to whether an RPC should + // be streaming. Unimplemented Code = 12 // Internal errors. Means some invariants expected by underlying // system has been broken. If you see one of these errors, // something is very broken. + // + // This error code will be generated by the gRPC framework in several + // internal error conditions. Internal Code = 13 // Unavailable indicates the service is currently unavailable. @@ -137,13 +174,22 @@ const ( // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. 
+ // + // This error code will be generated by the gRPC framework during + // abrupt shutdown of a server process or network connection. Unavailable Code = 14 // DataLoss indicates unrecoverable data loss or corruption. + // + // This error code will not be generated by the gRPC framework. DataLoss Code = 15 // Unauthenticated indicates the request does not have valid // authentication credentials for the operation. + // + // The gRPC framework will generate this error code when the + // authentication metadata is invalid or a Credentials callback fails, + // but also expect authentication middleware to generate it. Unauthenticated Code = 16 _maxCode = 17 diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go index 34ec36fbf..010156261 100644 --- a/vendor/google.golang.org/grpc/connectivity/connectivity.go +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -22,11 +22,11 @@ package connectivity import ( - "context" - "google.golang.org/grpc/grpclog" ) +var logger = grpclog.Component("core") + // State indicates the state of connectivity. // It can be the state of a ClientConn or SubConn. type State int @@ -44,7 +44,7 @@ func (s State) String() string { case Shutdown: return "SHUTDOWN" default: - grpclog.Errorf("unknown connectivity state: %d", s) + logger.Errorf("unknown connectivity state: %d", s) return "Invalid-State" } } @@ -61,13 +61,3 @@ const ( // Shutdown indicates the ClientConn has started shutting down. Shutdown ) - -// Reporter reports the connectivity states. -type Reporter interface { - // CurrentState returns the current state of the reporter. - CurrentState() State - // WaitForStateChange blocks until the reporter's state is different from the given state, - // and returns true. - // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). - WaitForStateChange(context.Context, State) bool -} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 667cf6b33..e69562e78 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -25,9 +25,11 @@ package credentials // import "google.golang.org/grpc/credentials" import ( "context" "errors" + "fmt" "net" "github.com/golang/protobuf/proto" + "google.golang.org/grpc/attributes" "google.golang.org/grpc/internal" ) @@ -50,6 +52,50 @@ type PerRPCCredentials interface { RequireTransportSecurity() bool } +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. +type SecurityLevel int + +const ( + // InvalidSecurityLevel indicates an invalid security level. + // The zero SecurityLevel value is invalid for backward compatibility. + InvalidSecurityLevel SecurityLevel = iota + // NoSecurity indicates a connection is insecure. + NoSecurity + // IntegrityOnly indicates a connection only provides integrity protection. + IntegrityOnly + // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. + PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. 
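The comments added to codes.go above distinguish codes that gRPC itself may generate (Canceled, DeadlineExceeded, ResourceExhausted, Unimplemented, Internal, Unavailable, Unauthenticated) from codes that only application code produces (NotFound, InvalidArgument, AlreadyExists, FailedPrecondition, Aborted, OutOfRange, DataLoss). A small sketch of how that split is typically used; the lookupUser handler and its messages are made up for illustration:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// lookupUser stands in for an application RPC handler. NotFound and
// InvalidArgument are never produced by gRPC itself, so a caller that sees
// them knows they came from application logic.
func lookupUser(ctx context.Context, id string) (string, error) {
	if id == "" {
		return "", status.Error(codes.InvalidArgument, "id must not be empty")
	}
	return "", status.Errorf(codes.NotFound, "user %q not found", id)
}

func main() {
	_, err := lookupUser(context.Background(), "alice")
	switch status.Code(err) {
	case codes.NotFound:
		fmt.Println("application says: no such user")
	case codes.Unavailable, codes.DeadlineExceeded:
		fmt.Println("transport or framework problem; a retry may help")
	default:
		fmt.Println("unexpected:", err)
	}
}
```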
+func (s SecurityLevel) String() string { + switch s { + case NoSecurity: + return "NoSecurity" + case IntegrityOnly: + return "IntegrityOnly" + case PrivacyAndIntegrity: + return "PrivacyAndIntegrity" + } + return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. +// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { + SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. +func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { + return c +} + // ProtocolInfo provides information regarding the gRPC wire protocol version, // security protocol, security protocol version in use, server name, etc. type ProtocolInfo struct { @@ -57,13 +103,19 @@ type ProtocolInfo struct { ProtocolVersion string // SecurityProtocol is the security protocol in use. SecurityProtocol string - // SecurityVersion is the security protocol version. + // SecurityVersion is the security protocol version. It is a static version string from the + // credentials, not a value that reflects per-connection protocol negotiation. To retrieve + // details about the credentials used for a connection, use the Peer's AuthInfo field instead. + // + // Deprecated: please use Peer.AuthInfo. SecurityVersion string // ServerName is the user-configured server name. ServerName string } // AuthInfo defines the common interface for the auth information the users are interested in. +// A struct that implements AuthInfo should embed CommonAuthInfo by including additional +// information about the credentials in it. type AuthInfo interface { AuthType() string } @@ -75,20 +127,25 @@ var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gR // TransportCredentials defines the common interface for all the live gRPC wire // protocols and supported transport security protocols (e.g., TLS, SSL). type TransportCredentials interface { - // ClientHandshake does the authentication handshake specified by the corresponding - // authentication protocol on rawConn for clients. It returns the authenticated - // connection and the corresponding auth information about the connection. - // Implementations must use the provided context to implement timely cancellation. - // gRPC will try to reconnect if the error returned is a temporary error - // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). - // If the returned error is a wrapper error, implementations should make sure that + // ClientHandshake does the authentication handshake specified by the + // corresponding authentication protocol on rawConn for clients. It returns + // the authenticated connection and the corresponding auth information + // about the connection. The auth information should embed CommonAuthInfo + // to return additional information about the credentials. Implementations + // must use the provided context to implement timely cancellation. gRPC + // will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the + // returned error is a wrapper error, implementations should make sure that // the error implements Temporary() to have the correct retry behaviors. + // Additionally, ClientHandshakeInfo data will be available via the context + // passed to this call. 
// // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns // the authenticated connection and the corresponding auth information about - // the connection. + // the connection. The auth information should embed CommonAuthInfo to return additional information + // about the credentials. // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) @@ -127,6 +184,8 @@ type Bundle interface { type RequestInfo struct { // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method") Method string + // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) + AuthInfo AuthInfo } // requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object. @@ -140,10 +199,63 @@ func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { return } +// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes +// it possible to pass arbitrary data to the handshaker from gRPC, resolver, +// balancer etc. Individual credential implementations control the actual +// format of the data that they are willing to receive. +// +// This API is experimental. +type ClientHandshakeInfo struct { + // Attributes contains the attributes for the address. It could be provided + // by the gRPC, resolver, balancer etc. + Attributes *attributes.Attributes +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored +// in ctx. +// +// This API is experimental. +func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { + chi, _ := ctx.Value(clientHandshakeInfoKey{}).(ClientHandshakeInfo) + return chi +} + +// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. +// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. +// +// This API is experimental. +func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error { + type internalInfo interface { + GetCommonAuthInfo() CommonAuthInfo + } + if ai == nil { + return errors.New("AuthInfo is nil") + } + if ci, ok := ai.(internalInfo); ok { + // CommonAuthInfo.SecurityLevel has an invalid value. + if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel { + return nil + } + if ci.GetCommonAuthInfo().SecurityLevel < level { + return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel) + } + } + // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method. 
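One plausible use of the CheckSecurityLevel helper and the CommonAuthInfo security levels introduced above is a server-side guard. The interceptor below is a hypothetical sketch (its name and error messages are assumptions); it pulls the connection's AuthInfo from the peer in the request context and rejects calls that arrive over anything weaker than PrivacyAndIntegrity:

```go
package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/peer"
	"google.golang.org/grpc/status"
)

// requireTransportSecurity is a hypothetical unary server interceptor.
func requireTransportSecurity(ctx context.Context, req interface{},
	info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	p, ok := peer.FromContext(ctx)
	if !ok {
		return nil, status.Error(codes.Unauthenticated, "no peer information")
	}
	if err := credentials.CheckSecurityLevel(p.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
		return nil, status.Errorf(codes.PermissionDenied, "connection not secure enough: %v", err)
	}
	return handler(ctx, req)
}

func main() {
	_ = grpc.NewServer(grpc.UnaryInterceptor(requireTransportSecurity))
}
```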
+ return nil +} + func init() { internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context { return context.WithValue(ctx, requestInfoKey{}, ri) } + internal.NewClientHandshakeInfoContext = func(ctx context.Context, chi ClientHandshakeInfo) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) + } } // ChannelzSecurityInfo defines the interface that security protocols should implement diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 7c3361368..8ee7124f2 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -25,14 +25,18 @@ import ( "fmt" "io/ioutil" "net" + "net/url" - "google.golang.org/grpc/credentials/internal" + credinternal "google.golang.org/grpc/internal/credentials" ) // TLSInfo contains the auth information for a TLS authenticated connection. // It implements the AuthInfo interface. type TLSInfo struct { State tls.ConnectionState + CommonAuthInfo + // This API is experimental. + SPIFFEID *url.URL } // AuthType returns the type of TLSInfo as a string. @@ -68,7 +72,7 @@ func (c tlsCreds) Info() ProtocolInfo { func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { // use local cfg to avoid clobbering ServerName if using multiple endpoints - cfg := cloneTLSConfig(c.config) + cfg := credinternal.CloneTLSConfig(c.config) if cfg.ServerName == "" { serverName, _, err := net.SplitHostPort(authority) if err != nil { @@ -81,24 +85,48 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon errChannel := make(chan error, 1) go func() { errChannel <- conn.Handshake() + close(errChannel) }() select { case err := <-errChannel: if err != nil { + conn.Close() return nil, nil, err } case <-ctx.Done(): + conn.Close() return nil, nil, ctx.Err() } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil } func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { conn := tls.Server(rawConn, c.config) if err := conn.Handshake(); err != nil { + conn.Close() return nil, nil, err } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil } func (c *tlsCreds) Clone() TransportCredentials { @@ -110,36 +138,33 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } -const alpnProtoStrH2 = "h2" - -func appendH2ToNextProtos(ps []string) []string { - for _, p := range ps { - if p == alpnProtoStrH2 { - return ps - } - } - ret := make([]string, 0, len(ps)+1) - ret = append(ret, ps...) - return append(ret, alpnProtoStrH2) -} - // NewTLS uses c to construct a TransportCredentials based on TLS. 
func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{cloneTLSConfig(c)} - tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) + tc := &tlsCreds{credinternal.CloneTLSConfig(c)} + tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) return tc } -// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. +// NewClientTLSFromCert constructs TLS credentials from the provided root +// certificate authority certificate(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. // serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header field) in requests. +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } -// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. +// NewClientTLSFromFile constructs TLS credentials from the provided root +// certificate authority certificate file(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. // serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header field) in requests. +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { b, err := ioutil.ReadFile(certFile) if err != nil { @@ -170,7 +195,10 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error // TLSChannelzSecurityValue defines the struct that TLS protocol should return // from GetSecurityValue(), containing security info like cipher and certificate used. // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type TLSChannelzSecurityValue struct { ChannelzSecurityValue StandardName string @@ -203,18 +231,3 @@ var cipherSuiteLookup = map[uint16]string{ tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", } - -// cloneTLSConfig returns a shallow clone of the exported -// fields of cfg, ignoring the unexported sync.Once, which -// contains a mutex and must not be copied. -// -// If cfg is nil, a new zero tls.Config is returned. -// -// TODO: inline this function if possible. 
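The reworded NewClientTLSFromCert/NewClientTLSFromFile comments above stress that those constructors only install root CAs; presenting a client certificate (mTLS) means building a full tls.Config and handing it to NewTLS. A rough client-side sketch, with every file path and the server address as assumptions:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Client certificate and key for mTLS; paths are illustrative.
	cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
	if err != nil {
		log.Fatalf("load client key pair: %v", err)
	}
	caPEM, err := ioutil.ReadFile("ca.crt")
	if err != nil {
		log.Fatalf("read CA cert: %v", err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		log.Fatal("failed to add CA cert to pool")
	}

	// NewTLS accepts the complete tls.Config, unlike NewClientTLSFromFile.
	creds := credentials.NewTLS(&tls.Config{
		Certificates: []tls.Certificate{cert},
		RootCAs:      pool,
	})
	cc, err := grpc.Dial("server.example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer cc.Close()
}
```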
-func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - - return cfg.Clone() -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 9af3eef7a..e7f86e6d7 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -27,7 +27,6 @@ import ( "google.golang.org/grpc/backoff" "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" @@ -46,21 +45,19 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by v1 balancer dial option WithBalancer to support v1 - // balancer, and also by WithBalancerName dial option. - balancerBuilder balancer.Builder - // This is to support grpclb. - resolverBuilder resolver.Builder + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + // This is used by WithBalancerName dial option. + balancerBuilder balancer.Builder channelzParentID int64 disableServiceConfig bool disableRetry bool @@ -73,6 +70,7 @@ type dialOptions struct { // resolver.ResolveNow(). The user will have no need to configure this, but // we need to be able to configure this in tests. resolveNowBackoff func(int) time.Duration + resolvers []resolver.Builder } // DialOption configures how we set up the connection. @@ -83,7 +81,10 @@ type DialOption interface { // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type EmptyDialOption struct{} func (EmptyDialOption) apply(*dialOptions) {} @@ -199,19 +200,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancer returns a DialOption which sets a load balancer with the v1 API. -// Name resolver will be ignored if this DialOption is specified. -// -// Deprecated: use the new balancer APIs in balancer package and -// WithBalancerName. Will be removed in a future 1.x release. -func WithBalancer(b Balancer) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = &balancerWrapperBuilder{ - b: b, - } - }) -} - // WithBalancerName sets the balancer that the ClientConn will be initialized // with. Balancer registered with balancerName will be used. This function // panics if no balancer was registered by balancerName. @@ -231,13 +219,6 @@ func WithBalancerName(balancerName string) DialOption { }) } -// withResolverBuilder is only for grpclb. -func withResolverBuilder(b resolver.Builder) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.resolverBuilder = b - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. 
// @@ -259,7 +240,10 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption { // using the backoff.DefaultConfig as a base, in cases where you want to // override only a subset of the backoff configuration. // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func WithConnectParams(p ConnectParams) DialOption { return newFuncDialOption(func(o *dialOptions) { o.bs = internalbackoff.Exponential{Config: p.Backoff} @@ -306,6 +290,22 @@ func WithBlock() DialOption { }) } +// WithReturnConnectionError returns a DialOption which makes the client connection +// return a string containing both the last connection error that occurred and +// the context.DeadlineExceeded error. +// Implies WithBlock() +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithReturnConnectionError() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + o.returnLastError = true + }) +} + // WithInsecure returns a DialOption which disables transport security for this // ClientConn. Note that transport security is required unless WithInsecure is // set. @@ -315,6 +315,19 @@ func WithInsecure() DialOption { }) } +// WithNoProxy returns a DialOption which disables the use of proxies for this +// ClientConn. This is ignored if WithDialer or WithContextDialer are used. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithNoProxy() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UseProxy = false + }) +} + // WithTransportCredentials returns a DialOption which configures a connection // level security credentials (e.g., TLS/SSL). This should not be used together // with WithCredentialsBundle. @@ -336,7 +349,10 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { // the ClientConn.WithCreds. This should not be used together with // WithTransportCredentials. // -// This API is experimental. +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func WithCredentialsBundle(b credentials.Bundle) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.CredsBundle = b @@ -365,7 +381,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp } func init() { - internal.WithResolverBuilder = withResolverBuilder internal.WithHealthCheckFunc = withHealthCheckFunc } @@ -402,7 +417,10 @@ func WithStatsHandler(h stats.Handler) DialOption { // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // -// This is an EXPERIMENTAL API. +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func FailOnNonTempDialError(f bool) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.FailOnNonTempDialError = f @@ -421,7 +439,7 @@ func WithUserAgent(s string) DialOption { // for the client transport. 
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { - grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) + logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) kp.Time = internal.KeepaliveMinPingTime } return newFuncDialOption(func(o *dialOptions) { @@ -457,7 +475,7 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption { } // WithChainStreamInterceptor returns a DialOption that specifies the chained -// interceptor for unary RPCs. The first interceptor will be the outer most, +// interceptor for streaming RPCs. The first interceptor will be the outer most, // while the last interceptor will be the inner most wrapper around the real call. // All interceptors added by this method will be chained, and the interceptor // defined by WithStreamInterceptor will always be prepended to the chain. @@ -480,7 +498,10 @@ func WithAuthority(a string) DialOption { // current ClientConn's parent. This function is used in nested channel creation // (e.g. grpclb dial). // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func WithChannelzParentID(id int64) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id @@ -506,7 +527,10 @@ func WithDisableServiceConfig() DialOption { // 2. Resolver does not return a service config or if the resolver returns an // invalid service config. // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func WithDefaultServiceConfig(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.defaultServiceConfigRawJSON = &s @@ -522,7 +546,10 @@ func WithDefaultServiceConfig(s string) DialOption { // default in the future. Until then, it may be enabled by setting the // environment variable "GRPC_GO_RETRY" to "on". // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func WithDisableRetry() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableRetry = true @@ -540,7 +567,10 @@ func WithMaxHeaderListSize(s uint32) DialOption { // WithDisableHealthCheck disables the LB channel health checking for all // SubConns of this ClientConn. // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func WithDisableHealthCheck() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableHealthCheck = true @@ -564,6 +594,7 @@ func defaultDialOptions() dialOptions { copts: transport.ConnectOptions{ WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, + UseProxy: true, }, resolveNowBackoff: internalbackoff.DefaultExponential.Backoff, } @@ -589,3 +620,18 @@ func withResolveNowBackoff(f func(int) time.Duration) DialOption { o.resolveNowBackoff = f }) } + +// WithResolvers allows a list of resolver implementations to be registered +// locally with the ClientConn without needing to be globally registered via +// resolver.Register. They will be matched against the scheme used for the +// current Dial only, and will take precedence over the global registry. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
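The new WithResolvers option documented here (its definition follows just below) registers resolver builders with a single ClientConn instead of the global resolver registry. A toy sketch under assumed names — the "static" scheme, the address, and the builder type are all illustrative:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/resolver"
)

// staticBuilder is a toy resolver that always reports one fixed address for
// the made-up "static" scheme.
type staticBuilder struct{ addr string }

func (b *staticBuilder) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: b.addr}}})
	return nopResolver{}, nil
}
func (b *staticBuilder) Scheme() string { return "static" }

type nopResolver struct{}

func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (nopResolver) Close()                                {}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// The builder is only visible to this ClientConn; it takes precedence
	// over any globally registered resolver for the same scheme.
	cc, err := grpc.DialContext(ctx, "static:///ignored",
		grpc.WithInsecure(),
		grpc.WithResolvers(&staticBuilder{addr: "127.0.0.1:50051"}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer cc.Close()
}
```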
+func WithResolvers(rs ...resolver.Builder) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolvers = append(o.resolvers, rs...) + }) +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index 187adbb11..0022859ad 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -16,6 +16,8 @@ * */ +//go:generate ./regenerate.sh + /* Package grpc implements an RPC system called gRPC. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 195e8448b..6d84f74c7 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -19,7 +19,10 @@ // Package encoding defines the interface for the compressor and codec, and // functions to register and retrieve compressors and codecs. // -// This package is EXPERIMENTAL. +// Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. package encoding import ( @@ -46,10 +49,15 @@ type Compressor interface { // coding header. The result must be static; the result cannot change // between calls. Name() string - // EXPERIMENTAL: if a Compressor implements + // If a Compressor implements // DecompressedSize(compressedBytes []byte) int, gRPC will call it // to determine the size of the buffer allocated for the result of decompression. // Return -1 to indicate unknown size. + // + // Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. } var registeredCompressor = make(map[string]Compressor) diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 66b97a6f6..e1230fdd3 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -21,9 +21,6 @@ package proto import ( - "math" - "sync" - "github.com/golang/protobuf/proto" "google.golang.org/grpc/encoding" ) @@ -38,73 +35,14 @@ func init() { // codec is a Codec implementation with protobuf. It is the default codec for gRPC. 
type codec struct{} -type cachedProtoBuffer struct { - lastMarshaledSize uint32 - proto.Buffer -} - -func capToMaxInt32(val int) uint32 { - if val > math.MaxInt32 { - return uint32(math.MaxInt32) - } - return uint32(val) -} - -func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { - protoMsg := v.(proto.Message) - newSlice := make([]byte, 0, cb.lastMarshaledSize) - - cb.SetBuf(newSlice) - cb.Reset() - if err := cb.Marshal(protoMsg); err != nil { - return nil, err - } - out := cb.Bytes() - cb.lastMarshaledSize = capToMaxInt32(len(out)) - return out, nil -} - func (codec) Marshal(v interface{}) ([]byte, error) { - if pm, ok := v.(proto.Marshaler); ok { - // object can marshal itself, no need for buffer - return pm.Marshal() - } - - cb := protoBufferPool.Get().(*cachedProtoBuffer) - out, err := marshal(v, cb) - - // put back buffer and lose the ref to the slice - cb.SetBuf(nil) - protoBufferPool.Put(cb) - return out, err + return proto.Marshal(v.(proto.Message)) } func (codec) Unmarshal(data []byte, v interface{}) error { - protoMsg := v.(proto.Message) - protoMsg.Reset() - - if pu, ok := protoMsg.(proto.Unmarshaler); ok { - // object can unmarshal itself, no need for buffer - return pu.Unmarshal(data) - } - - cb := protoBufferPool.Get().(*cachedProtoBuffer) - cb.SetBuf(data) - err := cb.Unmarshal(protoMsg) - cb.SetBuf(nil) - protoBufferPool.Put(cb) - return err + return proto.Unmarshal(data, v.(proto.Message)) } func (codec) Name() string { return Name } - -var protoBufferPool = &sync.Pool{ - New: func() interface{} { - return &cachedProtoBuffer{ - Buffer: proto.Buffer{}, - lastMarshaledSize: 16, - } - }, -} diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index 237836130..cab74e557 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -3,14 +3,15 @@ module google.golang.org/grpc go 1.11 require ( - github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 - github.com/envoyproxy/protoc-gen-validate v0.1.0 + github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 + github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/mock v1.1.1 - github.com/golang/protobuf v1.3.2 - github.com/google/go-cmp v0.2.0 + github.com/golang/protobuf v1.4.2 + github.com/google/go-cmp v0.5.0 + github.com/google/uuid v1.1.2 golang.org/x/net v0.0.0-20190311183353-d8887717615a golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a - google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 + google.golang.org/protobuf v1.25.0 ) diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum index dd5d0cee7..77ee70b44 100644 --- a/vendor/google.golang.org/grpc/go.sum +++ b/vendor/google.golang.org/grpc/go.sum @@ -1,22 +1,49 @@ cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad h1:EmNYJhPYy0pOFjCx2PrgtaBXmee0iUX9hLlxE1xHOJE= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= 
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -41,13 +68,32 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go new file mode 100644 index 000000000..8358dd6e2 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "fmt" + + "google.golang.org/grpc/internal/grpclog" +) + +// componentData records the settings for a component. +type componentData struct { + name string +} + +var cache = map[string]*componentData{} + +func (c *componentData) InfoDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.InfoDepth(depth+1, args...) +} + +func (c *componentData) WarningDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.WarningDepth(depth+1, args...) +} + +func (c *componentData) ErrorDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.ErrorDepth(depth+1, args...) +} + +func (c *componentData) FatalDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.FatalDepth(depth+1, args...) +} + +func (c *componentData) Info(args ...interface{}) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warning(args ...interface{}) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Error(args ...interface{}) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatal(args ...interface{}) { + c.FatalDepth(1, args...) 
+} + +func (c *componentData) Infof(format string, args ...interface{}) { + c.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Warningf(format string, args ...interface{}) { + c.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Errorf(format string, args ...interface{}) { + c.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Fatalf(format string, args ...interface{}) { + c.FatalDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Infoln(args ...interface{}) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warningln(args ...interface{}) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Errorln(args ...interface{}) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatalln(args ...interface{}) { + c.FatalDepth(1, args...) +} + +func (c *componentData) V(l int) bool { + return V(l) +} + +// Component creates a new component and returns it for logging. If a component +// with the name already exists, nothing will be created and it will be +// returned. SetLoggerV2 will panic if it is called with a logger created by +// Component. +func Component(componentName string) DepthLoggerV2 { + if cData, ok := cache[componentName]; ok { + return cData + } + c := &componentData{componentName} + cache[componentName] = c + return c +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 874ea6d98..c8bb2be34 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -26,64 +26,70 @@ // verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. package grpclog // import "google.golang.org/grpc/grpclog" -import "os" +import ( + "os" -var logger = newLoggerV2() + "google.golang.org/grpc/internal/grpclog" +) + +func init() { + SetLoggerV2(newLoggerV2()) +} // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return logger.V(l) + return grpclog.Logger.V(l) } // Info logs to the INFO log. func Info(args ...interface{}) { - logger.Info(args...) + grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...interface{}) { - logger.Infof(format, args...) + grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...interface{}) { - logger.Infoln(args...) + grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...interface{}) { - logger.Warning(args...) + grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...interface{}) { - logger.Warningf(format, args...) + grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...interface{}) { - logger.Warningln(args...) + grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...interface{}) { - logger.Error(args...) + grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...interface{}) { - logger.Errorf(format, args...) + grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. 
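The new grpclog.Component constructor above gives each package a named logger whose messages are prefixed with the component name, which is how the vendored packages now obtain loggers (Component("core"), Component("binarylog"), and so on). A minimal sketch with an assumed component name; note the added rule that SetLoggerV2 panics if handed a logger created by Component:

```go
package main

import (
	"google.golang.org/grpc/grpclog"
)

// logger is a component-scoped logger; output is prefixed with "[mycomponent]".
// The component name is an assumption for illustration.
var logger = grpclog.Component("mycomponent")

func main() {
	logger.Infof("starting up, verbose(2)=%v", logger.V(2))
	logger.Warning("this is only a sketch")
}
```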
func Errorln(args ...interface{}) { - logger.Errorln(args...) + grpclog.Logger.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...interface{}) { - logger.Fatal(args...) + grpclog.Logger.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -91,7 +97,7 @@ func Fatal(args ...interface{}) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...interface{}) { - logger.Fatalf(format, args...) + grpclog.Logger.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -99,7 +105,7 @@ func Fatalf(format string, args ...interface{}) { // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. // It calle os.Exit()) with exit code 1. func Fatalln(args ...interface{}) { - logger.Fatalln(args...) + grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -108,19 +114,19 @@ func Fatalln(args ...interface{}) { // // Deprecated: use Info. func Print(args ...interface{}) { - logger.Info(args...) + grpclog.Logger.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...interface{}) { - logger.Infof(format, args...) + grpclog.Logger.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...interface{}) { - logger.Infoln(args...) + grpclog.Logger.Infoln(args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index 097494f71..ef06a4822 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,6 +18,8 @@ package grpclog +import "google.golang.org/grpc/internal/grpclog" + // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. @@ -35,7 +37,7 @@ type Logger interface { // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - logger = &loggerWrapper{Logger: l} + grpclog.Logger = &loggerWrapper{Logger: l} } // loggerWrapper wraps Logger into a LoggerV2. diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index d49325776..4ee33171e 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -24,6 +24,8 @@ import ( "log" "os" "strconv" + + "google.golang.org/grpc/internal/grpclog" ) // LoggerV2 does underlying logging work for grpclog. @@ -65,7 +67,11 @@ type LoggerV2 interface { // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. func SetLoggerV2(l LoggerV2) { - logger = l + if _, ok := l.(*componentData); ok { + panic("cannot use component logger as grpclog logger") + } + grpclog.Logger = l + grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) } const ( @@ -193,3 +199,23 @@ func (g *loggerT) Fatalf(format string, args ...interface{}) { func (g *loggerT) V(l int) bool { return l <= g.v } + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. 
+// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 interface { + LoggerV2 + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + InfoDepth(depth int, args ...interface{}) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + WarningDepth(depth int, args ...interface{}) + // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + ErrorDepth(depth int, args ...interface{}) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh index 7c7bcada5..15ff9facd 100644 --- a/vendor/google.golang.org/grpc/install_gae.sh +++ b/vendor/google.golang.org/grpc/install_gae.sh @@ -3,4 +3,4 @@ TMP=$(mktemp -d /tmp/sdk.XXX) \ && curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ && unzip -q $TMP.zip -d $TMP \ -&& export PATH="$PATH:$TMP/go_appengine" +&& export PATH="$PATH:$TMP/go_appengine" \ No newline at end of file diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index 8b7350022..668e0adcf 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -25,17 +25,41 @@ import ( // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error -// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC -// and it is the responsibility of the interceptor to call it. -// This is an EXPERIMENTAL API. +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. +// Unary interceptors can be specified as a DialOption, using +// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a +// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC +// delegates all unary RPC invocations to the interceptor, and it is the +// responsibility of the interceptor to call invoker to complete the processing +// of the RPC. +// +// method is the RPC name. req and reply are the corresponding request and +// response messages. cc is the ClientConn on which the RPC was invoked. invoker +// is the handler to complete the RPC and it is the responsibility of the +// interceptor to call it. opts contain all applicable call options, including +// defaults from the ClientConn as well as per-call options. +// +// The returned error must be compatible with the status package. type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) -// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O -// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. 
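The expanded UnaryClientInterceptor comment above spells out the contract: the interceptor receives the method name, request and reply messages, ClientConn and call options, must call invoker to complete the RPC, and must return a status-compatible error. A hypothetical timing interceptor along those lines (the target address is an assumption):

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/status"
)

// timingInterceptor logs the duration and status code of every unary RPC.
// Per the contract above, it must call invoker to actually perform the RPC.
func timingInterceptor(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("%s took %v, code=%v", method, time.Since(start), status.Code(err))
	return err
}

func main() {
	// "localhost:50051" is an assumed address.
	cc, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(timingInterceptor),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer cc.Close()
}
```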
-// This is an EXPERIMENTAL API. +// StreamClientInterceptor intercepts the creation of a ClientStream. Stream +// interceptors can be specified as a DialOption, using WithStreamInterceptor() +// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream +// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations +// to the interceptor, and it is the responsibility of the interceptor to call +// streamer. +// +// desc contains a description of the stream. cc is the ClientConn on which the +// RPC was invoked. streamer is the handler to create a ClientStream and it is +// the responsibility of the interceptor to call it. opts contain all applicable +// call options, including defaults from the ClientConn as well as per-call +// options. +// +// StreamClientInterceptor may return a custom ClientStream to intercept all I/O +// operations. The returned error must be compatible with the status package. type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) // UnaryServerInfo consists of various information about a unary RPC on diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 8b1051674..5cc3aeddb 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -25,6 +25,7 @@ import ( "os" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcutil" ) // Logger is the global binary logger. It can be used to get binary logger for @@ -39,6 +40,8 @@ type Logger interface { // It is used to get a methodLogger for each individual method. var binLogger Logger +var grpclogLogger = grpclog.Component("binarylog") + // SetLogger sets the binarg logger. // // Only call this at init time. @@ -146,9 +149,9 @@ func (l *logger) setBlacklist(method string) error { // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. func (l *logger) getMethodLogger(methodName string) *MethodLogger { - s, m, err := parseMethodName(methodName) + s, m, err := grpcutil.ParseMethod(methodName) if err != nil { - grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err) + grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } if ml, ok := l.methods[s+"/"+m]; ok { diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index be30d0e65..d8f4e7602 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -24,8 +24,6 @@ import ( "regexp" "strconv" "strings" - - "google.golang.org/grpc/grpclog" ) // NewLoggerFromConfigString reads the string and build a logger. 
It can be used @@ -52,7 +50,7 @@ func NewLoggerFromConfigString(s string) Logger { methods := strings.Split(s, ",") for _, method := range methods { if err := l.fillMethodLoggerWithConfigString(method); err != nil { - grpclog.Warningf("failed to parse binary log config: %v", err) + grpclogLogger.Warningf("failed to parse binary log config: %v", err) return nil } } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 160f6e861..0cdb41831 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -27,7 +27,6 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -66,7 +65,7 @@ func newMethodLogger(h, m uint64) *MethodLogger { callID: idGen.next(), idWithinCallGen: &callIDGenerator{}, - sink: defaultSink, // TODO(blog): make it plugable. + sink: DefaultSink, // TODO(blog): make it plugable. } } @@ -219,12 +218,12 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry { if m, ok := c.Message.(proto.Message); ok { data, err = proto.Marshal(m) if err != nil { - grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) } } else if b, ok := c.Message.([]byte); ok { data = b } else { - grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } ret := &pb.GrpcLogEntry{ Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, @@ -259,12 +258,12 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry { if m, ok := c.Message.(proto.Message); ok { data, err = proto.Marshal(m) if err != nil { - grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) } } else if b, ok := c.Message.([]byte); ok { data = b } else { - grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } ret := &pb.GrpcLogEntry{ Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, @@ -315,7 +314,7 @@ type ServerTrailer struct { func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { st, ok := status.FromError(c.Err) if !ok { - grpclog.Info("binarylogging: error in trailer is not a status error") + grpclogLogger.Info("binarylogging: error in trailer is not a status error") } var ( detailsBytes []byte @@ -325,7 +324,7 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { if stProto != nil && len(stProto.Details) != 0 { detailsBytes, err = proto.Marshal(stProto) if err != nil { - grpclog.Infof("binarylogging: failed to marshal status proto: %v", err) + grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } ret := &pb.GrpcLogEntry{ diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh deleted file mode 100644 index 113d40cbe..000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# Copyright 2018 gRPC authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eux -o pipefail - -TMP=$(mktemp -d) - -function finish { - rm -rf "$TMP" -} -trap finish EXIT - -pushd "$TMP" -mkdir -p grpc/binarylog/grpc_binarylog_v1 -curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto - -protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto -popd -rm -f ./grpc_binarylog_v1/*.pb.go -cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/ - diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index a2e7c346d..7d7a3056b 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -21,32 +21,23 @@ package binarylog import ( "bufio" "encoding/binary" - "fmt" "io" - "io/ioutil" "sync" "time" "github.com/golang/protobuf/proto" pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - "google.golang.org/grpc/grpclog" ) var ( - defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). + // DefaultSink is the sink where the logs will be written to. It's exported + // for the binarylog package to update. + DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). ) -// SetDefaultSink sets the sink where binary logs will be written to. -// -// Not thread safe. Only set during initialization. -func SetDefaultSink(s Sink) { - if defaultSink != nil { - defaultSink.Close() - } - defaultSink = s -} - // Sink writes log entry into the binary log sink. +// +// sink is a copy of the exported binarylog.Sink, to avoid circular dependency. type Sink interface { // Write will be called to write the log entry into the sink. // @@ -67,7 +58,7 @@ func (ns *noopSink) Close() error { return nil } // message is prefixed with a 4 byte big endian unsigned integer as the length. // // No buffer is done, Close() doesn't try to close the writer. -func newWriterSink(w io.Writer) *writerSink { +func newWriterSink(w io.Writer) Sink { return &writerSink{out: w} } @@ -78,7 +69,7 @@ type writerSink struct { func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { - grpclog.Infof("binary logging: failed to marshal proto message: %v", err) + grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err) } hdr := make([]byte, 4) binary.BigEndian.PutUint32(hdr, uint32(len(b))) @@ -93,17 +84,17 @@ func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { func (ws *writerSink) Close() error { return nil } -type bufWriteCloserSink struct { +type bufferedSink struct { mu sync.Mutex closer io.Closer - out *writerSink // out is built on buf. + out Sink // out is built on buf. buf *bufio.Writer // buf is kept for flush. 
writeStartOnce sync.Once writeTicker *time.Ticker } -func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error { +func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { // Start the write loop when Write is called. fs.writeStartOnce.Do(fs.startFlushGoroutine) fs.mu.Lock() @@ -119,44 +110,50 @@ const ( bufFlushDuration = 60 * time.Second ) -func (fs *bufWriteCloserSink) startFlushGoroutine() { +func (fs *bufferedSink) startFlushGoroutine() { fs.writeTicker = time.NewTicker(bufFlushDuration) go func() { for range fs.writeTicker.C { fs.mu.Lock() - fs.buf.Flush() + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } fs.mu.Unlock() } }() } -func (fs *bufWriteCloserSink) Close() error { +func (fs *bufferedSink) Close() error { if fs.writeTicker != nil { fs.writeTicker.Stop() } fs.mu.Lock() - fs.buf.Flush() - fs.closer.Close() - fs.out.Close() + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + if err := fs.closer.Close(); err != nil { + grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err) + } + if err := fs.out.Close(); err != nil { + grpclogLogger.Warningf("failed to close the Sink: %v", err) + } fs.mu.Unlock() return nil } -func newBufWriteCloserSink(o io.WriteCloser) Sink { +// NewBufferedSink creates a binary log sink with the given WriteCloser. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// Content is kept in a buffer, and is flushed every 60 seconds. +// +// Close closes the WriteCloser. +func NewBufferedSink(o io.WriteCloser) Sink { bufW := bufio.NewWriter(o) - return &bufWriteCloserSink{ + return &bufferedSink{ closer: o, out: newWriterSink(bufW), buf: bufW, } } - -// NewTempFileSink creates a temp file and returns a Sink that writes to this -// file. -func NewTempFileSink() (Sink, error) { - tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt") - if err != nil { - return nil, fmt.Errorf("failed to create temp file: %v", err) - } - return newBufWriteCloserSink(tempFile), nil -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go deleted file mode 100644 index 15dc7803d..000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/util.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package binarylog - -import ( - "errors" - "strings" -) - -// parseMethodName splits service and method from the input. It expects format -// "/service/method". -// -// TODO: move to internal/grpcutil. 
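For reference, a small self-contained sketch (not part of the vendored files) of the framing used by writerSink above: each marshalled GrpcLogEntry is written with a 4-byte big-endian length prefix. A plain byte slice stands in for the proto message here.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// writeFramed writes payload prefixed with its length as a 4-byte big-endian
// unsigned integer, the same layout writerSink uses for marshalled log entries.
func writeFramed(out *bytes.Buffer, payload []byte) {
	hdr := make([]byte, 4)
	binary.BigEndian.PutUint32(hdr, uint32(len(payload)))
	out.Write(hdr)
	out.Write(payload)
}

func main() {
	var buf bytes.Buffer
	writeFramed(&buf, []byte("entry-1")) // stand-in for a marshalled GrpcLogEntry
	fmt.Printf("% x\n", buf.Bytes())     // 00 00 00 07 65 6e 74 72 79 2d 31
}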
-func parseMethodName(methodName string) (service, method string, _ error) { - if !strings.HasPrefix(methodName, "/") { - return "", "", errors.New("invalid method name: should start with /") - } - methodName = methodName[1:] - - pos := strings.LastIndex(methodName, "/") - if pos < 0 { - return "", "", errors.New("invalid method name: suffix /method is missing") - } - return methodName[:pos], methodName[pos+1:], nil -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index f0744f993..f73141393 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -216,7 +216,7 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 { // by pid). It returns the unique channelz tracking id assigned to this subchannel. func RegisterSubChannel(c Channel, pid int64, ref string) int64 { if pid == 0 { - grpclog.Error("a SubChannel's parent id cannot be 0") + logger.Error("a SubChannel's parent id cannot be 0") return 0 } id := idGen.genID() @@ -253,7 +253,7 @@ func RegisterServer(s Server, ref string) int64 { // this listen socket. func RegisterListenSocket(s Socket, pid int64, ref string) int64 { if pid == 0 { - grpclog.Error("a ListenSocket's parent id cannot be 0") + logger.Error("a ListenSocket's parent id cannot be 0") return 0 } id := idGen.genID() @@ -268,7 +268,7 @@ func RegisterListenSocket(s Socket, pid int64, ref string) int64 { // this normal socket. func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { if pid == 0 { - grpclog.Error("a NormalSocket's parent id cannot be 0") + logger.Error("a NormalSocket's parent id cannot be 0") return 0 } id := idGen.genID() @@ -294,7 +294,17 @@ type TraceEventDesc struct { } // AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(id int64, desc *TraceEventDesc) { +func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { + for d := desc; d != nil; d = d.Parent { + switch d.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, d.Desc) + case CtWarning: + l.WarningDepth(depth+1, d.Desc) + case CtError: + l.ErrorDepth(depth+1, d.Desc) + } + } if getMaxTraceEntry() == 0 { return } diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go new file mode 100644 index 000000000..b0013f9c8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -0,0 +1,102 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("channelz") + +// Info logs and adds a trace event if channelz is on. 
+func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) + } else { + l.InfoDepth(1, args...) + } +} + +// Infof logs and adds a trace event if channelz is on. +func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtInfo, + }) + } else { + l.InfoDepth(1, msg) + } +} + +// Warning logs and adds a trace event if channelz is on. +func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) + } else { + l.WarningDepth(1, args...) + } +} + +// Warningf logs and adds a trace event if channelz is on. +func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtWarning, + }) + } else { + l.WarningDepth(1, msg) + } +} + +// Error logs and adds a trace event if channelz is on. +func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) + } else { + l.ErrorDepth(1, args...) + } +} + +// Errorf logs and adds a trace event if channelz is on. +func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtError, + }) + } else { + l.ErrorDepth(1, msg) + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 17c2274cb..3c595d154 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -26,7 +26,6 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" ) // entry represents a node in the channelz database. @@ -60,17 +59,17 @@ func (d *dummyEntry) addChild(id int64, e entry) { // the addrConn will create a new transport. And when registering the new transport in // channelz, its parent addrConn could have already been torn down and deleted // from channelz tracking, and thus reach the code here. - grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) + logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) } func (d *dummyEntry) deleteChild(id int64) { // It is possible for a normal program to reach here under race condition. // Refer to the example described in addChild(). 
- grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) + logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) } func (d *dummyEntry) triggerDelete() { - grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) + logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) } func (*dummyEntry) deleteSelfIfReady() { @@ -215,7 +214,7 @@ func (c *channel) addChild(id int64, e entry) { case *channel: c.nestedChans[id] = v.refName default: - grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) } } @@ -326,7 +325,7 @@ func (sc *subChannel) addChild(id int64, e entry) { if v, ok := e.(*normalSocket); ok { sc.sockets[id] = v.refName } else { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) } } @@ -493,11 +492,11 @@ type listenSocket struct { } func (ls *listenSocket) addChild(id int64, e entry) { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) } func (ls *listenSocket) deleteChild(id int64) { - grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id) + logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) } func (ls *listenSocket) triggerDelete() { @@ -506,7 +505,7 @@ func (ls *listenSocket) triggerDelete() { } func (ls *listenSocket) deleteSelfIfReady() { - grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") + logger.Errorf("cannot call deleteSelfIfReady on a listen socket") } func (ls *listenSocket) getParentID() int64 { @@ -522,11 +521,11 @@ type normalSocket struct { } func (ns *normalSocket) addChild(id int64, e entry) { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) } func (ns *normalSocket) deleteChild(id int64) { - grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id) + logger.Errorf("cannot delete a child (id = %d) from a normal socket", id) } func (ns *normalSocket) triggerDelete() { @@ -535,7 +534,7 @@ func (ns *normalSocket) triggerDelete() { } func (ns *normalSocket) deleteSelfIfReady() { - grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") + logger.Errorf("cannot call deleteSelfIfReady on a normal socket") } func (ns *normalSocket) getParentID() int64 { @@ -594,7 +593,7 @@ func (s *server) addChild(id int64, e entry) { case *listenSocket: s.listenSockets[id] = v.refName default: - grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) } } @@ -673,10 +672,10 @@ func (c *channelTrace) clear() { type Severity int const ( - // CtUNKNOWN indicates unknown severity of a trace event. - CtUNKNOWN Severity = iota - // CtINFO indicates info level severity of a trace event. - CtINFO + // CtUnknown indicates unknown severity of a trace event. + CtUnknown Severity = iota + // CtInfo indicates info level severity of a trace event. + CtInfo // CtWarning indicates warning level severity of a trace event. 
CtWarning // CtError indicates error level severity of a trace event. diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go index 79edbefc4..19c2fc521 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -22,8 +22,6 @@ package channelz import ( "sync" - - "google.golang.org/grpc/grpclog" ) var once sync.Once @@ -39,6 +37,6 @@ type SocketOptionData struct { // Windows OS doesn't support Socket Option func (s *SocketOptionData) Getsockopt(fd uintptr) { once.Do(func() { - grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.") + logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.") }) } diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go new file mode 100644 index 000000000..be70b6cdf --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -0,0 +1,77 @@ +// +build !appengine + +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials defines APIs for parsing SPIFFE ID. +// +// All APIs in this package are experimental. +package credentials + +import ( + "crypto/tls" + "crypto/x509" + "net/url" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("credentials") + +// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format +// is invalid, return nil with warning. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 { + return nil + } + return SPIFFEIDFromCert(state.PeerCertificates[0]) +} + +// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE +// ID format is invalid, return nil with warning. +func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL { + if cert == nil || cert.URIs == nil { + return nil + } + var spiffeID *url.URL + for _, uri := range cert.URIs { + if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") { + continue + } + // From this point, we assume the uri is intended for a SPIFFE ID. + if len(uri.String()) > 2048 { + logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes") + return nil + } + if len(uri.Host) == 0 || len(uri.Path) == 0 { + logger.Warning("invalid SPIFFE ID: domain or workload ID is empty") + return nil + } + if len(uri.Host) > 255 { + logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters") + return nil + } + // A valid SPIFFE certificate can only have exactly one URI SAN field. 
+ if len(cert.URIs) > 1 { + logger.Warning("invalid SPIFFE ID: multiple URI SANs") + return nil + } + spiffeID = uri + } + return spiffeID +} diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go similarity index 50% rename from vendor/google.golang.org/grpc/internal/transport/log.go rename to vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go index 879df80c4..af6f57719 100644 --- a/vendor/google.golang.org/grpc/internal/transport/log.go +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go @@ -1,6 +1,8 @@ +// +build appengine + /* * - * Copyright 2017 gRPC authors. + * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,29 +18,14 @@ * */ -// This file contains wrappers for grpclog functions. -// The transport package only logs to verbose level 2 by default. +package credentials -package transport +import ( + "crypto/tls" + "net/url" +) -import "google.golang.org/grpc/grpclog" - -const logLevel = 2 - -func infof(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Infof(format, args...) - } -} - -func warningf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Warningf(format, args...) - } -} - -func errorf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Errorf(format, args...) - } +// SPIFFEIDFromState is a no-op for appengine builds. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + return nil } diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go similarity index 96% rename from vendor/google.golang.org/grpc/credentials/internal/syscallconn.go rename to vendor/google.golang.org/grpc/internal/credentials/syscallconn.go index 2f4472bec..f499a614c 100644 --- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -18,8 +18,7 @@ * */ -// Package internal contains credentials-internal code. -package internal +package credentials import ( "net" diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go similarity index 97% rename from vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go rename to vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go index d4346e9ea..a6144cd66 100644 --- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go @@ -18,7 +18,7 @@ * */ -package internal +package credentials import ( "net" diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go new file mode 100644 index 000000000..55664fa46 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import "crypto/tls" + +const alpnProtoStrH2 = "h2" + +// AppendH2ToNextProtos appends h2 to next protos. +func AppendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// CloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. +func CloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 3ee8740f1..73931a94b 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -25,11 +25,14 @@ import ( ) const ( - prefix = "GRPC_GO_" - retryStr = prefix + "RETRY" + prefix = "GRPC_GO_" + retryStr = prefix + "RETRY" + txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" ) var ( // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". Retry = strings.EqualFold(os.Getenv(retryStr), "on") + // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go new file mode 100644 index 000000000..e6f975cbf --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog (internal) defines depth logging for grpc. +package grpclog + +import ( + "os" +) + +// Logger is the logger used for the non-depth log functions. +var Logger LoggerV2 + +// DepthLogger is the logger used for the depth log functions. +var DepthLogger DepthLoggerV2 + +// InfoDepth logs to the INFO log at the specified depth. +func InfoDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.InfoDepth(depth, args...) + } else { + Logger.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. 
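As a quick illustration of the envconfig change above (a standalone sketch, not part of the vendored files): retries remain opt-in via GRPC_GO_RETRY=on, while TXT lookup errors are ignored unless GRPC_GO_IGNORE_TXT_ERRORS is explicitly set to "false".

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	os.Setenv("GRPC_GO_RETRY", "on")                // opt in to retries
	os.Setenv("GRPC_GO_IGNORE_TXT_ERRORS", "false") // surface TXT lookup errors

	retry := strings.EqualFold(os.Getenv("GRPC_GO_RETRY"), "on")
	txtErrIgnore := !strings.EqualFold(os.Getenv("GRPC_GO_IGNORE_TXT_ERRORS"), "false")

	fmt.Println(retry, txtErrIgnore) // true false
}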
+func WarningDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.WarningDepth(depth, args...) + } else { + Logger.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +func ErrorDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.ErrorDepth(depth, args...) + } else { + Logger.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +func FatalDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.FatalDepth(depth, args...) + } else { + Logger.Fatalln(args...) + } + os.Exit(1) +} + +// LoggerV2 does underlying logging work for grpclog. +// This is a copy of the LoggerV2 defined in the external grpclog package. It +// is defined here to avoid a circular dependency. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// This is a copy of the DepthLoggerV2 defined in the external grpclog package. +// It is defined here to avoid a circular dependency. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 interface { + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + InfoDepth(depth int, args ...interface{}) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. 
+ WarningDepth(depth int, args ...interface{}) + // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + ErrorDepth(depth int, args ...interface{}) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go new file mode 100644 index 000000000..82af70e96 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "fmt" +) + +// PrefixLogger does logging with a prefix. +// +// Logging method on a nil logs without any prefix. +type PrefixLogger struct { + logger DepthLoggerV2 + prefix string +} + +// Infof does info logging. +func (pl *PrefixLogger) Infof(format string, args ...interface{}) { + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// Warningf does warning logging. +func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) + return + } + WarningDepth(1, fmt.Sprintf(format, args...)) +} + +// Errorf does error logging. +func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) + return + } + ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +// Debugf does info logging at verbose level 2. +func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { + if !Logger.V(2) { + return + } + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// NewPrefixLogger creates a prefix logger with the given prefix. +func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { + return &PrefixLogger{logger: logger, prefix: prefix} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go new file mode 100644 index 000000000..b25b0baec --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strconv" + "time" +) + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if d%r > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// EncodeDuration encodes the duration to the format grpc-timeout header +// accepts. +// +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +func EncodeDuration(t time.Duration) string { + // TODO: This is simplistic and not bandwidth efficient. Improve it. + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go new file mode 100644 index 000000000..6f22bd891 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +type mdExtraKey struct{} + +// WithExtraMetadata creates a new context with incoming md attached. +func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context { + return context.WithValue(ctx, mdExtraKey{}, md) +} + +// ExtraMetadata returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. 
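To make the grpc-timeout encoding above concrete, here is a self-contained sketch (not part of the vendored files) that mirrors EncodeDuration rather than importing the internal package, with a few worked values.

package main

import (
	"fmt"
	"strconv"
	"time"
)

const maxTimeoutValue int64 = 100000000 - 1

// div divides and rounds up, as in the vendored helper.
func div(d, r time.Duration) int64 {
	if d%r > 0 {
		return int64(d/r + 1)
	}
	return int64(d / r)
}

// encode picks the coarsest unit that keeps the value within 8 digits.
func encode(t time.Duration) string {
	if t <= 0 {
		return "0n"
	}
	units := []struct {
		d      time.Duration
		suffix string
	}{
		{time.Nanosecond, "n"},
		{time.Microsecond, "u"},
		{time.Millisecond, "m"},
		{time.Second, "S"},
		{time.Minute, "M"},
	}
	for _, u := range units {
		if v := div(t, u.d); v <= maxTimeoutValue {
			return strconv.FormatInt(v, 10) + u.suffix
		}
	}
	return strconv.FormatInt(div(t, time.Hour), 10) + "H"
}

func main() {
	fmt.Println(encode(50 * time.Millisecond)) // 50000000n
	fmt.Println(encode(10 * time.Second))      // 10000000u
	fmt.Println(encode(2 * time.Hour))         // 7200000m
}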
+func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) { + md, ok = ctx.Value(mdExtraKey{}).(metadata.MD) + return +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go new file mode 100644 index 000000000..4e7475060 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -0,0 +1,84 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "errors" + "strings" +) + +// ParseMethod splits service and method from the input. It expects format +// "/service/method". +// +func ParseMethod(methodName string) (service, method string, _ error) { + if !strings.HasPrefix(methodName, "/") { + return "", "", errors.New("invalid method name: should start with /") + } + methodName = methodName[1:] + + pos := strings.LastIndex(methodName, "/") + if pos < 0 { + return "", "", errors.New("invalid method name: suffix /method is missing") + } + return methodName[:pos], methodName[pos+1:], nil +} + +const baseContentType = "application/grpc" + +// ContentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already. +func ContentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// ContentType builds full content type with the given sub-type. +// +// contentSubtype is assumed to be lowercase +func ContentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go new file mode 100644 index 000000000..8833021da --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/target.go @@ -0,0 +1,89 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides a bunch of utility functions to be used across the +// gRPC codebase. +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/resolver" +) + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", "", false) instead. +func split2(s, sep string) (string, string, bool) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", "", false + } + return spl[0], spl[1], true +} + +// ParseTarget splits target into a resolver.Target struct containing scheme, +// authority and endpoint. skipUnixColonParsing indicates that the parse should +// not parse "unix:[path]" cases. This should be true in cases where a custom +// dialer is present, to prevent a behavior change. +// +// If target is not a valid scheme://authority/endpoint as specified in +// https://github.com/grpc/grpc/blob/master/doc/naming.md, +// it returns {Endpoint: target}. +func ParseTarget(target string, skipUnixColonParsing bool) (ret resolver.Target) { + var ok bool + if strings.HasPrefix(target, "unix-abstract:") { + if strings.HasPrefix(target, "unix-abstract://") { + // Maybe, with Authority specified, try to parse it + var remain string + ret.Scheme, remain, _ = split2(target, "://") + ret.Authority, ret.Endpoint, ok = split2(remain, "/") + if !ok { + // No Authority, add the "//" back + ret.Endpoint = "//" + remain + } else { + // Found Authority, add the "/" back + ret.Endpoint = "/" + ret.Endpoint + } + } else { + // Without Authority specified, split target on ":" + ret.Scheme, ret.Endpoint, _ = split2(target, ":") + } + return ret + } + ret.Scheme, ret.Endpoint, ok = split2(target, "://") + if !ok { + if strings.HasPrefix(target, "unix:") && !skipUnixColonParsing { + // Handle the "unix:[local/path]" and "unix:[/absolute/path]" cases, + // because splitting on :// only handles the + // "unix://[/absolute/path]" case. Only handle if the dialer is nil, + // to avoid a behavior change with custom dialers. + return resolver.Target{Scheme: "unix", Endpoint: target[len("unix:"):]} + } + return resolver.Target{Endpoint: target} + } + ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") + if !ok { + return resolver.Target{Endpoint: target} + } + if ret.Scheme == "unix" { + // Add the "/" back in the unix case, so the unix resolver receives the + // actual endpoint in the "unix://[/absolute/path]" case. 
+ ret.Endpoint = "/" + ret.Endpoint + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index eae18e18c..1e2834c70 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -25,11 +25,10 @@ import ( "time" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/serviceconfig" ) var ( - // WithResolverBuilder is set by dialoptions.go - WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption // WithHealthCheckFunc is set by dialoptions.go WithHealthCheckFunc interface{} // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking @@ -39,17 +38,33 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // StatusRawProto is exported by status/status.go. This func returns a - // pointer to the wrapped Status proto for a given status.Status without a - // call to proto.Clone(). The returned Status proto should not be mutated by - // the caller. - StatusRawProto interface{} // func (*status.Status) *spb.Status // NewRequestInfoContext creates a new context based on the argument context attaching // the passed in RequestInfo to the new context. NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context + // NewClientHandshakeInfoContext returns a copy of the input context with + // the passed in ClientHandshakeInfo struct added to it. + NewClientHandshakeInfoContext interface{} // func(context.Context, credentials.ClientHandshakeInfo) context.Context // ParseServiceConfigForTesting is for creating a fake // ClientConn for resolver testing only ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // EqualServiceConfigForTesting is for testing service config generation and + // parsing. Both a and b should be returned by ParseServiceConfigForTesting. + // This function compares the config without rawJSON stripped, in case the + // there's difference in white space. + EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool + // GetCertificateProviderBuilder returns the registered builder for the + // given name. This is set by package certprovider for use from xDS + // bootstrap code while parsing certificate provider configs in the + // bootstrap file. + GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo + // stored in the passed in attributes. This is set by + // credentials/xds/xds.go. + GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + // GetServerCredentials returns the transport credentials configured on a + // gRPC server. An xDS-enabled server needs to know what type of credentials + // is configured on the underlying gRPC server. This is set by server.go. + GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials ) // HealthChecker defines the signature of the client-side LB channel health checking function. 
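For orientation, a minimal sketch (not part of the vendored files) of how dial targets flow through the ParseTarget helper above and into grpc.Dial; the socket path and port are hypothetical, and grpc.WithInsecure is the plaintext dial option at this gRPC version.

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// ParseTarget (with skipUnixColonParsing=false) splits targets roughly as:
	//   "dns:///example.com:443" -> {Scheme: "dns", Authority: "", Endpoint: "example.com:443"}
	//   "unix:///tmp/app.sock"   -> {Scheme: "unix", Endpoint: "/tmp/app.sock"}
	//   "unix:/tmp/app.sock"     -> {Scheme: "unix", Endpoint: "/tmp/app.sock"}
	//   "localhost:50051"        -> {Endpoint: "localhost:50051"} (default scheme applies)
	conn, err := grpc.Dial("unix:///tmp/app.sock", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// The unix resolver registered elsewhere in this diff resolves the endpoint to
	// the socket address.
}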
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go new file mode 100644 index 000000000..302262613 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata contains functions to set and get metadata from addresses. +// +// This package is experimental. +package metadata + +import ( + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +type mdKeyType string + +const mdKey = mdKeyType("grpc.internal.address.metadata") + +// Get returns the metadata of addr. +func Get(addr resolver.Address) metadata.MD { + attrs := addr.Attributes + if attrs == nil { + return nil + } + md, _ := attrs.Value(mdKey).(metadata.MD) + return md +} + +// Set sets (overrides) the metadata in addr. +// +// When a SubConn is created with this address, the RPCs sent on it will all +// have this metadata. +func Set(addr resolver.Address, md metadata.MD) resolver.Address { + addr.Attributes = addr.Attributes.WithValues(mdKey, md) + return addr +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go new file mode 100644 index 000000000..e69900400 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver provides internal resolver-related functionality. +package resolver + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" +) + +// ConfigSelector controls what configuration to use for every RPC. +type ConfigSelector interface { + // Selects the configuration for the RPC, or terminates it using the error. + // This error will be converted by the gRPC library to a status error with + // code UNKNOWN if it is not returned as a status error. + SelectConfig(RPCInfo) (*RPCConfig, error) +} + +// RPCInfo contains RPC information needed by a ConfigSelector. +type RPCInfo struct { + // Context is the user's context for the RPC and contains headers and + // application timeout. It is passed for interception purposes and for + // efficiency reasons. SelectConfig should not be blocking. 
+ Context context.Context + Method string // i.e. "/Service/Method" +} + +// RPCConfig describes the configuration to use for each RPC. +type RPCConfig struct { + // The context to use for the remainder of the RPC; can pass info to LB + // policy or affect timeout or metadata. + Context context.Context + MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC + OnCommitted func() // Called when the RPC has been committed (retries no longer possible) +} + +type csKeyType string + +const csKey = csKeyType("grpc.internal.resolver.configSelector") + +// SetConfigSelector sets the config selector in state and returns the new +// state. +func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { + state.Attributes = state.Attributes.WithValues(csKey, cs) + return state +} + +// GetConfigSelector retrieves the config selector from state, if present, and +// returns it or nil if absent. +func GetConfigSelector(state resolver.State) ConfigSelector { + cs, _ := state.Attributes.Value(csKey).(ConfigSelector) + return cs +} + +// SafeConfigSelector allows for safe switching of ConfigSelector +// implementations such that previous values are guaranteed to not be in use +// when UpdateConfigSelector returns. +type SafeConfigSelector struct { + mu sync.RWMutex + cs ConfigSelector +} + +// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until +// all uses of the previous ConfigSelector have completed. +func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) { + scs.mu.Lock() + defer scs.mu.Unlock() + scs.cs = cs +} + +// SelectConfig defers to the current ConfigSelector in scs. +func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) { + scs.mu.RLock() + defer scs.mu.RUnlock() + return scs.cs.SelectConfig(r) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 7705ca22e..304235566 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -32,7 +32,9 @@ import ( "sync" "time" + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -42,6 +44,8 @@ import ( // addresses from SRV records. Must not be changed after init time. var EnableSRVLookups = false +var logger = grpclog.Component("dns") + func init() { resolver.Register(NewBuilder()) } @@ -204,8 +208,12 @@ func (d *dnsResolver) watcher() { case <-d.rn: } - state := d.lookup() - d.cc.UpdateState(*state) + state, err := d.lookup() + if err != nil { + d.cc.ReportError(err) + } else { + d.cc.UpdateState(*state) + } // Sleep to prevent excessive re-resolutions. Incoming resolution requests // will be queued in d.rn. 
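As a shape-only sketch (not part of the vendored files) of the ConfigSelector and SafeConfigSelector APIs above: note that internal/resolver is only importable from inside the google.golang.org/grpc module, so the import below is illustrative; the method name is hypothetical.

package resolverexample

import (
	"context"

	iresolver "google.golang.org/grpc/internal/resolver"
)

// passthroughSelector applies no per-RPC configuration.
type passthroughSelector struct{}

func (passthroughSelector) SelectConfig(info iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
	return &iresolver.RPCConfig{Context: info.Context}, nil
}

func example(ctx context.Context) error {
	var scs iresolver.SafeConfigSelector
	scs.UpdateConfigSelector(passthroughSelector{})

	cfg, err := scs.SelectConfig(iresolver.RPCInfo{
		Context: ctx,
		Method:  "/example.Service/Method", // hypothetical method name
	})
	if err != nil {
		return err
	}
	_ = cfg // cfg.Context and cfg.MethodConfig drive the rest of the RPC
	return nil
}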
@@ -219,33 +227,37 @@ func (d *dnsResolver) watcher() { } } -func (d *dnsResolver) lookupSRV() []resolver.Address { +func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { if !EnableSRVLookups { - return nil + return nil, nil } var newAddrs []resolver.Address _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) if err != nil { - grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) - return nil + err = handleDNSError(err, "SRV") // may become nil + return nil, err } for _, s := range srvs { lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) if err != nil { - grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err) - continue - } - for _, a := range lbAddrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + err = handleDNSError(err, "A") // may become nil + if err == nil { + // If there are other SRV records, look them up and ignore this + // one that does not exist. continue } - addr := a + ":" + strconv.Itoa(int(s.Port)) - newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) + return nil, err + } + for _, a := range lbAddrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) } } - return newAddrs + return newAddrs, nil } var filterError = func(err error) error { @@ -258,13 +270,22 @@ var filterError = func(err error) error { return err } +func handleDNSError(err error, lookupType string) error { + err = filterError(err) + if err != nil { + err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) + logger.Info(err) + } + return err +} + func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) if err != nil { - err = filterError(err) - if err != nil { - err = fmt.Errorf("error from DNS TXT record lookup: %v", err) - grpclog.Infoln("grpc:", err) + if envconfig.TXTErrIgnore { + return nil + } + if err = handleDNSError(err, "TXT"); err != nil { return &serviceconfig.ParseResult{Err: err} } return nil @@ -276,7 +297,7 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { // TXT record must have "grpc_config=" attribute in order to be used as service config. if !strings.HasPrefix(res, txtAttribute) { - grpclog.Warningf("grpc: DNS TXT record %v missing %v attribute", res, txtAttribute) + logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) // This is not an error; it is the equivalent of not having a service config. 
return nil } @@ -284,34 +305,39 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { return d.cc.ParseServiceConfig(sc) } -func (d *dnsResolver) lookupHost() []resolver.Address { +func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { var newAddrs []resolver.Address addrs, err := d.resolver.LookupHost(d.ctx, d.host) if err != nil { - grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) - return nil + err = handleDNSError(err, "A") + return nil, err } for _, a := range addrs { - a, ok := formatIP(a) + ip, ok := formatIP(a) if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) } - addr := a + ":" + d.port + addr := ip + ":" + d.port newAddrs = append(newAddrs, resolver.Address{Addr: addr}) } - return newAddrs + return newAddrs, nil } -func (d *dnsResolver) lookup() *resolver.State { - srv := d.lookupSRV() - state := &resolver.State{ - Addresses: append(d.lookupHost(), srv...), +func (d *dnsResolver) lookup() (*resolver.State, error) { + srv, srvErr := d.lookupSRV() + addrs, hostErr := d.lookupHost() + if hostErr != nil && (srvErr != nil || len(srv) == 0) { + return nil, hostErr + } + + state := resolver.State{Addresses: addrs} + if len(srv) > 0 { + state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) } if !d.disableServiceConfig { state.ServiceConfig = d.lookupTXT() } - return state + return &state, nil } // formatIP returns ok = false if addr is not a valid textual representation of an IP address. @@ -397,12 +423,12 @@ func canaryingSC(js string) string { var rcs []rawChoice err := json.Unmarshal([]byte(js), &rcs) if err != nil { - grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err) + logger.Warningf("dns: error parsing service config json: %v", err) return "" } cliHostname, err := os.Hostname() if err != nil { - grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err) + logger.Warningf("dns: error getting client hostname: %v", err) return "" } var sc string diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go new file mode 100644 index 000000000..0d5a811dd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package unix implements a resolver for unix targets. 
+package unix + +import ( + "fmt" + + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/resolver" +) + +const unixScheme = "unix" +const unixAbstractScheme = "unix-abstract" + +type builder struct { + scheme string +} + +func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { + if target.Authority != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + } + addr := resolver.Address{Addr: target.Endpoint} + if b.scheme == unixAbstractScheme { + // prepend "\x00" to address for unix-abstract + addr.Addr = "\x00" + addr.Addr + } + cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) + return &nopResolver{}, nil +} + +func (b *builder) Scheme() string { + return b.scheme +} + +type nopResolver struct { +} + +func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (*nopResolver) Close() {} + +func init() { + resolver.Register(&builder{scheme: unixScheme}) + resolver.Register(&builder{scheme: unixAbstractScheme}) +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go new file mode 100644 index 000000000..bd4b8875f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -0,0 +1,162 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig contains utility functions to parse service config. +package serviceconfig + +import ( + "encoding/json" + "fmt" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + externalserviceconfig "google.golang.org/grpc/serviceconfig" +) + +var logger = grpclog.Component("core") + +// BalancerConfig wraps the name and config associated with one load balancing +// policy. It corresponds to a single entry of the loadBalancingConfig field +// from ServiceConfig. +// +// It implements the json.Unmarshaler interface. +// +// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247 +type BalancerConfig struct { + Name string + Config externalserviceconfig.LoadBalancingConfig +} + +type intermediateBalancerConfig []map[string]json.RawMessage + +// UnmarshalJSON implements the json.Unmarshaler interface. +// +// ServiceConfig contains a list of loadBalancingConfigs, each with a name and +// config. This method iterates through that list in order, and stops at the +// first policy that is supported. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. 
+func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + var ir intermediateBalancerConfig + err := json.Unmarshal(b, &ir) + if err != nil { + return err + } + + for i, lbcfg := range ir { + if len(lbcfg) != 1 { + return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + } + + var ( + name string + jsonCfg json.RawMessage + ) + // Get the key:value pair from the map. We have already made sure that + // the map contains a single entry. + for name, jsonCfg = range lbcfg { + } + + builder := balancer.Get(name) + if builder == nil { + // If the balancer is not registered, move on to the next config. + // This is not an error. + continue + } + bc.Name = name + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + if string(jsonCfg) != "{}" { + logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + // Stop at this, though the builder doesn't support parsing config. + return nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) + } + bc.Config = cfg + return nil + } + // This is reached when the for loop iterates over all entries, but didn't + // return. This means we had a loadBalancingConfig slice but did not + // encounter a registered policy. The config is considered invalid in this + // case. + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found") +} + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. + MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int + // RetryPolicy configures retry options for the method. + RetryPolicy *RetryPolicy +} + +// RetryPolicy defines the go-native version of the retry policy defined by the +// service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type RetryPolicy struct { + // MaxAttempts is the maximum number of attempts, including the original RPC. + // + // This field is required and must be two or greater. + MaxAttempts int + + // Exponential backoff parameters. The initial retry attempt will occur at + // random(0, initialBackoff). 
In general, the nth attempt will occur at + // random(0, + // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). + // + // These fields are required and must be greater than zero. + InitialBackoff time.Duration + MaxBackoff time.Duration + BackoffMultiplier float64 + + // The set of status codes which may be retried. + // + // Status codes are specified as strings, e.g., "UNAVAILABLE". + // + // This field is required and must be non-empty. + // Note: a set is used to store this for easy lookup. + RetryableStatusCodes map[codes.Code]bool +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go new file mode 100644 index 000000000..710223b8d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -0,0 +1,162 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// Err returns an error representing c and msg. If c is OK, returns nil. +func Err(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Err(c, fmt.Sprintf(format, a...)) +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. 
+func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return &Error{e: s.Proto()} +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +// Error wraps a pointer of a status proto. It implements error and Status, +// and a nil *Error should never be returned by this package. +type Error struct { + e *spb.Status +} + +func (e *Error) Error() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage()) +} + +// GRPCStatus returns the Status represented by se. +func (e *Error) GRPCStatus() *Status { + return FromProto(e.e) +} + +// Is implements future error.Is functionality. +// A Error is equivalent if the code and message are identical. +func (e *Error) Is(target error) bool { + tse, ok := target.(*Error) + if !ok { + return false + } + return proto.Equal(e.e, tse.e) +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go index 43281a3e0..4b2964f2a 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -32,35 +32,35 @@ import ( "google.golang.org/grpc/grpclog" ) +var logger = grpclog.Component("core") + // GetCPUTime returns the how much CPU time has passed since the start of this process. func GetCPUTime() int64 { var ts unix.Timespec if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { - grpclog.Fatal(err) + logger.Fatal(err) } return ts.Nano() } -// Rusage is an alias for syscall.Rusage under linux non-appengine environment. -type Rusage syscall.Rusage +// Rusage is an alias for syscall.Rusage under linux environment. +type Rusage = syscall.Rusage // GetRusage returns the resource usage of current process. 
-func GetRusage() (rusage *Rusage) { - rusage = new(Rusage) - syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage)) - return +func GetRusage() *Rusage { + rusage := new(Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, rusage) + return rusage } // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { - f := (*syscall.Rusage)(first) - l := (*syscall.Rusage)(latest) var ( - utimeDiffs = l.Utime.Sec - f.Utime.Sec - utimeDiffus = l.Utime.Usec - f.Utime.Usec - stimeDiffs = l.Stime.Sec - f.Stime.Sec - stimeDiffus = l.Stime.Usec - f.Stime.Usec + utimeDiffs = latest.Utime.Sec - first.Utime.Sec + utimeDiffus = latest.Utime.Usec - first.Utime.Usec + stimeDiffs = latest.Stime.Sec - first.Stime.Sec + stimeDiffus = latest.Stime.Usec - first.Stime.Usec ) uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index d3fd9dab3..7913ef1db 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -18,6 +18,8 @@ * */ +// Package syscall provides functionalities that grpc uses to get low-level +// operating system stats/info. package syscall import ( @@ -29,10 +31,11 @@ import ( ) var once sync.Once +var logger = grpclog.Component("core") func log() { once.Do(func() { - grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.") + logger.Info("CPU time info is unavailable on non-linux or appengine environment.") }) } @@ -47,7 +50,7 @@ func GetCPUTime() int64 { type Rusage struct{} // GetRusage is a no-op function under non-linux or appengine environment. -func GetRusage() (rusage *Rusage) { +func GetRusage() *Rusage { log() return nil } diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index ddee20b6b..40ef23923 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -505,7 +505,9 @@ func (l *loopyWriter) run() (err error) { // 1. When the connection is closed by some other known issue. // 2. User closed the connection. // 3. A graceful close of connection. - infof("transport: loopyWriter.run returning. %v", err) + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter.run returning. %v", err) + } err = nil } }() @@ -605,7 +607,9 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { if l.side == serverSide { str, ok := l.estdStreams[h.streamID] if !ok { - warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + if logger.V(logLevel) { + logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + } return nil } // Case 1.A: Server is responding back with headers. 
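Most of the logging changes in these transport hunks follow one shape: a package-level component logger from grpclog.Component plus a verbosity guard, so the formatting work is skipped unless verbose logging is enabled. A small self-contained sketch of that pattern; the component name and the logLevel value of 2 are assumptions for illustration (the vendored transport package defines its own constant).

package main

import "google.golang.org/grpc/grpclog"

// logLevel plays the role of the transport package's verbosity constant;
// the value 2 is an assumption made for this sketch.
const logLevel = 2

var logger = grpclog.Component("example")

func main() {
	// Unconditional logs always go through the component logger.
	logger.Info("component logger ready")

	// Verbose logs are gated on V(), which reflects the verbosity set via
	// the GRPC_GO_LOG_VERBOSITY_LEVEL environment variable, so the Infof
	// call (and its argument formatting) is skipped when verbosity is low.
	if logger.V(logLevel) {
		logger.Infof("verbose detail: %v", 42)
	}
}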
@@ -658,7 +662,9 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He l.hBuf.Reset() for _, f := range hf { if err := l.hEnc.WriteField(f); err != nil { - warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err) + if logger.V(logLevel) { + logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + } } } var ( @@ -857,38 +863,45 @@ func (l *loopyWriter) processData() (bool, error) { return false, nil } var ( - idx int buf []byte ) - if len(dataItem.h) != 0 { // data header has not been written out yet. - buf = dataItem.h - } else { - idx = 1 - buf = dataItem.d - } - size := http2MaxFrameLen - if len(buf) < size { - size = len(buf) - } + // Figure out the maximum size we can send + maxSize := http2MaxFrameLen if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. str.state = waitingOnStreamQuota return false, nil - } else if strQuota < size { - size = strQuota + } else if maxSize > strQuota { + maxSize = strQuota + } + if maxSize > int(l.sendQuota) { // connection-level flow control. + maxSize = int(l.sendQuota) + } + // Compute how much of the header and data we can send within quota and max frame length + hSize := min(maxSize, len(dataItem.h)) + dSize := min(maxSize-hSize, len(dataItem.d)) + if hSize != 0 { + if dSize == 0 { + buf = dataItem.h + } else { + // We can add some data to grpc message header to distribute bytes more equally across frames. + // Copy on the stack to avoid generating garbage + var localBuf [http2MaxFrameLen]byte + copy(localBuf[:hSize], dataItem.h) + copy(localBuf[hSize:], dataItem.d[:dSize]) + buf = localBuf[:hSize+dSize] + } + } else { + buf = dataItem.d } - if l.sendQuota < uint32(size) { // connection-level flow control. - size = int(l.sendQuota) - } + size := hSize + dSize + // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && size == len(buf) { - // buf contains either data or it contains header but data is empty. - if idx == 1 || len(dataItem.d) == 0 { - endStream = true - } + if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() @@ -896,14 +909,10 @@ func (l *loopyWriter) processData() (bool, error) { if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { return false, err } - buf = buf[size:] str.bytesOutStanding += size l.sendQuota -= uint32(size) - if idx == 0 { - dataItem.h = buf - } else { - dataItem.d = buf - } + dataItem.h = dataItem.h[hSize:] + dataItem.d = dataItem.d[dSize:] if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. 
str.itl.dequeue() @@ -924,3 +933,10 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index fbf01d5fe..05d3871e6 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -39,6 +39,7 @@ import ( "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -57,7 +58,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? we did before - contentSubtype, validContentType := contentSubtype(contentType) + contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { return nil, errors.New("invalid gRPC request content-type") } @@ -112,11 +113,10 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta // at this point to be speaking over HTTP/2, so it's able to speak valid // gRPC. type serverHandlerTransport struct { - rw http.ResponseWriter - req *http.Request - timeoutSet bool - timeout time.Duration - didCommonHeaders bool + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration headerMD metadata.MD @@ -186,8 +186,11 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro ht.writeStatusMu.Lock() defer ht.writeStatusMu.Unlock() + headersWritten := s.updateHeaderSent() err := ht.do(func() { - ht.writeCommonHeaders(s) + if !headersWritten { + ht.writePendingHeaders(s) + } // And flush, in case no header or body has been sent yet. // This forces a separation of headers and trailers if this is the @@ -227,6 +230,8 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro if err == nil { // transport has not been closed if ht.stats != nil { + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) @@ -236,14 +241,16 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro return err } +// writePendingHeaders sets common and custom headers on the first +// write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { + ht.writeCommonHeaders(s) + ht.writeCustomHeaders(s) +} + // writeCommonHeaders sets common headers on the first write // call (Write, WriteHeader, or WriteStatus). func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { - if ht.didCommonHeaders { - return - } - ht.didCommonHeaders = true - h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore h.Set("Content-Type", ht.contentType) @@ -262,9 +269,30 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } } +// writeCustomHeaders sets custom headers set on the stream via SetHeader +// on the first write call (Write, WriteHeader, or WriteStatus). 
+func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { + h := ht.rw.Header() + + s.hdrMu.Lock() + for k, vv := range s.header { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + h.Add(k, encodeMetadataHeader(k, v)) + } + } + + s.hdrMu.Unlock() +} + func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + headersWritten := s.updateHeaderSent() return ht.do(func() { - ht.writeCommonHeaders(s) + if !headersWritten { + ht.writePendingHeaders(s) + } ht.rw.Write(hdr) ht.rw.Write(data) ht.rw.(http.Flusher).Flush() @@ -272,27 +300,27 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + if err := s.SetHeader(md); err != nil { + return err + } + + headersWritten := s.updateHeaderSent() err := ht.do(func() { - ht.writeCommonHeaders(s) - h := ht.rw.Header() - for k, vv := range md { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - if isReservedHeader(k) { - continue - } - for _, v := range vv { - v = encodeMetadataHeader(k, v) - h.Add(k, v) - } + if !headersWritten { + ht.writePendingHeaders(s) } + ht.rw.WriteHeader(200) ht.rw.(http.Flusher).Flush() }) if err == nil { if ht.stats != nil { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ - Header: md.Copy(), + Header: md.Copy(), + Compression: s.sendCompress, }) } } @@ -338,7 +366,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace Addr: ht.RemoteAddr(), } if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index e18935653..8902b7f90 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -32,6 +32,9 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -41,6 +44,7 @@ import ( "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) @@ -57,7 +61,7 @@ type http2Client struct { cancel context.CancelFunc ctxDone <-chan struct{} // Cache the ctx.Done() chan. 
userAgent string - md interface{} + md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter remoteAddr net.Addr @@ -135,11 +139,27 @@ type http2Client struct { connectionID uint64 } -func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { + address := addr.Addr + networkType, ok := networktype.Get(addr) if fn != nil { - return fn(ctx, addr) + if networkType == "unix" && !strings.HasPrefix(address, "\x00") { + // For backward compatibility, if the user dialed "unix:///path", + // the passthrough resolver would be used and the user's custom + // dialer would see "unix:///path". Since the unix resolver is used + // and the address is now "/path", prepend "unix://" so the user's + // custom dialer sees the same address. + return fn(ctx, "unix://"+address) + } + return fn(ctx, address) } - return (&net.Dialer{}).DialContext(ctx, "tcp", addr) + if !ok { + networkType, address = parseDialTarget(address) + } + if networkType == "tcp" && useProxy { + return proxyDial(ctx, address, grpcUA) + } + return (&net.Dialer{}).DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -161,7 +181,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -170,7 +190,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne } }() - conn, err := dial(connectCtx, opts.Dialer, addr.Addr) + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) if err != nil { if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) @@ -214,12 +234,32 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne } } if transportCreds != nil { - scheme = "https" - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn) + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the credential handshaker. This makes it possible for + // address specific arbitrary data to reach the credential handshaker. 
+ contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) + connectCtx = contextWithHandshakeInfo(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } + for _, cd := range perRPCCreds { + if cd.RequireTransportSecurity() { + if ci, ok := authInfo.(interface { + GetCommonAuthInfo() credentials.CommonAuthInfo + }); ok { + secLevel := ci.GetCommonAuthInfo().SecurityLevel + if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity { + return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection") + } + } + } + } isSecure = true + if transportCreds.Info().SecurityProtocol == "tls" { + scheme = "https" + } } dynamicWindow := true icwz := int32(initialWindowSize) @@ -238,7 +278,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne ctxDone: ctx.Done(), // Cache Done chan. cancel: cancel, userAgent: opts.UserAgent, - md: addr.Metadata, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -266,6 +305,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), } + + if md, ok := addr.Metadata.(*metadata.MD); ok { + t.md = *md + } else if md := imetadata.Get(addr); md != nil { + t.md = md + } t.controlBuf = newControlBuffer(t.ctxDone) if opts.InitialWindowSize >= defaultWindowSize { t.initialWindowSize = opts.InitialWindowSize @@ -345,7 +390,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) err := t.loopy.run() if err != nil { - errorf("transport: loopyWriter.run returning. Err: %v", err) + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } } // If it's a connection error, let reader goroutine handle it // since there might be data in the buffers. 
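The handshake-time loop above rejects per-RPC credentials whose RequireTransportSecurity is true when the negotiated AuthInfo reports a security level below PrivacyAndIntegrity. A sketch of the credentials side of that contract; the token type and metadata key are illustrative and not part of this change.

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// tokenCreds is an illustrative PerRPCCredentials implementation.
type tokenCreds struct{ token string }

func (c tokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

// RequireTransportSecurity is what the transport consults: returning true
// means the connection must provide at least PrivacyAndIntegrity, otherwise
// the dial or RPC fails instead of sending the token in cleartext.
func (c tokenCreds) RequireTransportSecurity() bool { return true }

var _ credentials.PerRPCCredentials = tokenCreds{}

func main() {
	// Attached via a dial option; combined with a plaintext (insecure)
	// transport, this is rejected by the checks shown in the diff above.
	_ = grpc.WithPerRPCCredentials(tokenCreds{token: "example-token"})
}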
@@ -403,7 +450,8 @@ func (t *http2Client) getPeer() *peer.Peer { func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { aud := t.createAudience(callHdr) ri := credentials.RequestInfo{ - Method: callHdr.Method, + Method: callHdr.Method, + AuthInfo: t.authInfo, } ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri) authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) @@ -424,7 +472,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)}) headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) if callHdr.PreviousAttempts > 0 { @@ -439,7 +487,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) // Send out timeout regardless its value. The server can detect timeout context by itself. // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. timeout := time.Until(dl) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)}) } for k, v := range authData { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) @@ -468,25 +516,23 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) for _, vv := range added { for i, v := range vv { if i%2 == 0 { - k = v + k = strings.ToLower(v) continue } // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. if isReservedHeader(k) { continue } - headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } - if md, ok := t.md.(*metadata.MD); ok { - for k, vv := range *md { - if isReservedHeader(k) { - continue - } - for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } + for k, vv := range t.md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } return headerFields, nil @@ -536,8 +582,11 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // Note: if these credentials are provided both via dial options and call // options, then both sets of credentials will be applied. 
if callCreds := callHdr.Creds; callCreds != nil { - if !t.isSecure && callCreds.RequireTransportSecurity() { - return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + if callCreds.RequireTransportSecurity() { + ri, _ := credentials.RequestInfoFromContext(ctx) + if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { @@ -553,13 +602,26 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call return callAuthData, nil } +// PerformedIOError wraps an error to indicate IO may have been performed +// before the error occurred. +type PerformedIOError struct { + Err error +} + +// Error implements error. +func (p PerformedIOError) Error() string { + return p.Err.Error() +} + // NewStream creates a stream and registers it into the transport as "active" // streams. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, err + // We may have performed I/O in the per-RPC creds callback, so do not + // allow transparent retry. + return nil, PerformedIOError{err} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -679,14 +741,21 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } } if t.statsHandler != nil { - header, _, _ := metadata.FromOutgoingContextRaw(ctx) + header, ok := metadata.FromOutgoingContext(ctx) + if ok { + header.Set("user-agent", t.userAgent) + } else { + header = metadata.Pairs("user-agent", t.userAgent) + } + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. outHeader := &stats.OutHeader{ Client: true, FullMethod: callHdr.Method, RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, Compression: callHdr.SendCompress, - Header: header.Copy(), + Header: header, } t.statsHandler.HandleRPC(s.ctx, outHeader) } @@ -846,18 +915,10 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e df := &dataFrame{ streamID: s.id, endStream: opts.Last, + h: hdr, + d: data, } - if hdr != nil || data != nil { // If it's not an empty data frame. - // Add some data to grpc message header so that we can equally - // distribute bytes across frames. - emptyLen := http2MaxFrameLen - len(hdr) - if emptyLen > len(data) { - emptyLen = len(data) - } - hdr = append(hdr, data[:emptyLen]...) - data = data[emptyLen:] - df.h, df.d = hdr, data - // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler. + if hdr != nil || data != nil { // If it's not an empty data frame, check quota. 
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { return err } @@ -991,7 +1052,9 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { - warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + if logger.V(logLevel) { + logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + } statusCode = codes.Unknown } if statusCode == codes.Canceled { @@ -1073,7 +1136,9 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { return } if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + if logger.V(logLevel) { + logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } } id := f.LastStreamID if id > 0 && id%2 != 1 { @@ -1177,8 +1242,8 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { state := &decodeState{} // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. state.data.isGRPC = !initialHeader - if err := state.decodeHeader(frame); err != nil { - t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream) + if h2code, err := state.decodeHeader(frame); err != nil { + t.closeStream(s, err, true, h2code, status.Convert(err), nil, endStream) return } @@ -1187,9 +1252,10 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if t.statsHandler != nil { if isHeader { inHeader := &stats.InHeader{ - Client: true, - WireLength: int(frame.Header().Length), - Header: s.header.Copy(), + Client: true, + WireLength: int(frame.Header().Length), + Header: s.header.Copy(), + Compression: s.recvCompress, } t.statsHandler.HandleRPC(s.ctx, inHeader) } else { @@ -1276,7 +1342,13 @@ func (t *http2Client) reader() { if s != nil { // use error detail to provide better err message code := http2ErrConvTab[se.Code] - msg := t.framer.fr.ErrorDetail().Error() + errorDetail := t.framer.fr.ErrorDetail() + var msg string + if errorDetail != nil { + msg = errorDetail.Error() + } else { + msg = "received invalid frame" + } t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue @@ -1302,7 +1374,9 @@ func (t *http2Client) reader() { case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: - errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + if logger.V(logLevel) { + logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } } } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 8b04b0392..0cf1cc320 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -34,12 +34,10 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" 
"google.golang.org/grpc/keepalive" @@ -57,9 +55,6 @@ var ( // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") - // statusRawProto is a function to get to the raw status proto wrapped in a - // status.Status without a proto.Clone(). - statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status) ) // serverConnectionCounter counts the number of connections a server has seen @@ -294,7 +289,9 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler if err := t.loopy.run(); err != nil { - errorf("transport: loopyWriter.run returning. Err: %v", err) + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } } t.conn.Close() close(t.writerDone) @@ -309,12 +306,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( state := &decodeState{ serverSide: true, } - if err := state.decodeHeader(frame); err != nil { - if se, ok := status.FromError(err); ok { + if h2code, err := state.decodeHeader(frame); err != nil { + if _, ok := status.FromError(err); ok { t.controlBuf.put(&cleanupStream{ streamID: streamID, rst: true, - rstCode: statusCodeConvTab[se.Code()], + rstCode: h2code, onWrite: func() {}, }) } @@ -365,7 +362,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx, err = t.inTapHandle(s.ctx, info) if err != nil { - warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + } t.controlBuf.put(&cleanupStream{ streamID: s.id, rst: true, @@ -396,7 +395,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if streamID%2 != 1 || streamID <= t.maxStreamID { t.mu.Unlock() // illegal gRPC stream id. - errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + } s.cancel() return true } @@ -459,7 +460,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { - warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + } t.mu.Lock() s := t.activeStreams[se.StreamID] t.mu.Unlock() @@ -479,7 +482,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. t.Close() return } - warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + } t.Close() return } @@ -502,7 +507,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. 
default: - errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } } } } @@ -604,6 +611,10 @@ func (t *http2Server) handleData(f *http2.DataFrame) { if !ok { return } + if s.getState() == streamReadDone { + t.closeStream(s, true, http2.ErrCodeStreamClosed, false) + return + } if size > 0 { if err := s.fc.onData(size); err != nil { t.closeStream(s, true, http2.ErrCodeFlowControl, false) @@ -724,7 +735,9 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. - errorf("transport: Got too many pings from the client, closing the connection.") + if logger.V(logLevel) { + logger.Errorf("transport: Got too many pings from the client, closing the connection.") + } t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) } } @@ -757,7 +770,9 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { var sz int64 for _, f := range hdrFrame.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + if logger.V(logLevel) { + logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + } return false } } @@ -794,7 +809,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) if s.sendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } @@ -813,10 +828,11 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { return ErrHeaderListSizeLimitViolation } if t.stats != nil { - // Note: WireLength is not set in outHeader. - // TODO(mmukhi): Revisit this later, if needed. + // Note: Headers are compressed with hpack after this call returns. + // No WireLength field is set here. outHeader := &stats.OutHeader{ - Header: s.header.Copy(), + Header: s.header.Copy(), + Compression: s.sendCompress, } t.stats.HandleRPC(s.Context(), outHeader) } @@ -843,17 +859,17 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { } } else { // Send a trailer only response. 
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) } } headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) - if p := statusRawProto(st); p != nil && len(p.Details) > 0 { + if p := st.Proto(); p != nil && len(p.Details) > 0 { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. - grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) } else { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } @@ -880,6 +896,8 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) if t.stats != nil { + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) @@ -911,13 +929,6 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e return ContextErr(s.ctx.Err()) } } - // Add some data to header frame so that we can equally distribute bytes across frames. - emptyLen := http2MaxFrameLen - len(hdr) - if emptyLen > len(data) { - emptyLen = len(data) - } - hdr = append(hdr, data[:emptyLen]...) - data = data[emptyLen:] df := &dataFrame{ streamID: s.id, h: hdr, @@ -989,7 +1000,9 @@ func (t *http2Server) keepalive() { select { case <-ageTimer.C: // Close the connection after grace period. - infof("transport: closing server transport due to maximum connection age.") + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to maximum connection age.") + } t.Close() case <-t.done: } @@ -1006,7 +1019,9 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - infof("transport: closing server transport due to idleness.") + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to idleness.") + } t.Close() return } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 8f5f3349d..7e41d1183 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -27,6 +27,7 @@ import ( "math" "net" "net/http" + "net/url" "strconv" "strings" "time" @@ -37,6 +38,8 @@ import ( "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) @@ -50,7 +53,7 @@ const ( // "proto" as a suffix after "+" or ";". See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. 
- baseContentType = "application/grpc" + ) var ( @@ -71,13 +74,6 @@ var ( http2.ErrCodeInadequateSecurity: codes.PermissionDenied, http2.ErrCodeHTTP11Required: codes.Internal, } - statusCodeConvTab = map[codes.Code]http2.ErrCode{ - codes.Internal: http2.ErrCodeInternal, - codes.Canceled: http2.ErrCodeCancel, - codes.Unavailable: http2.ErrCodeRefusedStream, - codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, - codes.PermissionDenied: http2.ErrCodeInadequateSecurity, - } // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. HTTPStatusConvTab = map[int]codes.Code{ // 400 Bad Request - INTERNAL. @@ -97,6 +93,7 @@ var ( // 504 Gateway timeout - UNAVAILABLE. http.StatusGatewayTimeout: codes.Unavailable, } + logger = grpclog.Component("transport") ) type parsedHeaderData struct { @@ -182,46 +179,6 @@ func isWhitelistedHeader(hdr string) bool { } } -// contentSubtype returns the content-subtype for the given content-type. The -// given content-type must be a valid content-type that starts with -// "application/grpc". A content-subtype will follow "application/grpc" after a -// "+" or ";". See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -// -// If contentType is not a valid content-type for gRPC, the boolean -// will be false, otherwise true. If content-type == "application/grpc", -// "application/grpc+", or "application/grpc;", the boolean will be true, -// but no content-subtype will be returned. -// -// contentType is assumed to be lowercase already. -func contentSubtype(contentType string) (string, bool) { - if contentType == baseContentType { - return "", true - } - if !strings.HasPrefix(contentType, baseContentType) { - return "", false - } - // guaranteed since != baseContentType and has baseContentType prefix - switch contentType[len(baseContentType)] { - case '+', ';': - // this will return true for "application/grpc+" or "application/grpc;" - // which the previous validContentType function tested to be valid, so we - // just say that no content-subtype is specified in this case - return contentType[len(baseContentType)+1:], true - default: - return "", false - } -} - -// contentSubtype is assumed to be lowercase -func contentType(contentSubtype string) string { - if contentSubtype == "" { - return baseContentType - } - return baseContentType + "+" + contentSubtype -} - func (d *decodeState) status() *status.Status { if d.data.statusGen == nil { // No status-details were provided; generate status using code/msg. @@ -259,11 +216,11 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error { +func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) (http2.ErrCode, error) { // frame.Truncated is set to true when framer detects that the current header // list size hits MaxHeaderListSize limit. if frame.Truncated { - return status.Error(codes.Internal, "peer header list size exceeded limit") + return http2.ErrCodeFrameSize, status.Error(codes.Internal, "peer header list size exceeded limit") } for _, hf := range frame.Fields { @@ -272,10 +229,10 @@ func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error { if d.data.isGRPC { if d.data.grpcErr != nil { - return d.data.grpcErr + return http2.ErrCodeProtocol, d.data.grpcErr } if d.serverSide { - return nil + return http2.ErrCodeNo, nil } if d.data.rawStatusCode == nil && d.data.statusGen == nil { // gRPC status doesn't exist. 
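The contentSubtype/contentType helpers deleted above are not dropped; the call sites now use internal/grpcutil's ContentSubtype/ContentType, which apply the same rules. A standalone sketch of that parsing, runnable outside the vendored tree:

package main

import (
	"fmt"
	"strings"
)

const baseContentType = "application/grpc"

// contentSubtype accepts "application/grpc" on its own, or followed by a
// "+subtype" or ";subtype" suffix, mirroring the helper that moved to
// internal/grpcutil. The input is assumed to be lowercase already.
func contentSubtype(contentType string) (string, bool) {
	if contentType == baseContentType {
		return "", true
	}
	if !strings.HasPrefix(contentType, baseContentType) {
		return "", false
	}
	switch contentType[len(baseContentType)] {
	case '+', ';':
		return contentType[len(baseContentType)+1:], true
	default:
		return "", false
	}
}

func main() {
	for _, ct := range []string{"application/grpc", "application/grpc+proto", "application/json"} {
		sub, ok := contentSubtype(ct)
		fmt.Printf("%q -> subtype=%q valid=%v\n", ct, sub, ok)
	}
}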
@@ -287,12 +244,12 @@ func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error { code := int(codes.Unknown) d.data.rawStatusCode = &code } - return nil + return http2.ErrCodeNo, nil } // HTTP fallback mode if d.data.httpErr != nil { - return d.data.httpErr + return http2.ErrCodeProtocol, d.data.httpErr } var ( @@ -307,7 +264,7 @@ func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error { } } - return status.Error(code, d.constructHTTPErrMsg()) + return http2.ErrCodeProtocol, status.Error(code, d.constructHTTPErrMsg()) } // constructErrMsg constructs error message to be returned in HTTP fallback mode. @@ -340,7 +297,7 @@ func (d *decodeState) addMetadata(k, v string) { func (d *decodeState) processHeaderField(f hpack.HeaderField) { switch f.Name { case "content-type": - contentSubtype, validContentType := contentSubtype(f.Value) + contentSubtype, validContentType := grpcutil.ContentSubtype(f.Value) if !validContentType { d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) return @@ -412,7 +369,9 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) { } v, err := decodeMetadataHeader(f.Name, f.Value) if err != nil { - errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + if logger.V(logLevel) { + logger.Errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + } return } d.addMetadata(f.Name, v) @@ -449,41 +408,6 @@ func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { return } -const maxTimeoutValue int64 = 100000000 - 1 - -// div does integer division and round-up the result. Note that this is -// equivalent to (d+r-1)/r but has less chance to overflow. -func div(d, r time.Duration) int64 { - if m := d % r; m > 0 { - return int64(d/r + 1) - } - return int64(d / r) -} - -// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. -func encodeTimeout(t time.Duration) string { - if t <= 0 { - return "0n" - } - if d := div(t, time.Nanosecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "n" - } - if d := div(t, time.Microsecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "u" - } - if d := div(t, time.Millisecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "m" - } - if d := div(t, time.Second); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "S" - } - if d := div(t, time.Minute); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "M" - } - // Note that maxTimeoutValue * time.Hour > MaxInt64. - return strconv.FormatInt(div(t, time.Hour), 10) + "H" -} - func decodeTimeout(s string) (time.Duration, error) { size := len(s) if size < 2 { @@ -675,3 +599,31 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } + +// parseDialTarget returns the network and address to pass to dialer. 
+func parseDialTarget(target string) (string, string) { + net := "tcp" + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + return n, target[m1+1:] + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr := t.Path + if scheme == "unix" { + if addr == "" { + addr = t.Host + } + return scheme, addr + } + } + return net, target +} diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go new file mode 100644 index 000000000..96967428b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package networktype declares the network type to be used in the default +// dailer. Attribute of a resolver.Address. +package networktype + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.internal.transport.networktype") + +// Set returns a copy of the provided address with attributes containing networkType. +func Set(address resolver.Address, networkType string) resolver.Address { + address.Attributes = address.Attributes.WithValues(key, networkType) + return address +} + +// Get returns the network type in the resolver.Address and true, or "", false +// if not present. +func Get(address resolver.Address) (string, bool) { + v := address.Attributes.Value(key) + if v == nil { + return "", false + } + return v.(string), true +} diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go similarity index 73% rename from vendor/google.golang.org/grpc/proxy.go rename to vendor/google.golang.org/grpc/internal/transport/proxy.go index f8f69bfb7..a662bf39a 100644 --- a/vendor/google.golang.org/grpc/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -16,13 +16,12 @@ * */ -package grpc +package transport import ( "bufio" "context" "encoding/base64" - "errors" "fmt" "io" "net" @@ -34,8 +33,6 @@ import ( const proxyAuthHeaderKey = "Proxy-Authorization" var ( - // errDisabled indicates that proxy is disabled for the address. - errDisabled = errors.New("proxy is disabled for the address") // The following variable will be overwritten in the tests. 
httpProxyFromEnvironment = http.ProxyFromEnvironment ) @@ -51,9 +48,6 @@ func mapAddress(ctx context.Context, address string) (*url.URL, error) { if err != nil { return nil, err } - if url == nil { - return nil, errDisabled - } return url, nil } @@ -76,7 +70,7 @@ func basicAuth(username, password string) string { return base64.StdEncoding.EncodeToString([]byte(auth)) } -func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) { +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { defer func() { if err != nil { conn.Close() @@ -115,32 +109,28 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri return &bufConn{Conn: conn, r: r}, nil } -// newProxyDialer returns a dialer that connects to proxy first if necessary. -// The returned dialer checks if a proxy is necessary, dial to the proxy with the -// provided dialer, does HTTP CONNECT handshake and returns the connection. -func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { - return func(ctx context.Context, addr string) (conn net.Conn, err error) { - var newAddr string - proxyURL, err := mapAddress(ctx, addr) - if err != nil { - if err != errDisabled { - return nil, err - } - newAddr = addr - } else { - newAddr = proxyURL.Host - } +// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy +// is necessary, dials, does the HTTP CONNECT handshake, and returns the +// connection. +func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { + newAddr := addr + proxyURL, err := mapAddress(ctx, addr) + if err != nil { + return nil, err + } + if proxyURL != nil { + newAddr = proxyURL.Host + } - conn, err = dialer(ctx, newAddr) - if err != nil { - return - } - if proxyURL != nil { - // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL) - } + conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + if err != nil { return } + if proxyURL != nil { + // proxy is disabled if proxyURL is nil. + conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + } + return } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index a30da9eb3..9c8f79cb4 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -35,11 +35,14 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) +const logLevel = 2 + type bufferPool struct { pool sync.Pool } @@ -566,19 +569,14 @@ type ConnectOptions struct { ChannelzParentID int64 // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 -} - -// TargetInfo contains the information of the target such as network address and metadata. -type TargetInfo struct { - Addr string - Metadata interface{} - Authority string + // UseProxy specifies if a proxy should be used. 
+ UseProxy bool } // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) } // Options provides additional hints and information for message diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go deleted file mode 100644 index c9f79dc53..000000000 --- a/vendor/google.golang.org/grpc/naming/dns_resolver.go +++ /dev/null @@ -1,293 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package naming - -import ( - "context" - "errors" - "fmt" - "net" - "strconv" - "time" - - "google.golang.org/grpc/grpclog" -) - -const ( - defaultPort = "443" - defaultFreq = time.Minute * 30 -) - -var ( - errMissingAddr = errors.New("missing address") - errWatcherClose = errors.New("watcher has been closed") - - lookupHost = net.DefaultResolver.LookupHost - lookupSRV = net.DefaultResolver.LookupSRV -) - -// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and -// create watchers that poll the DNS server using the frequency set by freq. -func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { - return &dnsResolver{freq: freq}, nil -} - -// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create -// watchers that poll the DNS server using the default frequency defined by defaultFreq. -func NewDNSResolver() (Resolver, error) { - return NewDNSResolverWithFreq(defaultFreq) -} - -// dnsResolver handles name resolution for names following the DNS scheme -type dnsResolver struct { - // frequency of polling the DNS server that the watchers created by this resolver will use. - freq time.Duration -} - -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. -func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false - } - if ip.To4() != nil { - return addr, true - } - return "[" + addr + "]", true -} - -// parseTarget takes the user input target string, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. 
-// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. -// examples: -// target: "www.google.com" returns host: "www.google.com", port: "443" -// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" -// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" -// target: ":80" returns host: "localhost", port: "80" -// target: ":" returns host: "localhost", port: "443" -func parseTarget(target string) (host, port string, err error) { - if target == "" { - return "", "", errMissingAddr - } - - if ip := net.ParseIP(target); ip != nil { - // target is an IPv4 or IPv6(without brackets) address - return target, defaultPort, nil - } - if host, port, err := net.SplitHostPort(target); err == nil { - // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port - if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. - host = "localhost" - } - if port == "" { - // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. - port = defaultPort - } - return host, port, nil - } - if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { - // target doesn't have port - return host, port, nil - } - return "", "", fmt.Errorf("invalid target address %v", target) -} - -// Resolve creates a watcher that watches the name resolution of the target. -func (r *dnsResolver) Resolve(target string) (Watcher, error) { - host, port, err := parseTarget(target) - if err != nil { - return nil, err - } - - if net.ParseIP(host) != nil { - ipWatcher := &ipWatcher{ - updateChan: make(chan *Update, 1), - } - host, _ = formatIP(host) - ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} - return ipWatcher, nil - } - - ctx, cancel := context.WithCancel(context.Background()) - return &dnsWatcher{ - r: r, - host: host, - port: port, - ctx: ctx, - cancel: cancel, - t: time.NewTimer(0), - }, nil -} - -// dnsWatcher watches for the name resolution update for a specific target -type dnsWatcher struct { - r *dnsResolver - host string - port string - // The latest resolved address set - curAddrs map[string]*Update - ctx context.Context - cancel context.CancelFunc - t *time.Timer -} - -// ipWatcher watches for the name resolution update for an IP address. -type ipWatcher struct { - updateChan chan *Update -} - -// Next returns the address resolution Update for the target. For IP address, -// the resolution is itself, thus polling name server is unnecessary. Therefore, -// Next() will return an Update the first time it is called, and will be blocked -// for all following calls as no Update exists until watcher is closed. -func (i *ipWatcher) Next() ([]*Update, error) { - u, ok := <-i.updateChan - if !ok { - return nil, errWatcherClose - } - return []*Update{u}, nil -} - -// Close closes the ipWatcher. -func (i *ipWatcher) Close() { - close(i.updateChan) -} - -// AddressType indicates the address type returned by name resolution. -type AddressType uint8 - -const ( - // Backend indicates the server is a backend server. - Backend AddressType = iota - // GRPCLB indicates the server is a grpclb load balancer. - GRPCLB -) - -// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The -// name resolver used by the grpclb balancer is required to provide this type of metadata in -// its address updates. 
-type AddrMetadataGRPCLB struct { - // AddrType is the type of server (grpc load balancer or backend). - AddrType AddressType - // ServerName is the name of the grpc load balancer. Used for authentication. - ServerName string -} - -// compileUpdate compares the old resolved addresses and newly resolved addresses, -// and generates an update list -func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { - var res []*Update - for a, u := range w.curAddrs { - if _, ok := newAddrs[a]; !ok { - u.Op = Delete - res = append(res, u) - } - } - for a, u := range newAddrs { - if _, ok := w.curAddrs[a]; !ok { - res = append(res, u) - } - } - return res -} - -func (w *dnsWatcher) lookupSRV() map[string]*Update { - newAddrs := make(map[string]*Update) - _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) - if err != nil { - grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) - return nil - } - for _, s := range srvs { - lbAddrs, err := lookupHost(w.ctx, s.Target) - if err != nil { - grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err) - continue - } - for _, a := range lbAddrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + strconv.Itoa(int(s.Port)) - newAddrs[addr] = &Update{Addr: addr, - Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} - } - } - return newAddrs -} - -func (w *dnsWatcher) lookupHost() map[string]*Update { - newAddrs := make(map[string]*Update) - addrs, err := lookupHost(w.ctx, w.host) - if err != nil { - grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) - return nil - } - for _, a := range addrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + w.port - newAddrs[addr] = &Update{Addr: addr} - } - return newAddrs -} - -func (w *dnsWatcher) lookup() []*Update { - newAddrs := w.lookupSRV() - if newAddrs == nil { - // If failed to get any balancer address (either no corresponding SRV for the - // target, or caused by failure during resolution/parsing of the balancer target), - // return any A record info available. - newAddrs = w.lookupHost() - } - result := w.compileUpdate(newAddrs) - w.curAddrs = newAddrs - return result -} - -// Next returns the resolved address update(delta) for the target. If there's no -// change, it will sleep for 30 mins and try to resolve again after that. -func (w *dnsWatcher) Next() ([]*Update, error) { - for { - select { - case <-w.ctx.Done(): - return nil, errWatcherClose - case <-w.t.C: - } - result := w.lookup() - // Next lookup should happen after an interval defined by w.r.freq. - w.t.Reset(w.r.freq) - if len(result) > 0 { - return result, nil - } - } -} - -func (w *dnsWatcher) Close() { - w.cancel() -} diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go deleted file mode 100644 index f4c1c8b68..000000000 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package naming defines the naming API and related data structures for gRPC. -// -// This package is deprecated: please use package resolver instead. -package naming - -// Operation defines the corresponding operations for a name resolution change. -// -// Deprecated: please use package resolver. -type Operation uint8 - -const ( - // Add indicates a new address is added. - Add Operation = iota - // Delete indicates an existing address is deleted. - Delete -) - -// Update defines a name resolution update. Notice that it is not valid having both -// empty string Addr and nil Metadata in an Update. -// -// Deprecated: please use package resolver. -type Update struct { - // Op indicates the operation of the update. - Op Operation - // Addr is the updated address. It is empty string if there is no address update. - Addr string - // Metadata is the updated metadata. It is nil if there is no metadata update. - // Metadata is not required for a custom naming implementation. - Metadata interface{} -} - -// Resolver creates a Watcher for a target to track its resolution changes. -// -// Deprecated: please use package resolver. -type Resolver interface { - // Resolve creates a Watcher for target. - Resolve(target string) (Watcher, error) -} - -// Watcher watches for the updates on the specified target. -// -// Deprecated: please use package resolver. -type Watcher interface { - // Next blocks until an update or error happens. It may return one or more - // updates. The first call should get the full set of the results. It should - // return an error if and only if Watcher cannot recover. - Next() ([]*Update, error) - // Close closes the Watcher. - Close() -} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 00447894f..a58174b6f 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -20,80 +20,31 @@ package grpc import ( "context" - "fmt" "io" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) -// v2PickerWrapper wraps a balancer.Picker while providing the -// balancer.V2Picker API. It requires a pickerWrapper to generate errors -// including the latest connectionError. To be deleted when balancer.Picker is -// updated to the balancer.V2Picker API. -type v2PickerWrapper struct { - picker balancer.Picker - connErr *connErr -} - -func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - sc, done, err := v.picker.Pick(info.Ctx, info) - if err != nil { - if err == balancer.ErrTransientFailure { - return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError())) - } - return balancer.PickResult{}, err - } - return balancer.PickResult{SubConn: sc, Done: done}, nil -} - // pickerWrapper is a wrapper of balancer.Picker. 
It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { mu sync.Mutex done bool blockingCh chan struct{} - picker balancer.V2Picker - - // The latest connection error. TODO: remove when V1 picker is deprecated; - // balancer should be responsible for providing the error. - *connErr -} - -type connErr struct { - mu sync.Mutex - err error -} - -func (c *connErr) updateConnectionError(err error) { - c.mu.Lock() - c.err = err - c.mu.Unlock() -} - -func (c *connErr) connectionError() error { - c.mu.Lock() - err := c.err - c.mu.Unlock() - return err + picker balancer.Picker } func newPickerWrapper() *pickerWrapper { - return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}} + return &pickerWrapper{blockingCh: make(chan struct{})} } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { - pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr}) -} - -// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. -func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) { pw.mu.Lock() if pw.done { pw.mu.Unlock() @@ -154,8 +105,6 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var errStr string if lastPickErr != nil { errStr = "latest balancer error: " + lastPickErr.Error() - } else if connectionErr := pw.connectionError(); connectionErr != nil { - errStr = "latest connection error: " + connectionErr.Error() } else { errStr = ctx.Err().Error() } @@ -180,23 +129,22 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if err == balancer.ErrNoSubConnAvailable { continue } - if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() { - if !failfast { - lastPickErr = err - continue - } - return nil, nil, status.Error(codes.Unavailable, err.Error()) - } if _, ok := status.FromError(err); ok { + // Status error: end the RPC unconditionally with this status. return nil, nil, err } - // err is some other error. - return nil, nil, status.Error(codes.Unknown, err.Error()) + // For all other errors, wait for ready RPCs should block and other + // RPCs should fail with unavailable. + if !failfast { + lastPickErr = err + continue + } + return nil, nil, status.Error(codes.Unavailable, err.Error()) } acw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { - grpclog.Error("subconn returned from pick is not *acBalancerWrapper") + logger.Error("subconn returned from pick is not *acBalancerWrapper") continue } if t, ok := acw.getAddrConn().getReadyTransport(); ok { @@ -210,7 +158,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. // DoneInfo with default value works. pickResult.Done(balancer.DoneInfo{}) } - grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") // If ok == false, ac.state is not READY. // A valid picker always returns READY subConn. This means the state of ac // just changed, and picker will be updated shortly. 
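From the caller's side, the pick error handling above means non-status errors from the picker block wait-for-ready RPCs and fail fail-fast RPCs with codes.Unavailable. A short sketch using the stock health-check client; the target address and timeout are placeholders:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Plain-text connection to a hypothetical local server.
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// With WaitForReady(true) the RPC blocks while the picker reports
	// transient errors, until a ready subchannel appears or ctx expires;
	// without it the same errors surface immediately as codes.Unavailable.
	client := healthpb.NewHealthClient(conn)
	_, err = client.Check(ctx, &healthpb.HealthCheckRequest{}, grpc.WaitForReady(true))
	log.Printf("health check: %v", err)
}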
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index c43dac9ad..56e33f6c7 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -20,13 +20,10 @@ package grpc import ( "errors" + "fmt" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" ) // PickFirstBalancerName is the name of the pick_first balancer. @@ -52,30 +49,16 @@ type pickfirstBalancer struct { sc balancer.SubConn } -var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2 - -func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - if err != nil { - b.ResolverError(err) - return - } - b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error -} - -func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s}) -} - func (b *pickfirstBalancer) ResolverError(err error) { switch b.state { case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: // Set a failing picker if we don't have a good picker. b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}}, - ) + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err) + if logger.V(2) { + logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) } } @@ -88,13 +71,13 @@ func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) e var err error b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) if err != nil { - if grpclog.V(2) { - grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}}, - ) + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) return balancer.ErrBadResolverState } b.state = connectivity.Idle @@ -108,12 +91,12 @@ func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) e } func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) } if b.sc != sc { - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + if logger.V(2) { + logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") } return } @@ -129,15 +112,9 @@ func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.S case connectivity.Connecting: b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) case connectivity.TransientFailure: - err := balancer.ErrTransientFailure - // 
TODO: this can be unconditional after the V1 API is removed, as - // SubConnState will always contain a connection error. - if s.ConnectionError != nil { - err = balancer.TransientFailureError(s.ConnectionError) - } b.cc.UpdateState(balancer.State{ ConnectivityState: s.ConnectivityState, - Picker: &picker{err: err}, + Picker: &picker{err: s.ConnectionError}, }) } } diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 76acbbcc9..0a1e975ad 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -25,7 +25,10 @@ import ( // PreparedMsg is responsible for creating a Marshalled and Compressed object. // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type PreparedMsg struct { // Struct for preparing msg before sending them encodedData []byte diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh new file mode 100644 index 000000000..ed52187df --- /dev/null +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -0,0 +1,124 @@ +#!/bin/bash +# Copyright 2020 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu -o pipefail + +WORKDIR=$(mktemp -d) + +function finish { + rm -rf "$WORKDIR" +} +trap finish EXIT + +export GOBIN=${WORKDIR}/bin +export PATH=${GOBIN}:${PATH} +mkdir -p ${GOBIN} + +echo "remove existing generated files" +# grpc_testingv3/testv3.pb.go is not re-generated because it was +# intentionally generated by an older version of protoc-gen-go. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') + +echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" +(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) + +echo "go install cmd/protoc-gen-go-grpc" +(cd cmd/protoc-gen-go-grpc && go install .) + +echo "git clone https://github.com/grpc/grpc-proto" +git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto + +# Pull in code.proto as a proto dependency +mkdir -p ${WORKDIR}/googleapis/google/rpc +echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" +curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto + +# Pull in the MeshCA service proto. 
+mkdir -p ${WORKDIR}/istio/istio/google/security/meshca/v1 +echo "curl https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto" +curl --silent https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto > ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto + +mkdir -p ${WORKDIR}/out + +# Generates sources without the embed requirement +LEGACY_SOURCES=( + ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto + ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto + ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto + ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto + profiling/proto/service.proto + reflection/grpc_reflection_v1alpha/reflection.proto +) + +# Generates only the new gRPC Service symbols +SOURCES=( + $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$') + ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto + ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto + ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto + ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto + ${WORKDIR}/grpc-proto/grpc/testing/*.proto + ${WORKDIR}/grpc-proto/grpc/core/*.proto + ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto +) + +# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an +# import path of 'bar' in the generated code when 'foo.proto' is imported in +# one of the sources. +OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core + +for src in ${SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/istio \ + ${src} +done + +for src in ${LEGACY_SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/istio \ + ${src} +done + +# The go_package option in grpc/lookup/v1/rls.proto doesn't match the +# current location. Move it into the right place. +mkdir -p ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 + +# grpc_testingv3/testv3.pb.go is not re-generated because it was +# intentionally generated by an older version of protoc-gen-go. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go + +# grpc/service_config/service_config.proto does not have a go_package option. +mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config + +# grpc/testing does not have a go_package option. +mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ +mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ + +# istio/google/security/meshca/v1/meshca.proto does not have a go_package option. 
+mkdir -p ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/ +mv ${WORKDIR}/out/istio/google/security/meshca/v1/* ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/ + +cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 03567d7be..e9fa8e33d 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -85,12 +85,19 @@ const ( Backend AddressType = iota // GRPCLB indicates the address is for a grpclb load balancer. // - // Deprecated: use Attributes in Address instead. + // Deprecated: to select the GRPCLB load balancing policy, use a service + // config with a corresponding loadBalancingConfig. To supply balancer + // addresses to the GRPCLB load balancing policy, set State.Attributes + // using balancer/grpclb/state.Set. GRPCLB ) // Address represents a server the client connects to. -// This is the EXPERIMENTAL API and may be changed or extended in the future. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type Address struct { // Addr is the server address on which a connection will be established. Addr string @@ -124,11 +131,6 @@ type Address struct { Metadata interface{} } -// BuildOption is a type alias of BuildOptions for legacy reasons. -// -// Deprecated: use BuildOptions instead. -type BuildOption = BuildOptions - // BuildOptions includes additional information for the builder to create // the resolver. type BuildOptions struct { @@ -235,11 +237,6 @@ type Builder interface { Scheme() string } -// ResolveNowOption is a type alias of ResolveNowOptions for legacy reasons. -// -// Deprecated: use ResolveNowOptions instead. -type ResolveNowOption = ResolveNowOptions - // ResolveNowOptions includes additional information for ResolveNow. type ResolveNowOptions struct{} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 89ba9fa3e..f2d81968f 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -26,7 +26,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" @@ -34,7 +33,7 @@ import ( ) // ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConnection interface. +// It implements resolver.ClientConn interface. type ccResolverWrapper struct { cc *ClientConn resolverMu sync.Mutex @@ -46,43 +45,9 @@ type ccResolverWrapper struct { polling chan struct{} } -// split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns ("", "", false) instead. -func split2(s, sep string) (string, string, bool) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - return "", "", false - } - return spl[0], spl[1], true -} - -// parseTarget splits target into a struct containing scheme, authority and -// endpoint. -// -// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: -// target}. 
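The GRPCLB deprecation note in resolver.go above points to service config for selecting a load-balancing policy. A hedged sketch of supplying such a config at dial time; round_robin is used purely as an example policy, and WithDefaultServiceConfig is itself an experimental API:

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// The JSON service config selects a balancing policy by name; a config
	// delivered by the resolver, when present, takes precedence over this
	// dial-time default.
	conn, err := grpc.Dial(
		"dns:///example.com:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}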
-func parseTarget(target string) (ret resolver.Target) { - var ok bool - ret.Scheme, ret.Endpoint, ok = split2(target, "://") - if !ok { - return resolver.Target{Endpoint: target} - } - ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") - if !ok { - return resolver.Target{Endpoint: target} - } - return ret -} - -// newCCResolverWrapper uses the resolver.Builder stored in the ClientConn to -// build a Resolver and returns a ccResolverWrapper object which wraps the -// newly built resolver. -func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { - rb := cc.dopts.resolverBuilder - if rb == nil { - return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme) - } - +// newCCResolverWrapper uses the resolver.Builder to build a Resolver and +// returns a ccResolverWrapper object which wraps the newly built resolver. +func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { ccr := &ccResolverWrapper{ cc: cc, done: grpcsync.NewEvent(), @@ -175,7 +140,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { if ccr.done.HasFired() { return } - grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) if channelz.IsOn() { ccr.addChannelzTraceEvent(s) } @@ -187,13 +152,7 @@ func (ccr *ccResolverWrapper) ReportError(err error) { if ccr.done.HasFired() { return } - grpclog.Warningf("ccResolverWrapper: reporting error to cc: %v", err) - if channelz.IsOn() { - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver reported error: %v", err), - Severity: channelz.CtWarning, - }) - } + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) } @@ -202,7 +161,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { if ccr.done.HasFired() { return } - grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) if channelz.IsOn() { ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) } @@ -216,20 +175,14 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } - grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) if ccr.cc.dopts.disableServiceConfig { - grpclog.Infof("Service config lookups disabled; ignoring config") + channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return } scpr := parseServiceConfig(sc) if scpr.Err != nil { - grpclog.Warningf("ccResolverWrapper: error parsing service config: %v", scpr.Err) - if channelz.IsOn() { - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Error parsing service config: %v", scpr.Err), - Severity: channelz.CtWarning, - }) - } + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) ccr.poll(balancer.ErrBadResolverState) return } @@ -262,8 +215,8 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - 
channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtINFO, + Severity: channelz.CtInfo, }) } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index edaba7957..ea5bb8d0c 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -27,7 +27,6 @@ import ( "io" "io/ioutil" "math" - "net/url" "strings" "sync" "time" @@ -155,7 +154,6 @@ func (d *gzipDecompressor) Type() string { type callInfo struct { compressorType string failFast bool - stream ClientStream maxReceiveMessageSize *int maxSendMessageSize *int creds credentials.PerRPCCredentials @@ -180,7 +178,7 @@ type CallOption interface { // after is called after the call has completed. after cannot return an // error, so any failures should be reported via output parameters. - after(*callInfo) + after(*callInfo, *csAttempt) } // EmptyCallOption does not alter the Call configuration. @@ -188,8 +186,8 @@ type CallOption interface { // by interceptors. type EmptyCallOption struct{} -func (EmptyCallOption) before(*callInfo) error { return nil } -func (EmptyCallOption) after(*callInfo) {} +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo, *csAttempt) {} // Header returns a CallOptions that retrieves the header metadata // for a unary RPC. @@ -199,16 +197,18 @@ func Header(md *metadata.MD) CallOption { // HeaderCallOption is a CallOption for collecting response header metadata. // The metadata field will be populated *after* the RPC completes. -// This is an EXPERIMENTAL API. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type HeaderCallOption struct { HeaderAddr *metadata.MD } func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo) { - if c.stream != nil { - *o.HeaderAddr, _ = c.stream.Header() - } +func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { + *o.HeaderAddr, _ = attempt.s.Header() } // Trailer returns a CallOptions that retrieves the trailer metadata @@ -219,16 +219,18 @@ func Trailer(md *metadata.MD) CallOption { // TrailerCallOption is a CallOption for collecting response trailer metadata. // The metadata field will be populated *after* the RPC completes. -// This is an EXPERIMENTAL API. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type TrailerCallOption struct { TrailerAddr *metadata.MD } func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo) { - if c.stream != nil { - *o.TrailerAddr = c.stream.Trailer() - } +func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { + *o.TrailerAddr = attempt.s.Trailer() } // Peer returns a CallOption that retrieves peer information for a unary RPC. @@ -239,17 +241,19 @@ func Peer(p *peer.Peer) CallOption { // PeerCallOption is a CallOption for collecting the identity of the remote // peer. The peer field will be populated *after* the RPC completes. -// This is an EXPERIMENTAL API. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
type PeerCallOption struct { PeerAddr *peer.Peer } func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo) { - if c.stream != nil { - if x, ok := peer.FromContext(c.stream.Context()); ok { - *o.PeerAddr = *x - } +func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { + if x, ok := peer.FromContext(attempt.s.Context()); ok { + *o.PeerAddr = *x } } @@ -276,7 +280,11 @@ func FailFast(failFast bool) CallOption { // FailFastCallOption is a CallOption for indicating whether an RPC should fail // fast or not. -// This is an EXPERIMENTAL API. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type FailFastCallOption struct { FailFast bool } @@ -285,16 +293,21 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo) {} +func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} -// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. -func MaxCallRecvMsgSize(s int) CallOption { - return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s} +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can receive. +func MaxCallRecvMsgSize(bytes int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message -// size the client can receive. -// This is an EXPERIMENTAL API. +// size in bytes the client can receive. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type MaxRecvMsgSizeCallOption struct { MaxRecvMsgSize int } @@ -303,16 +316,21 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} -// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. -func MaxCallSendMsgSize(s int) CallOption { - return MaxSendMsgSizeCallOption{MaxSendMsgSize: s} +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can send. +func MaxCallSendMsgSize(bytes int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message -// size the client can send. -// This is an EXPERIMENTAL API. +// size in bytes the client can send. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type MaxSendMsgSizeCallOption struct { MaxSendMsgSize int } @@ -321,7 +339,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo) {} +func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. @@ -331,7 +349,11 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { // PerRPCCredsCallOption is a CallOption that indicates the per-RPC // credentials to use for the call. -// This is an EXPERIMENTAL API. 
+// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type PerRPCCredsCallOption struct { Creds credentials.PerRPCCredentials } @@ -340,19 +362,26 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo) {} +func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. If WithCompressor is also set, UseCompressor has // higher priority. // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func UseCompressor(name string) CallOption { return CompressorCallOption{CompressorType: name} } // CompressorCallOption is a CallOption that indicates the compressor to use. -// This is an EXPERIMENTAL API. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type CompressorCallOption struct { CompressorType string } @@ -361,7 +390,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo) {} +func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over @@ -385,7 +414,11 @@ func CallContentSubtype(contentSubtype string) CallOption { // ContentSubtypeCallOption is a CallOption that indicates the content-subtype // used for marshaling messages. -// This is an EXPERIMENTAL API. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type ContentSubtypeCallOption struct { ContentSubtype string } @@ -394,7 +427,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo) {} +func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} // ForceCodec returns a CallOption that will set the given Codec to be // used for all request and response messages for a call. The result of calling @@ -409,7 +442,10 @@ func (o ContentSubtypeCallOption) after(c *callInfo) {} // This function is provided for advanced users; prefer to use only // CallContentSubtype to select a registered codec instead. // -// This is an EXPERIMENTAL API. +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func ForceCodec(codec encoding.Codec) CallOption { return ForceCodecCallOption{Codec: codec} } @@ -417,7 +453,10 @@ func ForceCodec(codec encoding.Codec) CallOption { // ForceCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// This is an EXPERIMENTAL API. +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type ForceCodecCallOption struct { Codec encoding.Codec } @@ -426,7 +465,7 @@ func (o ForceCodecCallOption) before(c *callInfo) error { c.codec = o.Codec return nil } -func (o ForceCodecCallOption) after(c *callInfo) {} +func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. 
@@ -439,7 +478,10 @@ func CallCustomCodec(codec Codec) CallOption { // CustomCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// This is an EXPERIMENTAL API. +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type CustomCodecCallOption struct { Codec Codec } @@ -448,19 +490,26 @@ func (o CustomCodecCallOption) before(c *callInfo) error { c.codec = o.Codec return nil } -func (o CustomCodecCallOption) after(c *callInfo) {} +func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func MaxRetryRPCBufferSize(bytes int) CallOption { return MaxRetryRPCBufferSizeCallOption{bytes} } // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of // memory to be used for caching this RPC for retry purposes. -// This is an EXPERIMENTAL API. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type MaxRetryRPCBufferSizeCallOption struct { MaxRetryRPCBufferSize int } @@ -469,7 +518,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 @@ -822,40 +871,6 @@ func setCallInfoCodec(c *callInfo) error { return nil } -// parseDialTarget returns the network and address to pass to dialer -func parseDialTarget(target string) (net string, addr string) { - net = "tcp" - - m1 := strings.Index(target, ":") - m2 := strings.Index(target, ":/") - - // handle unix:addr which will fail with url.Parse - if m1 >= 0 && m2 < 0 { - if n := target[0:m1]; n == "unix" { - net = n - addr = target[m1+1:] - return net, addr - } - } - if m2 >= 0 { - t, err := url.Parse(target) - if err != nil { - return net, target - } - scheme := t.Scheme - addr = t.Path - if scheme == "unix" { - net = scheme - if addr == "" { - addr = t.Host - } - return net, addr - } - } - - return net, target -} - // channelzData is used to store channelz related data for ClientConn, addrConn and Server. // These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic // operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. @@ -871,7 +886,7 @@ type channelzData struct { // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. The latest -// support package version is 5. +// support package version is 7. // // Older versions are kept for compatibility. They may be removed if // compatibility cannot be maintained. 
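The alignment caveat in the channelzData comment above is a general Go rule: 64-bit atomic operations need 64-bit alignment, and on 32-bit platforms that is only guaranteed for the first word of a separately allocated struct. A small illustration of the safe layout:

package main

import (
	"fmt"
	"sync/atomic"
)

// counters keeps its int64 fields at the front of a separately allocated
// struct so atomic 64-bit operations stay safe even on 32-bit targets.
type counters struct {
	callsStarted int64
	callsFailed  int64
}

func main() {
	c := &counters{}
	atomic.AddInt64(&c.callsStarted, 1)
	fmt.Println(atomic.LoadInt64(&c.callsStarted))
}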
@@ -881,6 +896,8 @@ const ( SupportPackageIsVersion3 = true SupportPackageIsVersion4 = true SupportPackageIsVersion5 = true + SupportPackageIsVersion6 = true + SupportPackageIsVersion7 = true ) const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 0d75cb109..7a2aa28a1 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -40,8 +40,10 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -57,7 +59,14 @@ const ( defaultServerMaxSendMessageSize = math.MaxInt32 ) +func init() { + internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { + return srv.opts.creds + } +} + var statusOK = status.New(codes.OK, "") +var logger = grpclog.Component("core") type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) @@ -78,27 +87,34 @@ type ServiceDesc struct { Metadata interface{} } -// service consists of the information of the server serving this service and -// the methods in this service. -type service struct { - server interface{} // the server for service methods - md map[string]*MethodDesc - sd map[string]*StreamDesc - mdata interface{} +// serviceInfo wraps information about a service. It is very similar to +// ServiceDesc and is constructed from it for internal purposes. +type serviceInfo struct { + // Contains the implementation for the methods in this service. + serviceImpl interface{} + methods map[string]*MethodDesc + streams map[string]*StreamDesc + mdata interface{} +} + +type serverWorkerData struct { + st transport.ServerTransport + wg *sync.WaitGroup + stream *transport.Stream } // Server is a gRPC server to serve RPC requests. 
type Server struct { opts serverOptions - mu sync.Mutex // guards following - lis map[net.Listener]bool - conns map[transport.ServerTransport]bool - serve bool - drain bool - cv *sync.Cond // signaled when connections close for GracefulStop - m map[string]*service // service name -> service info - events trace.EventLog + mu sync.Mutex // guards following + lis map[net.Listener]bool + conns map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + services map[string]*serviceInfo // service name -> service info + events trace.EventLog quit *grpcsync.Event done *grpcsync.Event @@ -107,6 +123,8 @@ type Server struct { channelzID int64 // channelz unique identification number czData *channelzData + + serverWorkerChannels []chan *serverWorkerData } type serverOptions struct { @@ -116,6 +134,8 @@ type serverOptions struct { dc Decompressor unaryInt UnaryServerInterceptor streamInt StreamServerInterceptor + chainUnaryInts []UnaryServerInterceptor + chainStreamInts []StreamServerInterceptor inTapHandle tap.ServerInHandle statsHandler stats.Handler maxConcurrentStreams uint32 @@ -131,6 +151,7 @@ type serverOptions struct { connectionTimeout time.Duration maxHeaderListSize *uint32 headerTableSize *uint32 + numServerWorkers uint32 } var defaultServerOptions = serverOptions{ @@ -149,7 +170,10 @@ type ServerOption interface { // EmptyServerOption does not alter the server configuration. It can be embedded // in another structure to build custom server options. // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type EmptyServerOption struct{} func (EmptyServerOption) apply(*serverOptions) {} @@ -211,7 +235,7 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { if kp.Time > 0 && kp.Time < time.Second { - grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s") + logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") kp.Time = time.Second } @@ -230,6 +254,12 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. // // This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +// +// Deprecated: register codecs using encoding.RegisterCodec. The server will +// automatically use registered codecs based on the incoming requests' headers. +// See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. func CustomCodec(codec Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.codec = codec @@ -242,7 +272,8 @@ func CustomCodec(codec Codec) ServerOption { // default, server messages will be sent using the same compressor with which // request messages were sent. // -// Deprecated: use encoding.RegisterCompressor instead. +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. func RPCCompressor(cp Compressor) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.cp = cp @@ -253,7 +284,8 @@ func RPCCompressor(cp Compressor) ServerOption { // messages. 
It has higher priority than decompressors registered via // encoding.RegisterCompressor. // -// Deprecated: use encoding.RegisterCompressor instead. +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. func RPCDecompressor(dc Decompressor) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.dc = dc @@ -263,7 +295,7 @@ func RPCDecompressor(dc Decompressor) ServerOption { // MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. // If this is not set, gRPC uses the default limit. // -// Deprecated: use MaxRecvMsgSize instead. +// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x. func MaxMsgSize(m int) ServerOption { return MaxRecvMsgSize(m) } @@ -311,6 +343,16 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { }) } +// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor +// for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All unary interceptors added by this method will be chained. +func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + // StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the // server. Only one stream interceptor can be installed. func StreamInterceptor(i StreamServerInterceptor) ServerOption { @@ -322,6 +364,16 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption { }) } +// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor +// for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All stream interceptors added by this method will be chained. +func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. func InTapHandle(h tap.ServerInHandle) ServerOption { @@ -363,7 +415,10 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { // new connections. If this is not set, the default is 120 seconds. A zero or // negative value will result in an immediate timeout. // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func ConnectionTimeout(d time.Duration) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.connectionTimeout = d @@ -381,13 +436,79 @@ func MaxHeaderListSize(s uint32) ServerOption { // HeaderTableSize returns a ServerOption that sets the size of dynamic // header table for stream. // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func HeaderTableSize(s uint32) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.headerTableSize = &s }) } +// NumStreamWorkers returns a ServerOption that sets the number of worker +// goroutines that should be used to process incoming streams. 
Setting this to +// zero (default) will disable workers and spawn a new goroutine for each +// stream. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NumStreamWorkers(numServerWorkers uint32) ServerOption { + // TODO: If/when this API gets stabilized (i.e. stream workers become the + // only way streams are processed), change the behavior of the zero value to + // a sane default. Preliminary experiments suggest that a value equal to the + // number of CPUs available is most performant; requires thorough testing. + return newFuncServerOption(func(o *serverOptions) { + o.numServerWorkers = numServerWorkers + }) +} + +// serverWorkerResetThreshold defines how often the stack must be reset. Every +// N requests, by spawning a new goroutine in its place, a worker can reset its +// stack so that large stacks don't live in memory forever. 2^16 should allow +// each goroutine stack to live for at least a few seconds in a typical +// workload (assuming a QPS of a few thousand requests/sec). +const serverWorkerResetThreshold = 1 << 16 + +// serverWorkers blocks on a *transport.Stream channel forever and waits for +// data to be fed by serveStreams. This allows different requests to be +// processed by the same goroutine, removing the need for expensive stack +// re-allocations (see the runtime.morestack problem [1]). +// +// [1] https://github.com/golang/go/issues/18138 +func (s *Server) serverWorker(ch chan *serverWorkerData) { + // To make sure all server workers don't reset at the same time, choose a + // random number of iterations before resetting. + threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) + for completed := 0; completed < threshold; completed++ { + data, ok := <-ch + if !ok { + return + } + s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) + data.wg.Done() + } + go s.serverWorker(ch) +} + +// initServerWorkers creates worker goroutines and channels to process incoming +// connections to reduce the time spent overall on runtime.morestack. +func (s *Server) initServerWorkers() { + s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + s.serverWorkerChannels[i] = make(chan *serverWorkerData) + go s.serverWorker(s.serverWorkerChannels[i]) + } +} + +func (s *Server) stopServerWorkers() { + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + close(s.serverWorkerChannels[i]) + } +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. 
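// Illustrative usage sketch (not taken from the vendored sources): a server
// built with the ChainUnaryInterceptor and NumStreamWorkers options added in
// this patch. The listener address and the logging interceptor are assumed
// names, not APIs introduced by the patch.
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
)

// logUnary is a trivial interceptor; chained interceptors run outermost-first.
func logUnary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("unary call: %s", info.FullMethod)
	return handler(ctx, req)
}

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer(
		grpc.ChainUnaryInterceptor(logUnary),
		grpc.NumStreamWorkers(8), // experimental: reuse worker goroutines across streams
	)
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}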
func NewServer(opt ...ServerOption) *Server { @@ -396,20 +517,26 @@ func NewServer(opt ...ServerOption) *Server { o.apply(&opts) } s := &Server{ - lis: make(map[net.Listener]bool), - opts: opts, - conns: make(map[transport.ServerTransport]bool), - m: make(map[string]*service), - quit: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - czData: new(channelzData), + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + czData: new(channelzData), } + chainUnaryServerInterceptors(s) + chainStreamServerInterceptors(s) s.cv = sync.NewCond(&s.mu) if EnableTracing { _, file, line, _ := runtime.Caller(1) s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) } + if s.opts.numServerWorkers > 0 { + s.initServerWorkers() + } + if channelz.IsOn() { s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") } @@ -432,14 +559,29 @@ func (s *Server) errorf(format string, a ...interface{}) { } } +// ServiceRegistrar wraps a single method that supports service registration. It +// enables users to pass concrete types other than grpc.Server to the service +// registration methods exported by the IDL generated code. +type ServiceRegistrar interface { + // RegisterService registers a service and its implementation to the + // concrete type implementing this interface. It may not be called + // once the server has started serving. + // desc describes the service and its methods and handlers. impl is the + // service implementation which is passed to the method handlers. + RegisterService(desc *ServiceDesc, impl interface{}) +} + // RegisterService registers a service and its implementation to the gRPC // server. It is called from the IDL generated code. This must be called before -// invoking Serve. +// invoking Serve. If ss is non-nil (for legacy code), its type is checked to +// ensure it implements sd.HandlerType. 
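// Illustrative sketch (not from the vendored sources): the ServiceRegistrar
// interface above lets generated RegisterFooServer helpers accept wrappers
// around *grpc.Server. The wrapper below and its field names are assumptions.
package registrar

import "google.golang.org/grpc"

// loggingRegistrar delegates registration to a real *grpc.Server while
// remembering which services were registered.
type loggingRegistrar struct {
	srv      *grpc.Server
	services []string
}

func (r *loggingRegistrar) RegisterService(desc *grpc.ServiceDesc, impl interface{}) {
	r.services = append(r.services, desc.ServiceName)
	r.srv.RegisterService(desc, impl)
}

// Compile-time check that the wrapper satisfies grpc.ServiceRegistrar.
var _ grpc.ServiceRegistrar = (*loggingRegistrar)(nil)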
func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { - ht := reflect.TypeOf(sd.HandlerType).Elem() - st := reflect.TypeOf(ss) - if !st.Implements(ht) { - grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + if ss != nil { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } } s.register(sd, ss) } @@ -449,26 +591,26 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) { defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) if s.serve { - grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) } - if _, ok := s.m[sd.ServiceName]; ok { - grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + if _, ok := s.services[sd.ServiceName]; ok { + logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) } - srv := &service{ - server: ss, - md: make(map[string]*MethodDesc), - sd: make(map[string]*StreamDesc), - mdata: sd.Metadata, + info := &serviceInfo{ + serviceImpl: ss, + methods: make(map[string]*MethodDesc), + streams: make(map[string]*StreamDesc), + mdata: sd.Metadata, } for i := range sd.Methods { d := &sd.Methods[i] - srv.md[d.MethodName] = d + info.methods[d.MethodName] = d } for i := range sd.Streams { d := &sd.Streams[i] - srv.sd[d.StreamName] = d + info.streams[d.StreamName] = d } - s.m[sd.ServiceName] = srv + s.services[sd.ServiceName] = info } // MethodInfo contains the information of an RPC including its method name and type. @@ -492,16 +634,16 @@ type ServiceInfo struct { // Service names include the package names, in the form of <package>.<service>.
func (s *Server) GetServiceInfo() map[string]ServiceInfo { ret := make(map[string]ServiceInfo) - for n, srv := range s.m { - methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) - for m := range srv.md { + for n, srv := range s.services { + methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams)) + for m := range srv.methods { methods = append(methods, MethodInfo{ Name: m, IsClientStream: false, IsServerStream: false, }) } - for m, d := range srv.sd { + for m, d := range srv.streams { methods = append(methods, MethodInfo{ Name: m, IsClientStream: d.ClientStreams, @@ -658,7 +800,7 @@ func (s *Server) handleRawConn(rawConn net.Conn) { s.mu.Lock() s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) s.mu.Unlock() - grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) rawConn.Close() } rawConn.SetDeadline(time.Time{}) @@ -705,7 +847,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() c.Close() - grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) return nil } @@ -715,12 +857,27 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr func (s *Server) serveStreams(st transport.ServerTransport) { defer st.Close() var wg sync.WaitGroup + + var roundRobinCounter uint32 st.HandleStreams(func(stream *transport.Stream) { wg.Add(1) - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() + if s.opts.numServerWorkers > 0 { + data := &serverWorkerData{st: st, wg: &wg, stream: stream} + select { + case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: + default: + // If all stream workers are busy, fallback to the default code path. + go func() { + s.handleStream(st, stream, s.traceInfo(st, stream)) + wg.Done() + }() + } + } else { + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + } }, func(ctx context.Context, method string) context.Context { if !EnableTracing { return ctx @@ -755,8 +912,12 @@ var _ http.Handler = (*Server)(nil) // Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally // separate from grpc-go's HTTP/2 server. Performance and features may vary // between the two paths. ServeHTTP does not support some gRPC features -// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL -// and subject to change. +// available through grpc-go's HTTP/2 server. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
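// Illustrative sketch (not from the vendored sources): since *grpc.Server
// implements http.Handler, it can be mounted on a standard net/http server.
// The certificate paths are placeholders; TLS is required so that HTTP/2 is
// negotiated for ServeHTTP.
package main

import (
	"log"
	"net/http"

	"google.golang.org/grpc"
)

func main() {
	gs := grpc.NewServer()
	httpSrv := &http.Server{Addr: ":8443", Handler: gs}
	log.Fatal(httpSrv.ListenAndServeTLS("server.crt", "server.key"))
}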
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) if err != nil { @@ -844,12 +1005,12 @@ func (s *Server) incrCallsFailed() { func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { - grpclog.Errorln("grpc: server failed to encode response: ", err) + channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) return err } compData, err := compress(data, cp, comp) if err != nil { - grpclog.Errorln("grpc: server failed to compress response: ", err) + channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err) return err } hdr, payload := msgHeader(data, compData) @@ -864,7 +1025,41 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return err } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { +// chainUnaryServerInterceptors chains all unary server interceptors into one. +func chainUnaryServerInterceptors(s *Server) { + // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainUnaryInts + if s.opts.unaryInt != nil { + interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...) + } + + var chainedInt UnaryServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } + } + + s.opts.unaryInt = chainedInt +} + +// getChainUnaryHandler recursively generate the chained UnaryHandler +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { sh := s.opts.statsHandler if sh != nil || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -987,10 +1182,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { - if st, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, st); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) - } + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) } return err } @@ -1005,7 +1198,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
sh.HandleRPC(stream.Context(), &stats.InPayload{ RecvTime: time.Now(), Payload: v, - WireLength: payInfo.wireLength, + WireLength: payInfo.wireLength + headerLen, Data: d, Length: len(d), }) @@ -1021,7 +1214,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. return nil } ctx := NewContextWithServerTransportStream(stream.Context(), stream) - reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt) + reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { @@ -1034,7 +1227,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.SetError() } if e := t.WriteStatus(stream, appStatus); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if binlog != nil { if h, _ := stream.Header(); h.Len() > 0 { @@ -1061,9 +1254,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // The entire stream is done (for unary RPC only). return err } - if s, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, s); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + if sts, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, sts); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { switch st := err.(type) { @@ -1113,7 +1306,41 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. return err } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { +// chainStreamServerInterceptors chains all stream server interceptors into one. +func chainStreamServerInterceptors(s *Server) { + // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainStreamInts + if s.opts.streamInt != nil { + interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...) 
+ } + + var chainedInt StreamServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } + } + + s.opts.streamInt = chainedInt +} + +// getChainStreamHandler recursively generate the chained StreamHandler +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + + return func(srv interface{}, ss ServerStream) error { + return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1230,8 +1457,8 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp } var appErr error var server interface{} - if srv != nil { - server = srv.server + if info != nil { + server = info.serviceImpl } if s.opts.streamInt == nil { appErr = sd.Handler(server, ss) @@ -1297,7 +1524,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1307,13 +1534,13 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] - srv, knownService := s.m[service] + srv, knownService := s.services[service] if knownService { - if md, ok := srv.md[method]; ok { + if md, ok := srv.methods[method]; ok { s.processUnaryRPC(t, stream, srv, md, trInfo) return } - if sd, ok := srv.sd[method]; ok { + if sd, ok := srv.streams[method]; ok { s.processStreamingRPC(t, stream, srv, sd, trInfo) return } @@ -1338,7 +1565,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1351,7 +1578,10 @@ type streamKey struct{} // NewContextWithServerTransportStream creates a new context from ctx and // attaches stream to it. // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { return context.WithValue(ctx, streamKey{}, stream) } @@ -1363,7 +1593,10 @@ func NewContextWithServerTransportStream(ctx context.Context, stream ServerTrans // // See also NewContextWithServerTransportStream. // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
type ServerTransportStream interface { Method() string SetHeader(md metadata.MD) error @@ -1375,7 +1608,10 @@ type ServerTransportStream interface { // ctx. Returns nil if the given context has no stream associated with it // (which implies it is not an RPC invocation context). // -// This API is EXPERIMENTAL. +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { s, _ := ctx.Value(streamKey{}).(ServerTransportStream) return s @@ -1415,6 +1651,9 @@ func (s *Server) Stop() { for c := range st { c.Close() } + if s.opts.numServerWorkers > 0 { + s.stopServerWorkers() + } s.mu.Lock() if s.events != nil { diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 4f8836d48..22c4240cf 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -20,15 +20,16 @@ package grpc import ( "encoding/json" + "errors" "fmt" + "reflect" "strconv" "strings" "time" - "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" ) @@ -40,29 +41,7 @@ const maxInt = int(^uint(0) >> 1) // Deprecated: Users should not use this struct. Service config should be received // through name resolver, as specified here // https://github.com/grpc/grpc/blob/master/doc/service_config.md -type MethodConfig struct { - // WaitForReady indicates whether RPCs sent to this method should wait until - // the connection is ready by default (!failfast). The value specified via the - // gRPC client API will override the value set here. - WaitForReady *bool - // Timeout is the default timeout for RPCs sent to this method. The actual - // deadline used will be the minimum of the value specified here and the value - // set by the application via the gRPC client API. If either one is not set, - // then the other will be used. If neither is set, then the RPC has no deadline. - Timeout *time.Duration - // MaxReqSize is the maximum allowed payload size for an individual request in a - // stream (client->server) in bytes. The size which is measured is the serialized - // payload after per-message compression (but before stream compression) in bytes. - // The actual value used is the minimum of the value specified here and the value set - // by the application via the gRPC client API. If either one is not set, then the other - // will be used. If neither is set, then the built-in default is used. - MaxReqSize *int - // MaxRespSize is the maximum allowed payload size for an individual response in a - // stream (server->client) in bytes. - MaxRespSize *int - // RetryPolicy configures retry options for the method. - retryPolicy *retryPolicy -} +type MethodConfig = internalserviceconfig.MethodConfig type lbConfig struct { name string @@ -79,7 +58,7 @@ type ServiceConfig struct { serviceconfig.Config // LB is the load balancer the service providers recommends. The balancer - // specified via grpc.WithBalancer will override this. This is deprecated; + // specified via grpc.WithBalancerName will override this. This is deprecated; // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig // will be used. 
LB *string @@ -126,34 +105,6 @@ type healthCheckConfig struct { ServiceName string } -// retryPolicy defines the go-native version of the retry policy defined by the -// service config here: -// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config -type retryPolicy struct { - // MaxAttempts is the maximum number of attempts, including the original RPC. - // - // This field is required and must be two or greater. - maxAttempts int - - // Exponential backoff parameters. The initial retry attempt will occur at - // random(0, initialBackoffMS). In general, the nth attempt will occur at - // random(0, - // min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)). - // - // These fields are required and must be greater than zero. - initialBackoff time.Duration - maxBackoff time.Duration - backoffMultiplier float64 - - // The set of status codes which may be retried. - // - // Status codes are specified as strings, e.g., "UNAVAILABLE". - // - // This field is required and must be non-empty. - // Note: a set is used to store this for easy lookup. - retryableStatusCodes map[codes.Code]bool -} - type jsonRetryPolicy struct { MaxAttempts int InitialBackoff string @@ -224,19 +175,27 @@ func parseDuration(s *string) (*time.Duration, error) { } type jsonName struct { - Service *string - Method *string + Service string + Method string } -func (j jsonName) generatePath() (string, bool) { - if j.Service == nil { - return "", false +var ( + errDuplicatedName = errors.New("duplicated name") + errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'") +) + +func (j jsonName) generatePath() (string, error) { + if j.Service == "" { + if j.Method != "" { + return "", errEmptyServiceNonEmptyMethod + } + return "", nil } - res := "/" + *j.Service + "/" - if j.Method != nil { - res += *j.Method + res := "/" + j.Service + "/" + if j.Method != "" { + res += j.Method } - return res, true + return res, nil } // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. @@ -249,12 +208,10 @@ type jsonMC struct { RetryPolicy *jsonRetryPolicy } -type loadBalancingConfig map[string]json.RawMessage - // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
type jsonSC struct { LoadBalancingPolicy *string - LoadBalancingConfig *[]loadBalancingConfig + LoadBalancingConfig *internalserviceconfig.BalancerConfig MethodConfig *[]jsonMC RetryThrottling *retryThrottlingPolicy HealthCheckConfig *healthCheckConfig @@ -270,7 +227,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ @@ -280,53 +237,25 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { healthCheckConfig: rsc.HealthCheckConfig, rawJSONString: js, } - if rsc.LoadBalancingConfig != nil { - for i, lbcfg := range *rsc.LoadBalancingConfig { - if len(lbcfg) != 1 { - err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) - grpclog.Warningf(err.Error()) - return &serviceconfig.ParseResult{Err: err} - } - var name string - var jsonCfg json.RawMessage - for name, jsonCfg = range lbcfg { - } - builder := balancer.Get(name) - if builder == nil { - continue - } - sc.lbConfig = &lbConfig{name: name} - if parser, ok := builder.(balancer.ConfigParser); ok { - var err error - sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg) - if err != nil { - return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)} - } - } else if string(jsonCfg) != "{}" { - grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) - } - break - } - if sc.lbConfig == nil { - // We had a loadBalancingConfig field but did not encounter a - // supported policy. The config is considered invalid in this - // case. 
- err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found") - grpclog.Warningf(err.Error()) - return &serviceconfig.ParseResult{Err: err} + if c := rsc.LoadBalancingConfig; c != nil { + sc.lbConfig = &lbConfig{ + name: c.Name, + cfg: c.Config, } } if rsc.MethodConfig == nil { return &serviceconfig.ParseResult{Config: &sc} } + + paths := map[string]struct{}{} for _, m := range *rsc.MethodConfig { if m.Name == nil { continue } d, err := parseDuration(m.Timeout) if err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } @@ -334,8 +263,8 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { WaitForReady: m.WaitForReady, Timeout: d, } - if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -352,10 +281,20 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) } } - for _, n := range *m.Name { - if path, valid := n.generatePath(); valid { - sc.Methods[path] = mc + for i, n := range *m.Name { + path, err := n.generatePath() + if err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} } + + if _, ok := paths[path]; ok { + err = errDuplicatedName + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + paths[path] = struct{}{} + sc.Methods[path] = mc } } @@ -370,7 +309,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } -func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { +func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } @@ -388,23 +327,23 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { *mb <= 0 || jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { - grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) return nil, nil } - rp := &retryPolicy{ - maxAttempts: jrp.MaxAttempts, - initialBackoff: *ib, - maxBackoff: *mb, - backoffMultiplier: jrp.BackoffMultiplier, - retryableStatusCodes: make(map[codes.Code]bool), + rp := &internalserviceconfig.RetryPolicy{ + MaxAttempts: jrp.MaxAttempts, + InitialBackoff: *ib, + MaxBackoff: *mb, + BackoffMultiplier: jrp.BackoffMultiplier, + RetryableStatusCodes: make(map[codes.Code]bool), } - if rp.maxAttempts > 5 { + if rp.MaxAttempts > 5 { // TODO(retry): Make the max maxAttempts configurable. 
- rp.maxAttempts = 5 + rp.MaxAttempts = 5 } for _, code := range jrp.RetryableStatusCodes { - rp.retryableStatusCodes[code] = true + rp.RetryableStatusCodes[code] = true } return rp, nil } @@ -432,3 +371,34 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { func newInt(b int) *int { return &b } + +func init() { + internal.EqualServiceConfigForTesting = equalServiceConfig +} + +// equalServiceConfig compares two configs. The rawJSONString field is ignored, +// because they may diff in white spaces. +// +// If any of them is NOT *ServiceConfig, return false. +func equalServiceConfig(a, b serviceconfig.Config) bool { + aa, ok := a.(*ServiceConfig) + if !ok { + return false + } + bb, ok := b.(*ServiceConfig) + if !ok { + return false + } + aaRaw := aa.rawJSONString + aa.rawJSONString = "" + bbRaw := bb.rawJSONString + bb.rawJSONString = "" + defer func() { + aa.rawJSONString = aaRaw + bb.rawJSONString = bbRaw + }() + // Using reflect.DeepEqual instead of cmp.Equal because many balancer + // configs are unexported, and cmp.Equal cannot compare unexported fields + // from unexported structs. + return reflect.DeepEqual(aa, bb) +} diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go index 187c30442..73a2f9266 100644 --- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -19,7 +19,10 @@ // Package serviceconfig defines types and methods for operating on gRPC // service configs. // -// This package is EXPERIMENTAL. +// Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. package serviceconfig // Config represents an opaque data structure holding a service config. diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 9e22c393f..63e476ee7 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -16,8 +16,6 @@ * */ -//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto - // Package stats is for collecting and reporting various network and RPC stats. // This package is for monitoring purpose only. All fields are read-only. // All APIs are experimental. @@ -81,6 +79,10 @@ type InHeader struct { Client bool // WireLength is the wire length of header. WireLength int + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata received. + Header metadata.MD // The following fields are valid only if Client is false. // FullMethod is the full RPC method string, i.e., /package.service/method. @@ -89,10 +91,6 @@ type InHeader struct { RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr - // Compression is the compression algorithm used for the RPC. - Compression string - // Header contains the header metadata received. - Header metadata.MD } // IsClient indicates if the stats information is from client side. @@ -141,6 +139,10 @@ func (s *OutPayload) isRPCStats() {} type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata sent. + Header metadata.MD // The following fields are valid only if Client is true. // FullMethod is the full RPC method string, i.e., /package.service/method. 
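Tying together the MethodConfig/RetryPolicy plumbing above: this machinery is fed by the JSON service config a client supplies. A minimal sketch, with an assumed service name and illustrative backoff values (and noting that, in this gRPC version, configured retries may still be gated behind the GRPC_GO_RETRY environment flag):

package main

import (
	"log"

	"google.golang.org/grpc"
)

// retryConfig follows the shape parseServiceConfig consumes; "echo.Echo" is a
// placeholder service name.
const retryConfig = `{
  "methodConfig": [{
    "name": [{ "service": "echo.Echo" }],
    "waitForReady": true,
    "retryPolicy": {
      "MaxAttempts": 4,
      "InitialBackoff": ".1s",
      "MaxBackoff": "1s",
      "BackoffMultiplier": 2.0,
      "RetryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(retryConfig),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}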
@@ -149,10 +151,6 @@ type OutHeader struct { RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr - // Compression is the compression algorithm used for the RPC. - Compression string - // Header contains the header metadata sent. - Header metadata.MD } // IsClient indicates if this stats information is from client side. @@ -165,6 +163,9 @@ type OutTrailer struct { // Client is true if this OutTrailer is from client side. Client bool // WireLength is the wire length of trailer. + // + // Deprecated: This field is never set. The length is not known when this message is + // emitted because the trailer fields are compressed with hpack after that. WireLength int // Trailer contains the trailer metadata sent to the client. This // field is only valid if this OutTrailer is from the server side. diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index a1348e9b1..01e182c30 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -29,88 +29,23 @@ package status import ( "context" - "errors" "fmt" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/status" ) -func init() { - internal.StatusRawProto = statusRawProto -} - -func statusRawProto(s *Status) *spb.Status { return s.s } - -// statusError is an alias of a status proto. It implements error and Status, -// and a nil statusError should never be returned by this package. -type statusError spb.Status - -func (se *statusError) Error() string { - p := (*spb.Status)(se) - return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) -} - -func (se *statusError) GRPCStatus() *Status { - return &Status{s: (*spb.Status)(se)} -} - -// Is implements future error.Is functionality. -// A statusError is equivalent if the code and message are identical. -func (se *statusError) Is(target error) bool { - tse, ok := target.(*statusError) - if !ok { - return false - } - - return proto.Equal((*spb.Status)(se), (*spb.Status)(tse)) -} - -// Status represents an RPC status code, message, and details. It is immutable -// and should be created with New, Newf, or FromProto. -type Status struct { - s *spb.Status -} - -// Code returns the status code contained in s. -func (s *Status) Code() codes.Code { - if s == nil || s.s == nil { - return codes.OK - } - return codes.Code(s.s.Code) -} - -// Message returns the message contained in s. -func (s *Status) Message() string { - if s == nil || s.s == nil { - return "" - } - return s.s.Message -} - -// Proto returns s's status as an spb.Status proto message. -func (s *Status) Proto() *spb.Status { - if s == nil { - return nil - } - return proto.Clone(s.s).(*spb.Status) -} - -// Err returns an immutable error representing s; returns nil if s.Code() is -// OK. -func (s *Status) Err() error { - if s.Code() == codes.OK { - return nil - } - return (*statusError)(s.s) -} +// Status references google.golang.org/grpc/internal/status. It represents an +// RPC status code, message, and details. It is immutable and should be +// created with New, Newf, or FromProto. +// https://godoc.org/google.golang.org/grpc/internal/status +type Status = status.Status // New returns a Status representing c and msg. 
func New(c codes.Code, msg string) *Status { - return &Status{s: &spb.Status{Code: int32(c), Message: msg}} + return status.New(c, msg) } // Newf returns New(c, fmt.Sprintf(format, a...)). @@ -135,7 +70,7 @@ func ErrorProto(s *spb.Status) error { // FromProto returns a Status representing s. func FromProto(s *spb.Status) *Status { - return &Status{s: proto.Clone(s).(*spb.Status)} + return status.FromProto(s) } // FromError returns a Status representing err if it was produced from this @@ -160,42 +95,6 @@ func Convert(err error) *Status { return s } -// WithDetails returns a new status with the provided details messages appended to the status. -// If any errors are encountered, it returns nil and the first error encountered. -func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { - if s.Code() == codes.OK { - return nil, errors.New("no error details for status with code OK") - } - // s.Code() != OK implies that s.Proto() != nil. - p := s.Proto() - for _, detail := range details { - any, err := ptypes.MarshalAny(detail) - if err != nil { - return nil, err - } - p.Details = append(p.Details, any) - } - return &Status{s: p}, nil -} - -// Details returns a slice of details messages attached to the status. -// If a detail cannot be decoded, the error is returned in place of the detail. -func (s *Status) Details() []interface{} { - if s == nil || s.s == nil { - return nil - } - details := make([]interface{}, 0, len(s.s.Details)) - for _, any := range s.s.Details { - detail := &ptypes.DynamicAny{} - if err := ptypes.UnmarshalAny(any, detail); err != nil { - details = append(details, err) - continue - } - details = append(details, detail.Message) - } - return details -} - // Code returns the Code of the error if it is a Status error, codes.OK if err // is nil, or codes.Unknown otherwise. 
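// Illustrative sketch (not from the vendored sources): caller-side use of the
// status helpers, whose concrete implementation now lives in internal/status.
package example

import (
	"log"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// report classifies an RPC error via the aliased Status type.
func report(err error) {
	if err == nil {
		return
	}
	st := status.Convert(err) // always returns a usable *Status
	switch st.Code() {
	case codes.NotFound:
		log.Printf("not found: %s", st.Message())
	default:
		log.Printf("rpc failed: code=%s msg=%q", st.Code(), st.Message())
	}
}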
func Code(err error) codes.Code { diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index bb99940e3..eda1248d6 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -31,11 +31,13 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcutil" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -170,7 +172,21 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth if err := cc.waitForResolvedAddrs(ctx); err != nil { return nil, err } - mc := cc.GetMethodConfig(method) + + var mc serviceconfig.MethodConfig + var onCommit func() + rpcConfig, err := cc.safeConfigSelector.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: method}) + if err != nil { + return nil, status.Convert(err).Err() + } + if rpcConfig != nil { + if rpcConfig.Context != nil { + ctx = rpcConfig.Context + } + mc = rpcConfig.MethodConfig + onCommit = rpcConfig.OnCommitted + } + if mc.WaitForReady != nil { c.failFast = !*mc.WaitForReady } @@ -272,13 +288,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth cancel: cancel, beginTime: beginTime, firstAttempt: true, + onCommit: onCommit, } if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) } cs.binlog = binarylog.GetMethodLogger(method) - cs.callInfo.stream = cs // Only this initial attempt has stats/tracing. // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. if err := cs.newAttemptLocked(sh, trInfo); err != nil { @@ -348,7 +364,16 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (r if err := cs.ctx.Err(); err != nil { return toRPCErr(err) } - t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method) + + ctx := cs.ctx + if cs.cc.parsedTarget.Scheme == "xds" { + // Add extra metadata (metadata that will be added by transport) to context + // so the balancer can see them. + ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs( + "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), + )) + } + t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { return err } @@ -366,6 +391,11 @@ func (a *csAttempt) newStream() error { cs.callHdr.PreviousAttempts = cs.numRetries s, err := a.t.NewStream(cs.ctx, cs.callHdr) if err != nil { + if _, ok := err.(transport.PerformedIOError); ok { + // Return without converting to an RPC error so retry code can + // inspect. + return err + } return toRPCErr(err) } cs.attempt.s = s @@ -419,7 +449,8 @@ type clientStream struct { // place where we need to check if the attempt is nil. attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? + committed bool // active attempt committed for retry? 
+ onCommit func() buffer []func(a *csAttempt) error // operations to replay on retry bufferSize int // current size of buffer } @@ -448,6 +479,9 @@ type csAttempt struct { } func (cs *clientStream) commitAttemptLocked() { + if !cs.committed && cs.onCommit != nil { + cs.onCommit() + } cs.committed = true cs.buffer = nil } @@ -461,11 +495,21 @@ func (cs *clientStream) commitAttempt() { // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. func (cs *clientStream) shouldRetry(err error) error { - if cs.attempt.s == nil && !cs.callInfo.failFast { - // In the event of any error from NewStream (attempt.s == nil), we - // never attempted to write anything to the wire, so we can retry - // indefinitely for non-fail-fast RPCs. - return nil + unprocessed := false + if cs.attempt.s == nil { + pioErr, ok := err.(transport.PerformedIOError) + if ok { + // Unwrap error. + err = toRPCErr(pioErr.Err) + } else { + unprocessed = true + } + if !ok && !cs.callInfo.failFast { + // In the event of a non-IO operation error from NewStream, we + // never attempted to write anything to the wire, so we can retry + // indefinitely for non-fail-fast RPCs. + return nil + } } if cs.finished || cs.committed { // RPC is finished or committed; cannot retry. @@ -474,13 +518,12 @@ func (cs *clientStream) shouldRetry(err error) error { // Wait for the trailers. if cs.attempt.s != nil { <-cs.attempt.s.Done() + unprocessed = cs.attempt.s.Unprocessed() } - if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { + if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. - cs.firstAttempt = false return nil } - cs.firstAttempt = false if cs.cc.dopts.disableRetry { return err } @@ -498,13 +541,13 @@ func (cs *clientStream) shouldRetry(err error) error { if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { - grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0]) + channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. return err } hasPushback = true } else if len(sps) > 1 { - grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps) + channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. 
return err } @@ -517,8 +560,8 @@ func (cs *clientStream) shouldRetry(err error) error { code = status.Convert(err).Code() } - rp := cs.methodConfig.retryPolicy - if rp == nil || !rp.retryableStatusCodes[code] { + rp := cs.methodConfig.RetryPolicy + if rp == nil || !rp.RetryableStatusCodes[code] { return err } @@ -527,7 +570,7 @@ func (cs *clientStream) shouldRetry(err error) error { if cs.retryThrottler.throttle() { return err } - if cs.numRetries+1 >= rp.maxAttempts { + if cs.numRetries+1 >= rp.MaxAttempts { return err } @@ -536,9 +579,9 @@ func (cs *clientStream) shouldRetry(err error) error { dur = time.Millisecond * time.Duration(pushback) cs.numRetriesSincePushback = 0 } else { - fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := float64(rp.initialBackoff) * fact - if max := float64(rp.maxBackoff); cur > max { + fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.InitialBackoff) * fact + if max := float64(rp.MaxBackoff); cur > max { cur = max } dur = time.Duration(grpcrand.Int63n(int64(cur))) @@ -566,6 +609,7 @@ func (cs *clientStream) retryLocked(lastErr error) error { cs.commitAttemptLocked() return err } + cs.firstAttempt = false if err := cs.newAttemptLocked(nil, nil); err != nil { return err } @@ -800,6 +844,15 @@ func (cs *clientStream) finish(err error) { } cs.finished = true cs.commitAttemptLocked() + if cs.attempt != nil { + cs.attempt.finish(err) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo, cs.attempt) + } + } + } cs.mu.Unlock() // For binary logging. only log cancel in finish (could be caused by RPC ctx // canceled or ClientConn closed). Trailer will be logged in RecvMsg. @@ -821,15 +874,6 @@ func (cs *clientStream) finish(err error) { cs.cc.incrCallsSucceeded() } } - if cs.attempt != nil { - cs.attempt.finish(err) - // after functions all rely upon having a stream. - if cs.attempt.s != nil { - for _, o := range cs.opts { - o.after(cs.callInfo) - } - } - } cs.cancel() } @@ -906,7 +950,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { Payload: m, // TODO truncate large payload. Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength, + WireLength: payInfo.wireLength + headerLen, Length: len(payInfo.uncompressedBytes), }) } @@ -1067,7 +1111,6 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin t: t, } - as.callInfo.stream = as s, err := as.t.NewStream(as.ctx, as.callHdr) if err != nil { err = toRPCErr(err) @@ -1489,7 +1532,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { Payload: m, // TODO truncate large payload. Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength, + WireLength: payInfo.wireLength + headerLen, Length: len(payInfo.uncompressedBytes), }) } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index 584360f68..caea1ebed 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -17,7 +17,12 @@ */ // Package tap defines the function handles which are executed on the transport -// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL. +// layer of gRPC-Go and related information. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
 package tap
 
 import (
diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go
index 0a57b9994..07a2d26b3 100644
--- a/vendor/google.golang.org/grpc/trace.go
+++ b/vendor/google.golang.org/grpc/trace.go
@@ -41,9 +41,6 @@ func methodFamily(m string) string {
 	if i := strings.Index(m, "/"); i >= 0 {
 		m = m[:i] // remove everything from second slash
 	}
-	if i := strings.LastIndex(m, "."); i >= 0 {
-		m = m[i+1:] // cut down to last dotted component
-	}
 	return m
 }
 
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 1d3b043ec..685b91c70 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.26.0"
+const Version = "1.35.0"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
index 798921acc..b41df6dc8 100644
--- a/vendor/google.golang.org/grpc/vet.sh
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -1,20 +1,22 @@
 #!/bin/bash
 
-if [[ `uname -a` = *"Darwin"* ]]; then
-  echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047"
-  exit 1
-fi
-
 set -ex  # Exit on error; debugging enabled.
 set -o pipefail  # Fail a pipe if any sub-command fails.
 
+# not makes sure the command passed to it does not exit with a return code of 0.
+not() {
+  # This is required instead of the earlier (! $COMMAND) because subshells and
+  # pipefail don't work the same on Darwin as in Linux.
+  ! "$@"
+}
+
 die() {
   echo "$@" >&2
   exit 1
 }
 
 fail_on_output() {
-  tee /dev/stderr | (! read)
+  tee /dev/stderr | not read
 }
 
 # Check to make sure it's safe to modify the user's git repo.
@@ -37,8 +39,7 @@ if [[ "$1" = "-install" ]]; then
       golang.org/x/lint/golint \
       golang.org/x/tools/cmd/goimports \
       honnef.co/go/tools/cmd/staticcheck \
-      github.com/client9/misspell/cmd/misspell \
-      github.com/golang/protobuf/protoc-gen-go
+      github.com/client9/misspell/cmd/misspell
     popd
   else
     # Ye olde `go get` incantation.
@@ -48,19 +49,26 @@ if [[ "$1" = "-install" ]]; then
       golang.org/x/lint/golint \
      golang.org/x/tools/cmd/goimports \
       honnef.co/go/tools/cmd/staticcheck \
-      github.com/client9/misspell/cmd/misspell \
-      github.com/golang/protobuf/protoc-gen-go
+      github.com/client9/misspell/cmd/misspell
   fi
   if [[ -z "${VET_SKIP_PROTO}" ]]; then
    if [[ "${TRAVIS}" = "true" ]]; then
-      PROTOBUF_VERSION=3.3.0
+      PROTOBUF_VERSION=3.14.0
       PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
       pushd /home/travis
       wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
       unzip ${PROTOC_FILENAME}
       bin/protoc --version
       popd
-    elif ! which protoc > /dev/null; then
+    elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then
+      PROTOBUF_VERSION=3.14.0
+      PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
+      pushd /home/runner/go
+      wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
+      unzip ${PROTOC_FILENAME}
+      bin/protoc --version
+      popd
+    elif not which protoc > /dev/null; then
       die "Please install protoc into your path"
     fi
   fi
@@ -70,21 +78,27 @@ elif [[ "$#" -ne 0 ]]; then
 fi
 
 # - Ensure all source files contain a copyright message.
-(! git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go')
+not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go'
 
 # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
-(! grep 'func Test[^(]' *_test.go)
-(! grep 'func Test[^(]' test/*.go)
+not grep 'func Test[^(]' *_test.go
+not grep 'func Test[^(]' test/*.go
 
 # - Do not import x/net/context.
-(! git grep -l 'x/net/context' -- "*.go")
+not git grep -l 'x/net/context' -- "*.go"
 
 # - Do not import math/rand for real library code. Use internal/grpcrand for
 # thread safety.
-git grep -l '"math/rand"' -- "*.go" 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|wrr_test')
+git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'
+
+# - Do not call grpclog directly. Use grpclog.Component instead.
+git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
 
 # - Ensure all ptypes proto packages are renamed when importing.
-(! git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go")
+not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
+
+# - Ensure all xds proto imports are renamed to *pb or *grpc.
+git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "'
 
 # - Check imports that are illegal in appengine (until Go 1.11).
 # TODO: Remove when we drop Go 1.10 support
@@ -92,9 +106,9 @@ go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go
 
 # - gofmt, goimports, golint (with exceptions for generated code), go vet.
 gofmt -s -d -l . 2>&1 | fail_on_output
-goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go")
-golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:")
-go vet -all .
+goimports -l . 2>&1 | not grep -vE "\.pb\.go"
+golint ./... 2>&1 | not grep -vE "\.pb\.go:"
+go vet -all ./...
 
 misspell -error .
 
@@ -105,10 +119,10 @@ if [[ -z "${VET_SKIP_PROTO}" ]]; then
   (git status; git --no-pager diff; exit 1)
 fi
 
-# - Check that our module is tidy.
+# - Check that our modules are tidy.
 if go help mod >& /dev/null; then
-  go mod tidy && \
-    git status --porcelain 2>&1 | fail_on_output || \
+  find . -name 'go.mod' | xargs -IXXX bash -c 'cd $(dirname XXX); go mod tidy'
+  git status --porcelain 2>&1 | fail_on_output || \
     (git status; git --no-pager diff; exit 1)
 fi
 
@@ -119,20 +133,20 @@ fi
 SC_OUT="$(mktemp)"
 staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true
 # Error if anything other than deprecation warnings are printed.
-(! grep -v "is deprecated:.*SA1019" "${SC_OUT}")
+not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
 # Only ignore the following deprecated types/fields/functions.
-(! grep -Fv '.HandleResolvedAddrs
-.HandleSubConnStateChange
+not grep -Fv '.CredsBundle
 .HeaderMap
+.Metadata is deprecated: use Attributes
 .NewAddress
 .NewServiceConfig
-.Metadata is deprecated: use Attributes
 .Type is deprecated: use Attributes
-.UpdateBalancerState
+balancer.ErrTransientFailure
 balancer.Picker
 grpc.CallCustomCodec
 grpc.Code
 grpc.Compressor
+grpc.CustomCodec
 grpc.Decompressor
 grpc.MaxMsgSize
 grpc.MethodConfig
@@ -140,9 +154,7 @@ grpc.NewGZIPCompressor
 grpc.NewGZIPDecompressor
 grpc.RPCCompressor
 grpc.RPCDecompressor
-grpc.RoundRobin
 grpc.ServiceConfig
-grpc.WithBalancer
 grpc.WithBalancerName
 grpc.WithCompressor
 grpc.WithDecompressor
@@ -151,9 +163,58 @@ grpc.WithMaxMsgSize
 grpc.WithServiceConfig
 grpc.WithTimeout
 http.CloseNotifier
-naming.Resolver
-naming.Update
-naming.Watcher
+info.SecurityVersion
 resolver.Backend
-resolver.GRPCLB' "${SC_OUT}"
-)
+resolver.GRPCLB
+extDesc.Filename is deprecated
+BuildVersion is deprecated
+github.com/golang/protobuf/jsonpb is deprecated
+proto is deprecated
+xxx_messageInfo_
+proto.InternalMessageInfo is deprecated
+proto.EnumName is deprecated
+proto.ErrInternalBadWireType is deprecated
+proto.FileDescriptor is deprecated
+proto.Marshaler is deprecated
+proto.MessageType is deprecated
+proto.RegisterEnum is deprecated
+proto.RegisterFile is deprecated
+proto.RegisterType is deprecated
+proto.RegisterExtension is deprecated
+proto.RegisteredExtension is deprecated
+proto.RegisteredExtensions is deprecated
+proto.RegisterMapType is deprecated
+proto.Unmarshaler is deprecated' "${SC_OUT}"
+
+# - special golint on package comments.
+lint_package_comment_per_package() {
+  # Number of files in this go package.
+  fileCount=$(go list -f '{{len .GoFiles}}' $1)
+  if [ ${fileCount} -eq 0 ]; then
+    return 0
+  fi
+  # Number of package errors generated by golint.
+  lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment")
+  # golint complains about every file that's missing the package comment. If the
+  # number of files for this package is greater than the number of errors, there's
+  # at least one file with package comment, good. Otherwise, fail.
+  if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then
+    echo "Package $1 (with ${fileCount} files) is missing package comment"
+    return 1
+  fi
+}
+lint_package_comment() {
+  set +ex
+
+  count=0
+  for i in $(go list ./...); do
+    lint_package_comment_per_package "$i"
+    ((count += $?))
+  done
+
+  set -ex
+  return $count
+}
+lint_package_comment
+
+echo SUCCESS
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 7344ac27b..63d9fa471 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -138,6 +138,8 @@ github.com/imdario/mergo
 github.com/jmespath/go-jmespath
 # github.com/json-iterator/go v1.1.10
 github.com/json-iterator/go
+# github.com/kube-storage/spec v0.1.0
+github.com/kube-storage/spec/lib/go/replication
 # github.com/kubernetes-csi/csi-lib-utils v0.7.0
 github.com/kubernetes-csi/csi-lib-utils/connection
 github.com/kubernetes-csi/csi-lib-utils/metrics
@@ -305,18 +307,18 @@ google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
 # google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.29.1 => google.golang.org/grpc v1.26.0
+# google.golang.org/grpc v1.35.0 => google.golang.org/grpc v1.35.0
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
 google.golang.org/grpc/balancer
 google.golang.org/grpc/balancer/base
+google.golang.org/grpc/balancer/grpclb/state
 google.golang.org/grpc/balancer/roundrobin
 google.golang.org/grpc/binarylog/grpc_binarylog_v1
 google.golang.org/grpc/codes
 google.golang.org/grpc/connectivity
 google.golang.org/grpc/credentials
-google.golang.org/grpc/credentials/internal
 google.golang.org/grpc/encoding
 google.golang.org/grpc/encoding/proto
 google.golang.org/grpc/grpclog
@@ -326,16 +328,24 @@ google.golang.org/grpc/internal/balancerload
 google.golang.org/grpc/internal/binarylog
 google.golang.org/grpc/internal/buffer
 google.golang.org/grpc/internal/channelz
+google.golang.org/grpc/internal/credentials
 google.golang.org/grpc/internal/envconfig
+google.golang.org/grpc/internal/grpclog
 google.golang.org/grpc/internal/grpcrand
 google.golang.org/grpc/internal/grpcsync
+google.golang.org/grpc/internal/grpcutil
+google.golang.org/grpc/internal/metadata
+google.golang.org/grpc/internal/resolver
 google.golang.org/grpc/internal/resolver/dns
 google.golang.org/grpc/internal/resolver/passthrough
+google.golang.org/grpc/internal/resolver/unix
+google.golang.org/grpc/internal/serviceconfig
+google.golang.org/grpc/internal/status
 google.golang.org/grpc/internal/syscall
 google.golang.org/grpc/internal/transport
+google.golang.org/grpc/internal/transport/networktype
 google.golang.org/grpc/keepalive
 google.golang.org/grpc/metadata
-google.golang.org/grpc/naming
 google.golang.org/grpc/peer
 google.golang.org/grpc/resolver
 google.golang.org/grpc/serviceconfig